Posted to commits@ambari.apache.org by nc...@apache.org on 2015/09/30 16:45:21 UTC

[01/50] [abbrv] ambari git commit: AMBARI-13230. Comparing configs works wrong for some services (akovalenko)

Repository: ambari
Updated Branches:
  refs/heads/branch-dev-patch-upgrade 0fc88bed2 -> bff61b83e


AMBARI-13230. Comparing configs works wrong for some services (akovalenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/57733408
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/57733408
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/57733408

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 57733408464fb798108391b3e2585732602a9932
Parents: 04febac
Author: Aleksandr Kovalenko <ak...@hortonworks.com>
Authored: Thu Sep 24 19:52:59 2015 +0300
Committer: Aleksandr Kovalenko <ak...@hortonworks.com>
Committed: Thu Sep 24 19:55:13 2015 +0300

----------------------------------------------------------------------
 ambari-web/app/mixins/common/configs/configs_comparator.js | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/57733408/ambari-web/app/mixins/common/configs/configs_comparator.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/common/configs/configs_comparator.js b/ambari-web/app/mixins/common/configs/configs_comparator.js
index 191a3a3..cda5407 100644
--- a/ambari-web/app/mixins/common/configs/configs_comparator.js
+++ b/ambari-web/app/mixins/common/configs/configs_comparator.js
@@ -270,8 +270,8 @@ App.ConfigsComparator = Em.Mixin.create({
    * @method hasCompareDiffs
    */
   hasCompareDiffs: function (originalConfig, compareConfig) {
-    var originalValue = Em.get(originalConfig, 'value');
-    var compareValue = Em.get(compareConfig, 'value');
+    var originalValue = App.config.trimProperty({ value: Em.get(originalConfig, 'value'), displayType: 'advanced' });
+    var compareValue = App.config.trimProperty({ value: Em.get(compareConfig, 'value'), displayType: 'advanced' });
 
     if (originalValue.toArray) {
       originalValue = originalValue.toArray();
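
The change above normalizes both values before comparing them: previously, two
config versions whose values differed only in surrounding whitespace were
flagged as different. A minimal sketch of the idea in Python (the real fix is
the JavaScript above; trim() here stands in for App.config.trimProperty with
displayType 'advanced', which presumably strips surrounding whitespace):

  # Illustration only -- the actual fix lives in ambari-web (JavaScript).
  def has_compare_diffs(original_value, compare_value):
      """Report a diff only when the values differ after trimming."""
      def trim(value):
          # Stand-in for App.config.trimProperty(..., displayType 'advanced').
          return value.strip() if isinstance(value, str) else value
      return trim(original_value) != trim(compare_value)

  # "1024\n" vs "1024" no longer shows as a difference; real changes still do.
  assert not has_compare_diffs("1024\n", "1024")
  assert has_compare_diffs("1024", "2048")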


[43/50] [abbrv] ambari git commit: AMBARI-13229. When non-supported version of python is set as default, Ambari fails in multiple places (aonishuk)

Posted by nc...@apache.org.
AMBARI-13229. When non-supported version of python is set as default, Ambari fails in multiple places (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9f5d2978
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9f5d2978
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9f5d2978

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 9f5d29784af26a3a98fa83387eb3c610600a7cfb
Parents: c8ea857
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Tue Sep 29 13:10:24 2015 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Tue Sep 29 13:10:52 2015 +0300

----------------------------------------------------------------------
 .../libraries/functions/conf_select.py                  |  2 +-
 .../test/python/stacks/2.0.6/HBASE/test_hbase_client.py |  8 ++++----
 .../test/python/stacks/2.0.6/HBASE/test_hbase_master.py |  4 ++--
 .../stacks/2.0.6/HBASE/test_hbase_regionserver.py       |  4 ++--
 .../src/test/python/stacks/2.0.6/HDFS/test_datanode.py  |  4 ++--
 .../test/python/stacks/2.0.6/HDFS/test_hdfs_client.py   |  4 ++--
 .../test/python/stacks/2.0.6/HDFS/test_journalnode.py   |  4 ++--
 .../src/test/python/stacks/2.0.6/HDFS/test_namenode.py  |  4 ++--
 .../test/python/stacks/2.0.6/HIVE/test_hive_client.py   |  8 ++++----
 .../test/python/stacks/2.0.6/HIVE/test_hive_server.py   |  4 ++--
 .../python/stacks/2.0.6/HIVE/test_webhcat_server.py     |  8 ++++----
 .../test/python/stacks/2.0.6/OOZIE/test_oozie_client.py |  4 ++--
 .../test/python/stacks/2.0.6/OOZIE/test_oozie_server.py |  8 ++++----
 .../src/test/python/stacks/2.0.6/PIG/test_pig_client.py |  8 ++++----
 .../src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py    |  4 ++--
 .../test/python/stacks/2.0.6/YARN/test_historyserver.py |  4 ++--
 .../python/stacks/2.0.6/YARN/test_mapreduce2_client.py  |  4 ++--
 .../test/python/stacks/2.0.6/YARN/test_nodemanager.py   |  4 ++--
 .../python/stacks/2.0.6/YARN/test_resourcemanager.py    |  4 ++--
 .../test/python/stacks/2.0.6/YARN/test_yarn_client.py   |  4 ++--
 .../stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py     |  4 ++--
 .../stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py     |  4 ++--
 .../test/python/stacks/2.1/FALCON/test_falcon_client.py |  4 ++--
 .../test/python/stacks/2.1/FALCON/test_falcon_server.py |  4 ++--
 .../test/python/stacks/2.1/HIVE/test_hive_metastore.py  |  4 ++--
 .../python/stacks/2.1/STORM/test_storm_drpc_server.py   |  4 ++--
 .../test/python/stacks/2.1/STORM/test_storm_nimbus.py   |  4 ++--
 .../python/stacks/2.1/STORM/test_storm_nimbus_prod.py   |  4 ++--
 .../stacks/2.1/STORM/test_storm_rest_api_service.py     |  4 ++--
 .../python/stacks/2.1/STORM/test_storm_supervisor.py    |  4 ++--
 .../stacks/2.1/STORM/test_storm_supervisor_prod.py      |  4 ++--
 .../python/stacks/2.1/STORM/test_storm_ui_server.py     |  4 ++--
 .../src/test/python/stacks/2.1/TEZ/test_tez_client.py   |  8 ++++----
 .../python/stacks/2.1/YARN/test_apptimelineserver.py    |  4 ++--
 .../python/stacks/2.2/ACCUMULO/test_accumulo_client.py  |  4 ++--
 .../test/python/stacks/2.2/KAFKA/test_kafka_broker.py   |  4 ++--
 .../test/python/stacks/2.2/KNOX/test_knox_gateway.py    | 12 ++++++------
 .../test/python/stacks/2.2/RANGER/test_ranger_admin.py  |  4 ++--
 .../python/stacks/2.2/RANGER/test_ranger_usersync.py    |  4 ++--
 .../test/python/stacks/2.2/SLIDER/test_slider_client.py |  8 ++++----
 .../python/stacks/2.2/SPARK/test_job_history_server.py  |  4 ++--
 .../test/python/stacks/2.2/SPARK/test_spark_client.py   |  4 ++--
 .../test/python/stacks/2.3/MAHOUT/test_mahout_client.py |  4 ++--
 .../python/stacks/2.3/SPARK/test_spark_thrift_server.py |  4 ++--
 44 files changed, 105 insertions(+), 105 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
index a820bc3..2ad9a0d 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
@@ -177,7 +177,7 @@ PACKAGE_DIRS = {
 }
 
 def get_cmd(command, package, version):
-  return ('conf-select', command, '--package', package, '--stack-version', version, '--conf-version', '0')
+  return ('ambari-python-wrap','/usr/bin/conf-select', command, '--package', package, '--stack-version', version, '--conf-version', '0')
 
 def _valid(stack_name, package, ver):
   if stack_name != "HDP":

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
index 742b39a..9959874 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
@@ -235,15 +235,15 @@ class TestHBaseClient(RMFTestCase):
     self.assertEquals(3, mocks_dict['call'].call_count)
     self.assertEquals(6, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'hbase', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hbase', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[1][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'hbase', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hbase', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[4][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[1][0][0])
 
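A note on the [1][0][0] indexing used throughout these assertions:
call_args_list[i] is the i-th call recorded by the mock, the first [0]
selects that call's positional-argument tuple, and the final [0] is the
first positional argument, i.e. the command tuple handed to checked_call.
A self-contained sketch (unittest.mock standing in for the mock library
the test harness patches with):

  from unittest.mock import MagicMock

  checked_call = MagicMock()
  # Simulate the code under test issuing a conf-select command.
  checked_call(('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir'),
               logoutput=True)

  # call_args_list[0] -> first call; [0] -> positional args; [0] -> command.
  cmd = checked_call.call_args_list[0][0][0]
  assert cmd == ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir')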

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index 7e1a6f1..74b4aa6 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -763,9 +763,9 @@ class TestHBaseMaster(RMFTestCase):
     self.assertEquals(3, mocks_dict['checked_call'].call_count)
 
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'hbase', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hbase', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[1][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'hbase', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hbase', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
index 9973825..fa134b4 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
@@ -538,9 +538,9 @@ class TestHbaseRegionServer(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(3, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'hbase', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hbase', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[1][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'hbase', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hbase', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
index 0ec1104..fd66502 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
@@ -491,10 +491,10 @@ class TestDatanode(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
index 9177039..9d93128 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
@@ -210,10 +210,10 @@ class Test(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
 
   def test_pre_rolling_restart(self):

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
index 364715d..d333071 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
@@ -497,8 +497,8 @@ class TestJournalnode(RMFTestCase):
     self.assertNoMoreResources()
 
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index ada4d9b..3378892 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -1326,10 +1326,10 @@ class TestNamenode(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
 
   def test_post_rolling_restart(self):

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py
index db4b350..97898b8 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py
@@ -216,14 +216,14 @@ class TestHiveClient(RMFTestCase):
     self.assertEquals(2, mocks_dict['call'].call_count)
     self.assertEquals(2, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'hive', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hive', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'hive', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hive', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[1][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[1][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
index c2b845c..14ed232 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
@@ -964,8 +964,8 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'hive', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hive', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'hive', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hive', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
index bf8742e..c11c2d0 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
@@ -391,16 +391,16 @@ class TestWebHCatServer(RMFTestCase):
     self.assertEquals(2, mocks_dict['call'].call_count)
     self.assertEquals(2, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'hive-hcatalog', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hive-hcatalog', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'hive-hcatalog', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hive-hcatalog', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[1][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[1][0][0])
 
   @patch("resource_management.core.shell.call")

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
index 6d3e7d0..a21346d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
@@ -240,8 +240,8 @@ class TestOozieClient(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'oozie', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'oozie', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'oozie', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'oozie', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
index 798ffd1..b931378 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
@@ -1267,11 +1267,11 @@ class TestOozieServer(RMFTestCase):
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
 
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'oozie', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'oozie', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
 
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'oozie', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'oozie', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
 
 
@@ -1529,9 +1529,9 @@ class TestOozieServer(RMFTestCase):
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
 
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'oozie', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'oozie', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
 
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'oozie', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'oozie', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py b/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py
index e363b2c..2ea5b67 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py
@@ -173,14 +173,14 @@ class TestPigClient(RMFTestCase):
     self.assertEquals(2, mocks_dict['call'].call_count)
     self.assertEquals(2, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'pig', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'pig', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[1][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'pig', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'pig', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[1][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py b/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py
index e07139e..381eb17 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py
@@ -134,8 +134,8 @@ class TestSqoop(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'sqoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'sqoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'sqoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'sqoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
index 1b2fae5..ab6d162 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
@@ -798,8 +798,8 @@ class TestHistoryServer(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py
index 0a2480e..532ce36 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py
@@ -410,8 +410,8 @@ class TestMapReduce2Client(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
index 262a2d1..0542dea 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
@@ -719,8 +719,8 @@ class TestNodeManager(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
index 9e98601..a965c90 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
@@ -646,8 +646,8 @@ class TestResourceManager(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py
index 4adf6e5..413b2ad 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py
@@ -579,8 +579,8 @@ class TestYarnClient(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py
index 309380c..7a624bd 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py
@@ -196,10 +196,10 @@ class TestZookeeperClient(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'zookeeper', '--stack-version', '2.3.0.0-3242', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'zookeeper', '--stack-version', '2.3.0.0-3242', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'zookeeper', '--stack-version', '2.3.0.0-3242', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'zookeeper', '--stack-version', '2.3.0.0-3242', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
 
     self.assertNoMoreResources()

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
index ac5311f..a6d610f 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
@@ -382,10 +382,10 @@ class TestZookeeperServer(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'zookeeper', '--stack-version', '2.3.0.0-3242', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'zookeeper', '--stack-version', '2.3.0.0-3242', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'zookeeper', '--stack-version', '2.3.0.0-3242', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'zookeeper', '--stack-version', '2.3.0.0-3242', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
 
     self.assertNoMoreResources()

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py
index 2667aaf..72bc3b7 100644
--- a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py
+++ b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py
@@ -146,8 +146,8 @@ class TestFalconClient(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'falcon', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'falcon', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'falcon', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'falcon', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
index 229dffa..c647228 100644
--- a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
+++ b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
@@ -555,8 +555,8 @@ class TestFalconServer(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'falcon', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'falcon', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'falcon', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'falcon', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py b/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
index 0dcbd7d..a51f139 100644
--- a/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
+++ b/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
@@ -524,10 +524,10 @@ class TestHiveMetastore(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'hive', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hive', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'hive', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hive', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py
index 3282192..7eb3f50 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py
@@ -174,10 +174,10 @@ class TestStormDrpcServer(TestStormBase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
index 5a9c8bf..39c7ee3 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
@@ -175,10 +175,10 @@ class TestStormNimbus(TestStormBase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
     
   @patch("resource_management.libraries.functions.security_commons.build_expectations")

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus_prod.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus_prod.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus_prod.py
index 78b8866..e1506d8 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus_prod.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus_prod.py
@@ -140,8 +140,8 @@ class TestStormNimbus(TestStormBase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_rest_api_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_rest_api_service.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_rest_api_service.py
index 7513f35..127e63d 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_rest_api_service.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_rest_api_service.py
@@ -173,8 +173,8 @@ class TestStormRestApi(TestStormBase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor.py
index 628ea69..ef1c91e 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor.py
@@ -220,8 +220,8 @@ class TestStormSupervisor(TestStormBase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor_prod.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor_prod.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor_prod.py
index aa3b0e6..cd06e5b 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor_prod.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor_prod.py
@@ -189,8 +189,8 @@ class TestStormSupervisor(TestStormBase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py
index af51303..78e8202 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py
@@ -171,10 +171,10 @@ class TestStormUiServer(TestStormBase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py b/ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py
index 781105f..3532829 100644
--- a/ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py
+++ b/ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py
@@ -114,14 +114,14 @@ class TestTezClient(RMFTestCase):
     self.assertEquals(2, mocks_dict['call'].call_count)
     self.assertEquals(2, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'tez', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'tez', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'tez', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'tez', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[1][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[1][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py b/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
index 58573de..6f3ea6d 100644
--- a/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
+++ b/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
@@ -398,8 +398,8 @@ class TestAppTimelineServer(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.2/ACCUMULO/test_accumulo_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/ACCUMULO/test_accumulo_client.py b/ambari-server/src/test/python/stacks/2.2/ACCUMULO/test_accumulo_client.py
index 139a7d3..bc6f669 100644
--- a/ambari-server/src/test/python/stacks/2.2/ACCUMULO/test_accumulo_client.py
+++ b/ambari-server/src/test/python/stacks/2.2/ACCUMULO/test_accumulo_client.py
@@ -73,9 +73,9 @@ class TestAccumuloClient(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'accumulo', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'accumulo', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
 
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'accumulo', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'accumulo', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.2/KAFKA/test_kafka_broker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/KAFKA/test_kafka_broker.py b/ambari-server/src/test/python/stacks/2.2/KAFKA/test_kafka_broker.py
index d62c2fd..a034cee 100644
--- a/ambari-server/src/test/python/stacks/2.2/KAFKA/test_kafka_broker.py
+++ b/ambari-server/src/test/python/stacks/2.2/KAFKA/test_kafka_broker.py
@@ -141,8 +141,8 @@ class TestKafkaBroker(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'kafka', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'kafka', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'kafka', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'kafka', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py b/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py
index 609c57e..817b87d 100644
--- a/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py
+++ b/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py
@@ -315,10 +315,10 @@ class TestKnoxGateway(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'knox', '--stack-version', version, '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'knox', '--stack-version', version, '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'knox', '--stack-version', version, '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'knox', '--stack-version', version, '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
 
   @patch("os.remove")
@@ -387,10 +387,10 @@ class TestKnoxGateway(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'knox', '--stack-version', version, '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'knox', '--stack-version', version, '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'knox', '--stack-version', version, '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'knox', '--stack-version', version, '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
 
   @patch("os.remove")
@@ -461,10 +461,10 @@ class TestKnoxGateway(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'knox', '--stack-version', version, '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'knox', '--stack-version', version, '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'knox', '--stack-version', version, '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'knox', '--stack-version', version, '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
     '''
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_admin.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_admin.py b/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_admin.py
index 50d523c..f2d8178 100644
--- a/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_admin.py
+++ b/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_admin.py
@@ -216,8 +216,8 @@ class TestRangerAdmin(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'ranger-admin', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'ranger-admin', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'ranger-admin', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'ranger-admin', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_usersync.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_usersync.py b/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_usersync.py
index c325709..fecc734 100644
--- a/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_usersync.py
+++ b/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_usersync.py
@@ -153,10 +153,10 @@ class TestRangerUsersync(RMFTestCase):
     self.assertEquals(2, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'ranger-usersync', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'ranger-usersync', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'ranger-usersync', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'ranger-usersync', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
 
   def assert_configure_default(self):

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.2/SLIDER/test_slider_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/SLIDER/test_slider_client.py b/ambari-server/src/test/python/stacks/2.2/SLIDER/test_slider_client.py
index 7d45a35..f83cf7b 100644
--- a/ambari-server/src/test/python/stacks/2.2/SLIDER/test_slider_client.py
+++ b/ambari-server/src/test/python/stacks/2.2/SLIDER/test_slider_client.py
@@ -143,14 +143,14 @@ class TestSliderClient(RMFTestCase):
     self.assertEquals(2, mocks_dict['call'].call_count)
     self.assertEquals(2, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'slider', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'slider', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'slider', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'slider', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[1][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[1][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py b/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
index 76e7653..4a07e91 100644
--- a/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
+++ b/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
@@ -313,8 +313,8 @@ class TestJobHistoryServer(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'spark', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'spark', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'spark', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'spark', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py b/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py
index fc4b596..b9c2878 100644
--- a/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py
+++ b/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py
@@ -150,8 +150,8 @@ class TestSparkClient(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'spark', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'spark', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'spark', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'spark', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_client.py b/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_client.py
index 17eaa61..c0daec0 100644
--- a/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_client.py
+++ b/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_client.py
@@ -117,9 +117,9 @@ class TestMahoutClient(RMFTestCase):
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
 
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'mahout', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'mahout', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
       mocks_dict['checked_call'].call_args_list[0][0][0])
 
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'mahout', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'mahout', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
       mocks_dict['call'].call_args_list[0][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/9f5d2978/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py b/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
index 6e36508..b3b8235 100644
--- a/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
+++ b/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
@@ -173,8 +173,8 @@ class TestSparkThriftServer(RMFTestCase):
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)
     self.assertEquals(
-      ('conf-select', 'set-conf-dir', '--package', 'spark', '--stack-version', '2.3.2.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'spark', '--stack-version', '2.3.2.0-1234', '--conf-version', '0'),
        mocks_dict['checked_call'].call_args_list[0][0][0])
     self.assertEquals(
-      ('conf-select', 'create-conf-dir', '--package', 'spark', '--stack-version', '2.3.2.0-1234', '--conf-version', '0'),
+      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'spark', '--stack-version', '2.3.2.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
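
For reference, the common thread in these test updates: conf-select is no
longer executed directly (where the script's shebang would pick up whatever
python is the system default) but through the ambari-python-wrap launcher.
A minimal sketch of the command tuple the assertions expect; the helper name
build_conf_select_cmd is illustrative, not Ambari API:

# a minimal sketch, assuming ambari-python-wrap is on PATH and
# /usr/bin/conf-select is the real script
CONF_SELECT = '/usr/bin/conf-select'

def build_conf_select_cmd(action, package, stack_version, conf_version='0'):
    # prefix with the wrapper so a non-supported default python is never used
    return ('ambari-python-wrap', CONF_SELECT, action,
            '--package', package,
            '--stack-version', stack_version,
            '--conf-version', conf_version)

assert build_conf_select_cmd('set-conf-dir', 'spark', '2.3.2.0-1234') == (
    'ambari-python-wrap', CONF_SELECT, 'set-conf-dir',
    '--package', 'spark', '--stack-version', '2.3.2.0-1234',
    '--conf-version', '0')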


[38/50] [abbrv] ambari git commit: AMBARI-13202. Improve error checking for blueprint resource creation. (Olivér Szabó via rnettleton)

Posted by nc...@apache.org.
AMBARI-13202. Improve error checking for blueprint resource creation. (Olivér Szabó via rnettleton)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/460d191a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/460d191a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/460d191a

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 460d191a1abdf7c59344149b3e5fb8a24050afb1
Parents: 4119f0e
Author: Bob Nettleton <rn...@hortonworks.com>
Authored: Mon Sep 28 14:06:46 2015 -0400
Committer: Bob Nettleton <rn...@hortonworks.com>
Committed: Mon Sep 28 14:45:23 2015 -0400

----------------------------------------------------------------------
 .../internal/BlueprintResourceProvider.java     | 31 ++++---
 .../internal/BlueprintResourceProviderTest.java | 93 +++++++++++++++++++-
 2 files changed, 109 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/460d191a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintResourceProvider.java
index 5fa6655..6cb6a74 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintResourceProvider.java
@@ -18,6 +18,8 @@
 
 package org.apache.ambari.server.controller.internal;
 
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -85,8 +87,15 @@ public class BlueprintResourceProvider extends AbstractControllerResourceProvide
   public static final String PROPERTIES_PROPERTY_ID = "properties";
   public static final String PROPERTIES_ATTRIBUTES_PROPERTY_ID = "properties_attributes";
   public static final String SCHEMA_IS_NOT_SUPPORTED_MESSAGE =
-      "Configuration format provided in Blueprint is not supported";
-
+    "Configuration format provided in Blueprint is not supported";
+  public static final String REQUEST_BODY_EMPTY_ERROR_MESSAGE =
+    "Request body for Blueprint create request is empty";
+  public static final String CONFIGURATION_LIST_CHECK_ERROR_MESSAGE =
+    "Configurations property must be a List of Maps";
+  public static final String CONFIGURATION_MAP_CHECK_ERROR_MESSAGE =
+    "Configuration elements must be Maps";
+  public static final String CONFIGURATION_MAP_SIZE_CHECK_ERROR_MESSAGE =
+    "Configuration Maps must hold a single configuration type each";
   // Primary Key Fields
   private static Set<String> pkPropertyIds =
       new HashSet<String>(Arrays.asList(new String[]{
@@ -384,22 +393,16 @@ public class BlueprintResourceProvider extends AbstractControllerResourceProvide
       @Override
       public Void invoke() throws AmbariException {
         String rawRequestBody = requestInfoProps.get(Request.REQUEST_INFO_BODY_PROPERTY);
+        Preconditions.checkArgument(!Strings.isNullOrEmpty(rawRequestBody), REQUEST_BODY_EMPTY_ERROR_MESSAGE);
+
         Map<String, Object> rawBodyMap = jsonSerializer.<Map<String, Object>>fromJson(rawRequestBody, Map.class);
         Object configurationData = rawBodyMap.get(CONFIGURATION_PROPERTY_ID);
 
         if (configurationData != null) {
-          if (configurationData instanceof List) {
-            for (Object map : (List) configurationData) {
-              if (map instanceof Map) {
-                if (((Map) map).size() > 1) {
-                  throw new IllegalArgumentException("Configuration Maps must hold a single configuration type each");
-                }
-              } else {
-                throw new IllegalArgumentException("Configuration elements must be Maps");
-              }
-            }
-          } else {
-            throw new IllegalArgumentException("Configurations property must be a List of Maps");
+          Preconditions.checkArgument(configurationData instanceof List, CONFIGURATION_LIST_CHECK_ERROR_MESSAGE);
+          for (Object map : (List) configurationData) {
+            Preconditions.checkArgument(map instanceof Map, CONFIGURATION_MAP_CHECK_ERROR_MESSAGE);
+            Preconditions.checkArgument(((Map) map).size() <= 1, CONFIGURATION_MAP_SIZE_CHECK_ERROR_MESSAGE);
           }
         }
         Blueprint blueprint;

http://git-wip-us.apache.org/repos/asf/ambari/blob/460d191a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintResourceProviderTest.java
index 4a5ff46..5bfdebb 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintResourceProviderTest.java
@@ -167,6 +167,38 @@ public class BlueprintResourceProviderTest {
     verify(dao, entity, blueprintFactory, metaInfo, request, managementController);
   }
 
+  @Test
+  public void testCreateResources_RequestBodyIsEmpty() throws Exception {
+    AmbariManagementController managementController = createMock(AmbariManagementController.class);
+    Request request = createMock(Request.class);
+
+    Set<Map<String, Object>> setProperties = getBlueprintTestProperties();
+    Map<String, String> requestInfoProperties = new HashMap<String, String>();
+    requestInfoProperties.put(Request.REQUEST_INFO_BODY_PROPERTY, null);
+
+    // set expectations
+    expect(request.getProperties()).andReturn(setProperties);
+    expect(request.getRequestInfoProperties()).andReturn(requestInfoProperties);
+
+    replay(request, managementController);
+    // end expectations
+
+    ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider(
+      Resource.Type.Blueprint,
+      PropertyHelper.getPropertyIds(Resource.Type.Blueprint),
+      PropertyHelper.getKeyPropertyIds(Resource.Type.Blueprint),
+      managementController);
+
+    try {
+      provider.createResources(request);
+      fail("Exception expected");
+    } catch (IllegalArgumentException e) {
+      //expected exception
+      assertEquals(BlueprintResourceProvider.REQUEST_BODY_EMPTY_ERROR_MESSAGE, e.getMessage());
+    }
+    verify(request, managementController);
+  }
+
   @Test
   public void testCreateResources_NoValidation() throws Exception {
 
@@ -491,7 +523,7 @@ public class BlueprintResourceProviderTest {
   }
 
   @Test
-  public void testCreateResources_withWrongConfigurationsStructure() throws ResourceAlreadyExistsException, SystemException,
+  public void testCreateResources_wrongConfigurationsStructure_withWrongConfigMapSize() throws ResourceAlreadyExistsException, SystemException,
       UnsupportedPropertyException, NoSuchParentResourceException
   {
     Request request = createMock(Request.class);
@@ -516,6 +548,65 @@ public class BlueprintResourceProviderTest {
       fail("Exception expected");
     } catch (IllegalArgumentException e) {
       //expected exception
+      assertEquals(BlueprintResourceProvider.CONFIGURATION_MAP_SIZE_CHECK_ERROR_MESSAGE, e.getMessage());
+    }
+    verify(dao, metaInfo, request);
+  }
+
+  @Test
+  public void testCreateResources_wrongConfigurationStructure_withoutConfigMaps() throws ResourceAlreadyExistsException, SystemException,
+    UnsupportedPropertyException, NoSuchParentResourceException {
+
+    Request request = createMock(Request.class);
+
+    Set<Map<String, Object>> setProperties = getBlueprintTestProperties();
+
+    Map<String, String> requestInfoProperties = new HashMap<String, String>();
+    String configurationData = "{\"configurations\":[\"config-type1\", \"config-type2\"]}";
+    requestInfoProperties.put(Request.REQUEST_INFO_BODY_PROPERTY, configurationData);
+
+    // set expectations
+    expect(request.getProperties()).andReturn(setProperties);
+    expect(request.getRequestInfoProperties()).andReturn(requestInfoProperties);
+
+    replay(dao, metaInfo, request);
+    // end expectations
+
+    try {
+      provider.createResources(request);
+      fail("Exception expected");
+    } catch (IllegalArgumentException e) {
+      //expected exception
+      assertEquals(BlueprintResourceProvider.CONFIGURATION_MAP_CHECK_ERROR_MESSAGE, e.getMessage());
+    }
+    verify(dao, metaInfo, request);
+  }
+
+  @Test
+  public void testCreateResources_wrongConfigurationStructure_withoutConfigsList() throws ResourceAlreadyExistsException, SystemException,
+    UnsupportedPropertyException, NoSuchParentResourceException {
+
+    Request request = createMock(Request.class);
+
+    Set<Map<String, Object>> setProperties = getBlueprintTestProperties();
+
+    Map<String, String> requestInfoProperties = new HashMap<String, String>();
+    String configurationData = "{\"configurations\":{\"config-type1\": \"properties\", \"config-type2\": \"properties\"}}";
+    requestInfoProperties.put(Request.REQUEST_INFO_BODY_PROPERTY, configurationData);
+
+    // set expectations
+    expect(request.getProperties()).andReturn(setProperties);
+    expect(request.getRequestInfoProperties()).andReturn(requestInfoProperties);
+
+    replay(dao, metaInfo, request);
+    // end expectations
+
+    try {
+      provider.createResources(request);
+      fail("Exception expected");
+    } catch (IllegalArgumentException e) {
+      //expected exception
+      assertEquals(BlueprintResourceProvider.CONFIGURATION_LIST_CHECK_ERROR_MESSAGE, e.getMessage());
     }
     verify(dao, metaInfo, request);
   }
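
The change above swaps hand-rolled if/else validation for Guava's
Preconditions.checkArgument, failing fast with a named message constant that
the new tests assert against. A sketch of the same fail-fast pattern, written
in Python for brevity; check_argument is a stand-in for the Guava call, not
Ambari code:

def check_argument(condition, message):
    # stand-in for Preconditions.checkArgument / IllegalArgumentException
    if not condition:
        raise ValueError(message)

def validate_configurations(configuration_data):
    # mirrors the three checks above
    check_argument(isinstance(configuration_data, list),
                   'Configurations property must be a List of Maps')
    for entry in configuration_data:
        check_argument(isinstance(entry, dict),
                       'Configuration elements must be Maps')
        check_argument(len(entry) <= 1,
                       'Configuration Maps must hold a single configuration type each')

validate_configurations([{'config-type1': {}}])   # passes
# validate_configurations(['config-type1'])       # raises: elements must be Maps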


[21/50] [abbrv] ambari git commit: AMBARI-13245. RU cluster in hung state while trying to perform downgrade (part2) (jonathanhurley)

Posted by nc...@apache.org.
AMBARI-13245. RU cluster in hung state while trying to perform downgrade (part2) (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/156afda5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/156afda5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/156afda5

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 156afda504ce75fdfaf0efd2509ca757baefc463
Parents: 7e2a1c0
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Fri Sep 25 20:26:10 2015 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Fri Sep 25 22:29:56 2015 -0400

----------------------------------------------------------------------
 .../actionmanager/ActionDBAccessorImpl.java     | 45 +++++---------------
 .../ambari/server/actionmanager/Request.java    | 35 +++------------
 .../apache/ambari/server/utils/Parallel.java    | 12 +++++-
 3 files changed, 26 insertions(+), 66 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/156afda5/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessorImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessorImpl.java
index 8768590..0f439de 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessorImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessorImpl.java
@@ -138,24 +138,12 @@ public class ActionDBAccessorImpl implements ActionDBAccessor {
   @Override
   public List<Stage> getAllStages(long requestId) {
     List<StageEntity> stageEntities = stageDAO.findByRequestId(requestId);
-    ParallelLoopResult<Stage> loopResult = Parallel.forLoop(stageEntities, new LoopBody<StageEntity, Stage>() {
-      @Override
-      public Stage run(StageEntity stageEntity) {
-        return stageFactory.createExisting(stageEntity);
-      }
-    });
-    if(loopResult.getIsCompleted()) {
-      return loopResult.getResult();
-    } else {
-      // Fetch any missing results sequentially
-      List<Stage> stages = loopResult.getResult();
-      for(int i = 0; i < stages.size(); i++) {
-        if(stages.get(i) == null) {
-          stages.set(i, stageFactory.createExisting(stageEntities.get(i)));
-        }
-      }
-      return stages;
+    List<Stage> stages = new ArrayList<>(stageEntities.size());
+    for( StageEntity stageEntity : stageEntities ){
+      stages.add(stageFactory.createExisting(stageEntity));
     }
+
+    return stages;
   }
 
   @Override
@@ -701,26 +689,13 @@ public class ActionDBAccessorImpl implements ActionDBAccessor {
   }
 
   @Override
-  public List<Request> getRequests(Collection<Long> requestIds){
+  public List<Request> getRequests(Collection<Long> requestIds) {
     List<RequestEntity> requestEntities = requestDAO.findByPks(requestIds);
-    ParallelLoopResult<Request> loopResult = Parallel.forLoop(requestEntities, new LoopBody<RequestEntity, Request>() {
-      @Override
-      public Request run(RequestEntity requestEntity) {
-        return requestFactory.createExisting(requestEntity);
-      }
-    });
-    if(loopResult.getIsCompleted()) {
-      return loopResult.getResult();
-    } else {
-      // Fetch any missing results sequentially
-      List<Request> requests = loopResult.getResult();
-      for(int i = 0; i < requests.size(); i++) {
-        if(requests.get(i) == null) {
-          requests.set(i, requestFactory.createExisting(requestEntities.get(i)));
-        }
-      }
-      return requests;
+    List<Request> requests = new ArrayList<Request>(requestEntities.size());
+    for (RequestEntity requestEntity : requestEntities) {
+      requests.add(requestFactory.createExisting(requestEntity));
     }
+    return requests;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/ambari/blob/156afda5/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Request.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Request.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Request.java
index 26447e6..31e11c1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Request.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Request.java
@@ -23,7 +23,6 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 
-import com.google.inject.Inject;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.StaticallyInject;
 import org.apache.ambari.server.controller.ExecuteActionRequest;
@@ -37,14 +36,12 @@ import org.apache.ambari.server.orm.entities.RequestOperationLevelEntity;
 import org.apache.ambari.server.orm.entities.RequestResourceFilterEntity;
 import org.apache.ambari.server.orm.entities.StageEntity;
 import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.utils.LoopBody;
-import org.apache.ambari.server.utils.Parallel;
-import org.apache.ambari.server.utils.ParallelLoopResult;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.gson.Gson;
+import com.google.inject.Inject;
 import com.google.inject.assistedinject.Assisted;
 import com.google.inject.assistedinject.AssistedInject;
 
@@ -197,32 +194,12 @@ public class Request {
     if(stageEntities == null || stageEntities.isEmpty()) {
       stages = Collections.emptyList();
     } else {
-      List<StageEntity> stageEntityList;
-      if(stageEntities instanceof List) {
-        stageEntityList = (List<StageEntity>) stageEntities;
-      } else {
-        stageEntityList = new ArrayList<StageEntity>(stageEntities);
-      }
-      ParallelLoopResult<Stage> loopResult = Parallel.forLoop(stageEntityList, new LoopBody<StageEntity, Stage>() {
-        @Override
-        public Stage run(StageEntity stageEntity) {
-          return stageFactory.createExisting(stageEntity);
-        }
-      });
-      List<Stage> stageList;
-      if(loopResult.getIsCompleted()) {
-        stageList = loopResult.getResult();
-      } else {
-        // Fetch any missing results sequentially
-        stageList = loopResult.getResult();
-        for(int i = 0; i < stages.size(); i++) {
-          if(stageList.get(i) == null) {
-            stageList.set(i, stageFactory.createExisting(stageEntityList.get(i)));
-          }
-        }
+      stages = new ArrayList<>(stageEntities.size());
+      for (StageEntity stageEntity : stageEntities) {
+        stages.add(stageFactory.createExisting(stageEntity));
       }
-      stages = stageList;
     }
+
     resourceFilters = filtersFromEntity(entity);
     operationLevel = operationLevelFromEntity(entity);
   }
@@ -424,7 +401,7 @@ public class Request {
   }
 
   public void setExclusive(boolean isExclusive) {
-    this.exclusive = isExclusive;
+    exclusive = isExclusive;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/156afda5/ambari-server/src/main/java/org/apache/ambari/server/utils/Parallel.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/utils/Parallel.java b/ambari-server/src/main/java/org/apache/ambari/server/utils/Parallel.java
index 9ca039b..c6e2156 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/utils/Parallel.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/Parallel.java
@@ -35,13 +35,21 @@ import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.eclipse.persistence.internal.helper.ConcurrencyManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- *  This class provides support for parallel loops.
- *  Iterations in the loop run in parallel in parallel loops.
+ * <b>TEMPORARILY DO NOT USE WITH JPA ENTITIES</b>
+ * <p/>
+ * Deprecated since the use of this class to access JPA from multiple Ambari
+ * threads seems to cause thread liveness problems in
+ * {@link ConcurrencyManager}.
+ * <p/>
+ * This class provides support for parallel loops. Iterations of the loop
+ * body run in parallel.
  */
+@Deprecated
 public class Parallel {
 
   /**
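
The shape of the fix is the same in ActionDBAccessorImpl and Request: drop
the Parallel.forLoop call (and its sequential backfill of missing results)
in favor of a single sequential pass, so JPA entities are never handled by
multiple threads at once. The pattern, sketched in Python with an arbitrary
create_existing factory (illustrative only):

# before (sketch): results = parallel_for_loop(entities, create_existing),
# followed by a second pass to fill any slots the parallel run left as None
def create_all(entities, create_existing):
    # after: one sequential pass, no shared JPA access across threads
    results = []
    for entity in entities:
        results.append(create_existing(entity))
    return results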


[31/50] [abbrv] ambari git commit: AMBARI-13257 Assign unique id/class to database selection radio buttons/pulldowns. (ababiichuk)

Posted by nc...@apache.org.
AMBARI-13257 Assign unique id/class to database selection radio buttons/pulldowns. (ababiichuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f67543c4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f67543c4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f67543c4

Branch: refs/heads/branch-dev-patch-upgrade
Commit: f67543c406be7d014c3c35c1c572f6ff501241be
Parents: 291b7cb
Author: aBabiichuk <ab...@cybervisiontech.com>
Authored: Mon Sep 28 15:02:54 2015 +0300
Committer: aBabiichuk <ab...@cybervisiontech.com>
Committed: Mon Sep 28 15:02:54 2015 +0300

----------------------------------------------------------------------
 .../controls_service_config_radio_buttons.hbs   |  2 +-
 ambari-web/app/views/common/controls_view.js    | 20 ++++-
 .../test/views/common/controls_view_test.js     | 77 +++++++++++++++-----
 3 files changed, 80 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f67543c4/ambari-web/app/templates/wizard/controls_service_config_radio_buttons.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/wizard/controls_service_config_radio_buttons.hbs b/ambari-web/app/templates/wizard/controls_service_config_radio_buttons.hbs
index c9862c6..2187155 100644
--- a/ambari-web/app/templates/wizard/controls_service_config_radio_buttons.hbs
+++ b/ambari-web/app/templates/wizard/controls_service_config_radio_buttons.hbs
@@ -18,7 +18,7 @@
 
 {{#each option in view.options}}
   {{#unless option.hidden}}
-    <label class="radio">
+    <label {{bindAttr class="option.className :radio"}}>
       {{#view App.ServiceConfigRadioButton nameBinding = "view.name" valueBinding = "option.displayName"}}{{/view}}
       {{option.displayName}} &nbsp;
     </label>

http://git-wip-us.apache.org/repos/asf/ambari/blob/f67543c4/ambari-web/app/views/common/controls_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/controls_view.js b/ambari-web/app/views/common/controls_view.js
index 17eb6bd..523026c 100644
--- a/ambari-web/app/views/common/controls_view.js
+++ b/ambari-web/app/views/common/controls_view.js
@@ -698,7 +698,25 @@ App.ServiceConfigRadioButtons = Ember.View.extend(App.ServiceConfigCalculateId,
     }
   }.observes('serviceConfig.value'),
 
-  optionsBinding: 'serviceConfig.options'
+  options: function () {
+    return this.get('serviceConfig.options').map(function (option) {
+      var dbTypePattern = /mysql|postgres|oracle|derby|mssql|sql\s?a/i,
+        className = '',
+        displayName = Em.get(option, 'displayName'),
+        dbTypeMatch = displayName.match(dbTypePattern);
+      if (dbTypeMatch) {
+        var dbSourcePattern = /new/i,
+          newDbMatch = displayName.match(dbSourcePattern);
+        if (newDbMatch) {
+          className += 'new-';
+        }
+        className += dbTypeMatch[0].replace(' ', '').toLowerCase();
+      }
+      return className ? Em.Object.create(option, {
+        className: className
+      }) : option;
+    });
+  }.property('serviceConfig.options')
 });
 
 App.ServiceConfigRadioButton = Ember.Checkbox.extend({

http://git-wip-us.apache.org/repos/asf/ambari/blob/f67543c4/ambari-web/test/views/common/controls_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/common/controls_view_test.js b/ambari-web/test/views/common/controls_view_test.js
index ea20450..fdf3739 100644
--- a/ambari-web/test/views/common/controls_view_test.js
+++ b/ambari-web/test/views/common/controls_view_test.js
@@ -22,8 +22,13 @@ var validator = require('utils/validator');
 
 describe('App.ServiceConfigRadioButtons', function () {
 
+  var view;
+
+  beforeEach(function () {
+    view = App.ServiceConfigRadioButtons.create();
+  });
+
   describe('#setConnectionUrl', function () {
-    var view = App.ServiceConfigRadioButtons.create();
     beforeEach(function () {
       sinon.stub(view, 'getPropertyByType', function (name) {
         return App.ServiceConfigProperty.create({'name': name});
@@ -44,21 +49,21 @@ describe('App.ServiceConfigRadioButtons', function () {
   });
 
   describe('#setRequiredProperties', function () {
-    var view = App.ServiceConfigRadioButtons.create({
-      serviceConfig: Em.Object.create(),
-      categoryConfigsAll: [
-        App.ServiceConfigProperty.create({
-          name: 'p1',
-          value: 'v1'
-        }),
-        App.ServiceConfigProperty.create({
-          name: 'p2',
-          value: 'v2'
-        })
-      ]
-    });
 
     beforeEach(function () {
+      view.reopen({
+        serviceConfig: Em.Object.create(),
+        categoryConfigsAll: [
+          App.ServiceConfigProperty.create({
+            name: 'p1',
+            value: 'v1'
+          }),
+          App.ServiceConfigProperty.create({
+            name: 'p2',
+            value: 'v2'
+          })
+        ]
+      });
       sinon.stub(view, 'getPropertyByType', function (name) {
         return view.get('categoryConfigsAll').findProperty('name', name);
       });
@@ -81,8 +86,7 @@ describe('App.ServiceConfigRadioButtons', function () {
 
   describe('#handleDBConnectionProperty', function () {
 
-    var view,
-      cases = [
+    var cases = [
         {
           dbType: 'mysql',
           driver: 'mysql-connector-java.jar',
@@ -273,7 +277,7 @@ describe('App.ServiceConfigRadioButtons', function () {
     cases.forEach(function (item) {
       it(item.title, function () {
         sinon.stub(App, 'get').withArgs('currentStackName').returns('HDP').withArgs('currentStackVersion').returns(item.currentStackVersion);
-        view = App.ServiceConfigRadioButtons.create({controller: item.controller});
+        view.reopen({controller: item.controller});
         sinon.stub(view, 'sendRequestRorDependentConfigs', Em.K);
         view.setProperties({
           categoryConfigsAll: item.controller.get('selectedService.configs'),
@@ -290,6 +294,45 @@ describe('App.ServiceConfigRadioButtons', function () {
     });
 
   });
+
+  describe('#options', function () {
+
+    var options = [
+        {
+          displayName: 'MySQL'
+        },
+        {
+          displayName: 'New PostgreSQL Database'
+        },
+        {
+          displayName: 'existing postgres db'
+        },
+        {
+          displayName: 'sqla database: existing'
+        },
+        {
+          displayName: 'SQL Anywhere database (New)'
+        },
+        {
+          displayName: 'displayName'
+        }
+      ],
+      classNames = ['mysql', 'new-postgres', 'postgres', 'sqla', 'new-sqla', undefined];
+
+    beforeEach(function () {
+      view.reopen({
+        serviceConfig: Em.Object.create({
+          options: options
+        })
+      });
+    });
+
+    it('should set class names for options', function () {
+      expect(view.get('options').mapProperty('displayName')).to.eql(options.mapProperty('displayName'));
+      expect(view.get('options').mapProperty('className')).to.eql(classNames);
+    });
+
+  });
 });
 
 describe('App.ServiceConfigRadioButton', function () {
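
The options computed property above derives a stable CSS class from each
option's display name. The same mapping, transliterated to Python as a quick
cross-check against the classNames list the new test expects (a sketch, not
Ambari code):

import re

DB_TYPE = re.compile(r'mysql|postgres|oracle|derby|mssql|sql\s?a', re.I)
NEW_DB = re.compile(r'new', re.I)

def class_name(display_name):
    db_match = DB_TYPE.search(display_name)
    if not db_match:
        return None  # no db type in the name -> no extra class
    prefix = 'new-' if NEW_DB.search(display_name) else ''
    return prefix + db_match.group(0).replace(' ', '').lower()

names = ['MySQL', 'New PostgreSQL Database', 'existing postgres db',
         'sqla database: existing', 'SQL Anywhere database (New)', 'displayName']
print([class_name(n) for n in names])
# ['mysql', 'new-postgres', 'postgres', 'sqla', 'new-sqla', None]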


[09/50] [abbrv] ambari git commit: AMBARI-13235. Incorrect Hosts table filtering after navigating back from Host Summary (rzang)

Posted by nc...@apache.org.
AMBARI-13235. Incorrect Hosts table filtering after navigating back from Host Summary (rzang)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/bc94537f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/bc94537f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/bc94537f

Branch: refs/heads/branch-dev-patch-upgrade
Commit: bc94537f7c32efc094b012573ff457d12e1fdca7
Parents: e1f697a
Author: Richard Zang <rz...@apache.org>
Authored: Thu Sep 24 17:41:31 2015 -0700
Committer: Richard Zang <rz...@apache.org>
Committed: Thu Sep 24 17:41:31 2015 -0700

----------------------------------------------------------------------
 ambari-web/app/mappers/hosts_mapper.js   |  5 ++++-
 ambari-web/app/views/common/sort_view.js | 30 ++++++++++++++++++---------
 2 files changed, 24 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/bc94537f/ambari-web/app/mappers/hosts_mapper.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mappers/hosts_mapper.js b/ambari-web/app/mappers/hosts_mapper.js
index ed9ef26..53f536c 100644
--- a/ambari-web/app/mappers/hosts_mapper.js
+++ b/ambari-web/app/mappers/hosts_mapper.js
@@ -153,7 +153,10 @@ App.hostsMapper = App.QuickDataMapper.create({
         var alertsSummary = item.alerts_summary;
         item.critical_warning_alerts_count = alertsSummary ? (alertsSummary.CRITICAL || 0) + (alertsSummary.WARNING || 0) : 0;
         item.cluster_id = clusterName;
-        item.index = index;
+        var existingHost = App.Host.find().findProperty('hostName', component.host_name);
+        var fromHostDetail = App.router.get('currentState.parentState.name') == 'hostDetails';
+        // No need to override an existing index in the host detail view, since the old model (which already has indexes) is not cleared.
+        item.index = (existingHost && fromHostDetail) ? existingHost.get('index') : index;
 
         if (stackUpgradeSupport) {
           this.config = $.extend(this.config, {

http://git-wip-us.apache.org/repos/asf/ambari/blob/bc94537f/ambari-web/app/views/common/sort_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/sort_view.js b/ambari-web/app/views/common/sort_view.js
index 7ed4c72..ff46b55 100644
--- a/ambari-web/app/views/common/sort_view.js
+++ b/ambari-web/app/views/common/sort_view.js
@@ -203,18 +203,28 @@ var serverWrapperView = Em.View.extend({
    * Initialize and save sorting statuses: hostName sorting_asc
    */
   loadSortStatuses: function () {
-    var statuses = [];
     var childViews = this.get('childViews');
-    childViews.forEach(function (childView) {
-      var sortStatus = (childView.get('name') == 'hostName' && childView.get('status') == 'sorting') ? 'sorting_asc' : childView.get('status');
-      statuses.push({
-        name: childView.get('name'),
-        status: sortStatus
+    var statuses = App.db.getSortingStatuses(this.get('controller.name'));
+    if (statuses) {
+      var sortingColumn = statuses.find(function (column) { return column.status != 'sorting'; });
+      if (sortingColumn) {
+        var sortingColumnView = childViews.findProperty('name', sortingColumn.name);
+        sortingColumnView.set('status', sortingColumn.status);
+        this.get('controller').set('sortingColumn', sortingColumnView);
+      }
+    } else {
+      statuses = [];
+      childViews.forEach(function (childView) {
+        var sortStatus = (childView.get('name') == 'hostName' && childView.get('status') == 'sorting') ? 'sorting_asc' : childView.get('status');
+        statuses.push({
+          name: childView.get('name'),
+          status: sortStatus
+        });
+        childView.set('status', sortStatus);
       });
-      childView.set('status', sortStatus);
-    });
-    App.db.setSortingStatuses(this.get('controller.name'), statuses);
-    this.get('controller').set('sortingColumn', childViews.findProperty('name', 'hostName'));
+      App.db.setSortingStatuses(this.get('controller.name'), statuses);
+      this.get('controller').set('sortingColumn', childViews.findProperty('name', 'hostName'));
+    }
   },
 
   /**
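
In outline, loadSortStatuses now prefers previously persisted statuses over
re-initializing them, which is what keeps the chosen sort column intact after
returning from Host Summary. A compact sketch of the load-or-initialize
pattern, assuming a hypothetical store with get/set (Python, illustrative
only):

def load_sort_statuses(store, controller_name, columns):
    statuses = store.get(controller_name)  # hypothetical persistence API
    if statuses:
        # re-apply the previously chosen sort column instead of resetting it
        return next((c for c in statuses if c['status'] != 'sorting'), None)
    # first visit: default to hostName ascending and persist that choice
    defaults = [dict(c, status='sorting_asc' if c['name'] == 'hostName'
                     else c['status']) for c in columns]
    store.set(controller_name, defaults)
    return next(c for c in defaults if c['name'] == 'hostName')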


[29/50] [abbrv] ambari git commit: AMBARI-13229. Detect non-compliant python versions and do not attempt to start Ambari Agent (aonishuk)

Posted by nc...@apache.org.
AMBARI-13229. Detect non-compliant python versions and do not attempt to start Ambari Agent (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a00b06ea
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a00b06ea
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a00b06ea

Branch: refs/heads/branch-dev-patch-upgrade
Commit: a00b06ea10b48f68021ec0406fb6575a4d09d71c
Parents: 360a4b4
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Mon Sep 28 13:33:36 2015 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Sep 28 13:33:36 2015 +0300

----------------------------------------------------------------------
 .../libraries/functions/get_hdp_version.py              |  6 ++++--
 .../libraries/functions/hdp_select.py                   |  6 ++++--
 .../HDFS/2.1.0.2.0/package/scripts/namenode.py          |  2 +-
 .../HDFS/2.1.0.2.0/package/scripts/service_check.py     |  2 +-
 .../RANGER/0.4.0/package/scripts/setup_ranger_xml.py    |  6 +++---
 .../RANGER_KMS/0.5.0.2.3/package/scripts/kms.py         |  6 +++---
 .../main/resources/custom_actions/scripts/ru_set_all.py |  7 ++++---
 .../src/test/python/custom_actions/test_ru_set_all.py   |  4 ++--
 .../src/test/python/stacks/2.0.6/FLUME/test_flume.py    |  2 +-
 .../test/python/stacks/2.0.6/HBASE/test_hbase_client.py | 12 ++++++------
 .../test/python/stacks/2.0.6/HBASE/test_hbase_master.py |  4 ++--
 .../stacks/2.0.6/HBASE/test_hbase_regionserver.py       |  4 ++--
 .../stacks/2.0.6/HBASE/test_phoenix_queryserver.py      |  2 +-
 .../src/test/python/stacks/2.0.6/HDFS/test_datanode.py  |  4 ++--
 .../test/python/stacks/2.0.6/HDFS/test_hdfs_client.py   |  6 +++---
 .../test/python/stacks/2.0.6/HDFS/test_journalnode.py   |  4 ++--
 .../src/test/python/stacks/2.0.6/HDFS/test_namenode.py  |  4 ++--
 .../test/python/stacks/2.0.6/HDFS/test_nfsgateway.py    |  2 +-
 .../test/python/stacks/2.0.6/HIVE/test_hive_client.py   |  4 ++--
 .../test/python/stacks/2.0.6/HIVE/test_hive_server.py   | 10 +++++-----
 .../python/stacks/2.0.6/HIVE/test_webhcat_server.py     |  4 ++--
 .../test/python/stacks/2.0.6/OOZIE/test_oozie_client.py |  4 ++--
 .../test/python/stacks/2.0.6/OOZIE/test_oozie_server.py |  8 ++++----
 .../src/test/python/stacks/2.0.6/PIG/test_pig_client.py |  4 ++--
 .../src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py    |  2 +-
 .../test/python/stacks/2.0.6/YARN/test_historyserver.py |  2 +-
 .../python/stacks/2.0.6/YARN/test_mapreduce2_client.py  |  4 ++--
 .../test/python/stacks/2.0.6/YARN/test_nodemanager.py   |  2 +-
 .../python/stacks/2.0.6/YARN/test_resourcemanager.py    |  2 +-
 .../test/python/stacks/2.0.6/YARN/test_yarn_client.py   |  4 ++--
 .../stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py     |  4 ++--
 .../stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py     |  4 ++--
 .../test/python/stacks/2.1/FALCON/test_falcon_client.py |  4 ++--
 .../test/python/stacks/2.1/FALCON/test_falcon_server.py |  6 +++---
 .../test/python/stacks/2.1/HIVE/test_hive_metastore.py  |  8 ++++----
 .../python/stacks/2.1/STORM/test_storm_drpc_server.py   |  4 ++--
 .../test/python/stacks/2.1/STORM/test_storm_nimbus.py   |  8 ++++----
 .../python/stacks/2.1/STORM/test_storm_nimbus_prod.py   |  8 ++++----
 .../stacks/2.1/STORM/test_storm_rest_api_service.py     |  4 ++--
 .../python/stacks/2.1/STORM/test_storm_supervisor.py    |  8 ++++----
 .../stacks/2.1/STORM/test_storm_supervisor_prod.py      |  8 ++++----
 .../python/stacks/2.1/STORM/test_storm_ui_server.py     |  4 ++--
 .../src/test/python/stacks/2.1/TEZ/test_tez_client.py   |  6 +++---
 .../python/stacks/2.1/YARN/test_apptimelineserver.py    |  2 +-
 .../python/stacks/2.2/ACCUMULO/test_accumulo_client.py  |  4 ++--
 .../test/python/stacks/2.2/KAFKA/test_kafka_broker.py   |  4 ++--
 .../test/python/stacks/2.2/KNOX/test_knox_gateway.py    |  8 ++++----
 .../test/python/stacks/2.2/RANGER/test_ranger_admin.py  |  2 +-
 .../python/stacks/2.2/RANGER/test_ranger_usersync.py    |  4 ++--
 .../test/python/stacks/2.2/SLIDER/test_slider_client.py |  8 ++++----
 .../python/stacks/2.2/SPARK/test_job_history_server.py  |  2 +-
 .../test/python/stacks/2.2/SPARK/test_spark_client.py   |  2 +-
 .../test/python/stacks/2.3/MAHOUT/test_mahout_client.py |  4 ++--
 .../python/stacks/2.3/SPARK/test_spark_thrift_server.py |  2 +-
 54 files changed, 128 insertions(+), 123 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-common/src/main/python/resource_management/libraries/functions/get_hdp_version.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/get_hdp_version.py b/ambari-common/src/main/python/resource_management/libraries/functions/get_hdp_version.py
index e8fdbb6..a56d33a 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/get_hdp_version.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/get_hdp_version.py
@@ -31,6 +31,8 @@ from resource_management.core.logger import Logger
 from resource_management.core.exceptions import Fail
 from resource_management.core import shell
 
+HDP_SELECT_BINARY = "/usr/bin/hdp-select"
+
 @OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
 def get_hdp_version(package_name):
   """
@@ -63,12 +65,12 @@ def get_hdp_version(package_name):
   @param package_name, name of the package, from which, function will try to get hdp version
   """
   
-  if not os.path.exists("/usr/bin/hdp-select"):
+  if not os.path.exists(HDP_SELECT_BINARY):
     Logger.info('Skipping get_hdp_version since hdp-select is not yet available')
     return None # lazy fail
   
   try:
-    command = 'hdp-select status ' + package_name
+    command = 'ambari-python-wrap {HDP_SELECT_BINARY} status {package_name}'.format(HDP_SELECT_BINARY=HDP_SELECT_BINARY, package_name=package_name)
     return_code, hdp_output = shell.call(command, timeout=20)
   except Exception, e:
     Logger.error(str(e))
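
The net effect of the change above is that "hdp-select status" always runs
through the wrapper, so neither the shebang of /usr/bin/hdp-select nor the
system default python matters any more. A quick check of the command string
the new code formats (the package name is an arbitrary example):

HDP_SELECT_BINARY = "/usr/bin/hdp-select"
package_name = "hadoop-hdfs-namenode"  # example component name
command = 'ambari-python-wrap {HDP_SELECT_BINARY} status {package_name}'.format(
    HDP_SELECT_BINARY=HDP_SELECT_BINARY, package_name=package_name)
print(command)
# ambari-python-wrap /usr/bin/hdp-select status hadoop-hdfs-namenode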

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py
index f4f0efc..5f05777 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py
@@ -30,8 +30,10 @@ from resource_management.core.shell import call
 from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.version_select_util import get_versions_from_stack_root
 
+HDP_SELECT = '/usr/bin/hdp-select'
+HDP_SELECT_PREFIX = ('ambari-python-wrap', HDP_SELECT)
 # hdp-select set oozie-server 2.2.0.0-1234
-TEMPLATE = ('hdp-select', 'set')
+TEMPLATE = HDP_SELECT_PREFIX + ('set',)
 
 # a mapping of Ambari server role to hdp-select component name for all
 # non-clients
@@ -249,7 +251,7 @@ def get_hdp_versions(stack_root):
   :param stack_root: Stack install root
   :return: Returns list of installed stack versions.
   """
-  code, out = call("hdp-select versions")
+  code, out = call(HDP_SELECT_PREFIX + ('versions',))
   versions = []
   if 0 == code:
     for line in out.splitlines():
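
The tuple arithmetic above is plain Python; a minimal illustration of how the prefix composes into argv-style commands (the assert shows the exact command the updated tests below expect):

    HDP_SELECT = '/usr/bin/hdp-select'
    HDP_SELECT_PREFIX = ('ambari-python-wrap', HDP_SELECT)
    TEMPLATE = HDP_SELECT_PREFIX + ('set',)

    # Tuple commands stay in argv form, so no shell re-parses the arguments.
    set_cmd = TEMPLATE + ('oozie-server', '2.2.0.0-1234')
    versions_cmd = HDP_SELECT_PREFIX + ('versions',)

    assert set_cmd == ('ambari-python-wrap', '/usr/bin/hdp-select',
                       'set', 'oozie-server', '2.2.0.0-1234')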

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
index a3c02a6..1dfb280 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
@@ -228,7 +228,7 @@ class NameNodeDefault(NameNode):
     basedir = os.path.join(env.config.basedir, 'scripts')
     if(threshold == 'DEBUG'): #FIXME TODO remove this on PROD
       basedir = os.path.join(env.config.basedir, 'scripts', 'balancer-emulator')
-      command = ['python','hdfs-command.py']
+      command = ['ambari-python-wrap','hdfs-command.py']
 
     _print("Executing command %s\n" % command)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/service_check.py
index 6ec3996..b4f44ae 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/service_check.py
@@ -86,7 +86,7 @@ class HdfsServiceCheckDefault(HdfsServiceCheck):
         checkWebUIFileName = "checkWebUI.py"
         checkWebUIFilePath = format("{tmp_dir}/{checkWebUIFileName}")
         comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
-        checkWebUICmd = format("python {checkWebUIFilePath} -m {comma_sep_jn_hosts} -p {journalnode_port} -s {https_only}")
+        checkWebUICmd = format("ambari-python-wrap {checkWebUIFilePath} -m {comma_sep_jn_hosts} -p {journalnode_port} -s {https_only}")
         File(checkWebUIFilePath,
              content=StaticFile(checkWebUIFileName),
              mode=0775)
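
resource_management's format() fills the {placeholders} from the surrounding scope; roughly the same result with plain str.format and illustrative values (the hostnames and port below are made up):

    tmp_dir = '/var/lib/ambari-agent/tmp'
    checkWebUIFileName = 'checkWebUI.py'
    checkWebUIFilePath = '{0}/{1}'.format(tmp_dir, checkWebUIFileName)
    comma_sep_jn_hosts = ','.join(['jn1.example.com', 'jn2.example.com'])
    journalnode_port = '8480'
    https_only = False

    # Equivalent of the format(...) call in the hunk above.
    checkWebUICmd = ('ambari-python-wrap {0} -m {1} -p {2} -s {3}'.format(
        checkWebUIFilePath, comma_sep_jn_hosts, journalnode_port, https_only))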

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py
index 77e487f..36cbe87 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py
@@ -192,7 +192,7 @@ def setup_ranger_db(rolling_upgrade=False):
   # User wants us to setup the DB user and DB?
   if params.create_db_dbuser:
     Logger.info('Setting up Ranger DB and DB User')
-    dba_setup = format('python {ranger_home}/dba_script.py -q')
+    dba_setup = format('ambari-python-wrap {ranger_home}/dba_script.py -q')
     Execute(dba_setup, 
             environment=env_dict,
             logoutput=True,
@@ -201,7 +201,7 @@ def setup_ranger_db(rolling_upgrade=False):
   else:
     Logger.info('Separate DBA property not set. Assuming Ranger DB and DB User exists!')
 
-  db_setup = format('python {ranger_home}/db_setup.py')
+  db_setup = format('ambari-python-wrap {ranger_home}/db_setup.py')
   Execute(db_setup, 
           environment=env_dict,
           logoutput=True,
@@ -220,7 +220,7 @@ def setup_java_patch(rolling_upgrade=False):
   if params.db_flavor.lower() == 'sqla':
     env_dict = {'RANGER_ADMIN_HOME':ranger_home, 'JAVA_HOME':params.java_home, 'LD_LIBRARY_PATH':params.ld_lib_path}
 
-  setup_java_patch = format('python {ranger_home}/db_setup.py -javapatch')
+  setup_java_patch = format('ambari-python-wrap {ranger_home}/db_setup.py -javapatch')
   Execute(setup_java_patch, 
           environment=env_dict,
           logoutput=True,
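
The Execute resources above run the Ranger setup scripts under a specific user with an explicit environment; the environment half, approximated with plain subprocess (paths and JAVA_HOME here are illustrative, not taken from the recipe):

    import os
    import subprocess

    ranger_home = '/usr/hdp/current/ranger-admin'
    env_dict = {'RANGER_ADMIN_HOME': ranger_home,
                'JAVA_HOME': '/usr/jdk64/jdk1.8.0_40'}

    db_setup = 'ambari-python-wrap {0}/db_setup.py'.format(ranger_home)
    # Merge the extra variables into the inherited environment, similar in
    # spirit to Execute(db_setup, environment=env_dict, ...).
    child_env = dict(os.environ)
    child_env.update(env_dict)
    subprocess.call(db_setup, shell=True, env=child_env)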

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py
index fafe1ec..e14c209 100755
--- a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py
@@ -98,8 +98,8 @@ def setup_kms_db():
     if params.db_flavor.lower() == 'sqla':
       env_dict = {'RANGER_KMS_HOME':params.kms_home, 'JAVA_HOME': params.java_home, 'LD_LIBRARY_PATH':params.ld_library_path}
 
-    dba_setup = format('python {kms_home}/dba_script.py -q')
-    db_setup = format('python {kms_home}/db_setup.py')
+    dba_setup = format('ambari-python-wrap {kms_home}/dba_script.py -q')
+    db_setup = format('ambari-python-wrap {kms_home}/db_setup.py')
 
     Execute(dba_setup, environment=env_dict, logoutput=True, user=params.kms_user)
     Execute(db_setup, environment=env_dict, logoutput=True, user=params.kms_user)
@@ -109,7 +109,7 @@ def setup_java_patch():
 
   if params.has_ranger_admin:
 
-    setup_java_patch = format('python {kms_home}/db_setup.py -javapatch')
+    setup_java_patch = format('ambari-python-wrap {kms_home}/db_setup.py -javapatch')
 
     env_dict = {'RANGER_KMS_HOME':params.kms_home, 'JAVA_HOME': params.java_home}
     if params.db_flavor.lower() == 'sqla':

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/main/resources/custom_actions/scripts/ru_set_all.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/custom_actions/scripts/ru_set_all.py b/ambari-server/src/main/resources/custom_actions/scripts/ru_set_all.py
index 6e5ddd2..c4660a7 100644
--- a/ambari-server/src/main/resources/custom_actions/scripts/ru_set_all.py
+++ b/ambari-server/src/main/resources/custom_actions/scripts/ru_set_all.py
@@ -33,7 +33,8 @@ from resource_management.core import shell
 from resource_management.core.exceptions import Fail
 from resource_management.core.logger import Logger
 from resource_management.core.resources.system import Execute, Link
-from resource_management.core.shell import as_sudo
+
+HDP_SELECT = '/usr/bin/hdp-select'
 
 class UpgradeSetAll(Script):
   """
@@ -59,7 +60,7 @@ class UpgradeSetAll(Script):
     real_ver = format_hdp_stack_version(version)
     if stack_name == "HDP":
       if compare_versions(real_ver, min_ver) >= 0:
-        cmd = ('hdp-select', 'set', 'all', version)
+        cmd = ('ambari-python-wrap', HDP_SELECT, 'set', 'all', version)
         code, out = shell.call(cmd, sudo=True)
 
       if compare_versions(real_ver, format_hdp_stack_version("2.3")) >= 0:
@@ -169,7 +170,7 @@ def link_config(old_conf, link_conf):
 
   old_conf_copy = os.path.join(old_parent, "conf.backup")
   if not os.path.exists(old_conf_copy):
-    Execute(as_sudo(["cp", "-R", "-p", old_conf, old_conf_copy]), logoutput=True)
+    Execute(("cp", "-R", "-p", old_conf, old_conf_copy), sudo=True, logoutput=True)
 
   shutil.rmtree(old_conf, ignore_errors=True)
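
The last hunk replaces as_sudo(), which folds the command into a sudo-prefixed string, with the sudo=True keyword, which keeps the command as an argv tuple. Hypothetical stand-ins for the two styles (quoting omitted; this is not the framework code):

    def as_sudo_style(command_parts):
        # String form: joined here and re-parsed later by a shell.
        return 'sudo ' + ' '.join(command_parts)

    def sudo_kwarg_style(command, sudo=False):
        # Tuple form: 'sudo' is simply prepended as argv[0].
        return (('sudo',) + command) if sudo else command

    old = as_sudo_style(["cp", "-R", "-p", "/etc/old", "/etc/old.backup"])
    new = sudo_kwarg_style(("cp", "-R", "-p", "/etc/old", "/etc/old.backup"),
                           sudo=True)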
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/custom_actions/test_ru_set_all.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/custom_actions/test_ru_set_all.py b/ambari-server/src/test/python/custom_actions/test_ru_set_all.py
index b102b4e..2f74619 100644
--- a/ambari-server/src/test/python/custom_actions/test_ru_set_all.py
+++ b/ambari-server/src/test/python/custom_actions/test_ru_set_all.py
@@ -93,7 +93,7 @@ class TestRUSetAll(RMFTestCase):
     ru_execute = UpgradeSetAll()
     ru_execute.actionexecute(None)
 
-    call_mock.assert_called_with(('hdp-select', 'set', 'all', u'2.2.1.0-2260'), sudo=True)
+    call_mock.assert_called_with(('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'all', u'2.2.1.0-2260'), sudo=True)
 
   @patch("resource_management.core.shell.call")
   @patch.object(Script, 'get_config')
@@ -129,7 +129,7 @@ class TestRUSetAll(RMFTestCase):
     ru_execute.actionexecute(None)
 
     self.assertTrue(link_mock.called)
-    call_mock.assert_called_with(('hdp-select', 'set', 'all', '2.3.0.0-1234'), sudo=True)
+    call_mock.assert_called_with(('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'all', '2.3.0.0-1234'), sudo=True)
 
 
   @patch("os.path.islink")
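
The reworked assertions pin the full argv tuple, keyword included; the mock mechanics in isolation (unittest.mock used here as a stand-in for the mock package these tests bundle):

    try:
        from unittest.mock import MagicMock  # Python 3
    except ImportError:
        from mock import MagicMock           # Python 2, as in these tests

    call_mock = MagicMock(return_value=(0, ''))
    # The code under test invokes the mocked shell.call with the new argv:
    call_mock(('ambari-python-wrap', '/usr/bin/hdp-select',
               'set', 'all', '2.2.1.0-2260'), sudo=True)

    # assert_called_with compares positional args and keywords, so a command
    # still starting with bare 'hdp-select' would fail this check.
    call_mock.assert_called_with(('ambari-python-wrap', '/usr/bin/hdp-select',
                                  'set', 'all', '2.2.1.0-2260'), sudo=True)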

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py b/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py
index be2b87c..28b0802 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py
@@ -536,7 +536,7 @@ class TestFlumeHandler(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'flume-server', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'flume-server', '2.2.1.0-2067'), sudo=True)
 
 
 def build_flume(content):

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
index ff25933..742b39a 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
@@ -202,9 +202,9 @@ class TestHBaseClient(RMFTestCase):
                    target = RMFTestCase.TARGET_COMMON_SERVICES,
                    mocks_dict = mocks_dict)
 
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'hbase-client', '2.2.1.0-2067'), sudo=True)
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'phoenix-client', '2.2.1.0-2067'), sudo=True)
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hbase-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'phoenix-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
     self.assertEquals(1, mocks_dict['call'].call_count)
 
 
@@ -228,9 +228,9 @@ class TestHBaseClient(RMFTestCase):
                        call_mocks = [(0, None), (0, None), (0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hbase-client', version), sudo=True)
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'phoenix-client', version), sudo=True)
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-client', version), sudo=True)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hbase-client', version), sudo=True)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'phoenix-client', version), sudo=True)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', version), sudo=True)
 
     self.assertEquals(3, mocks_dict['call'].call_count)
     self.assertEquals(6, mocks_dict['checked_call'].call_count)
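
The call_count assertions lean on mock's bookkeeping; a minimal illustration of the counting mechanism (which specific calls the real test counts under 'call' versus 'checked_call' is a separate matter):

    try:
        from unittest.mock import MagicMock  # Python 3
    except ImportError:
        from mock import MagicMock           # Python 2

    call = MagicMock(return_value=(0, ''))
    for pkg in ('hbase-client', 'phoenix-client', 'hadoop-client'):
        call(('ambari-python-wrap', '/usr/bin/hdp-select', 'set', pkg,
              '2.3.0.0-1234'), sudo=True)

    assert call.call_count == 3  # mock counted every invocation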

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index f19da4c..7e1a6f1 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -733,7 +733,7 @@ class TestHBaseMaster(RMFTestCase):
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        mocks_dict = mocks_dict)
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'hbase-master', version), sudo=True,)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hbase-master', version), sudo=True,)
     self.assertFalse(call_mock.called)
     self.assertNoMoreResources()
 
@@ -757,7 +757,7 @@ class TestHBaseMaster(RMFTestCase):
                        call_mocks = [(0, None), (0, None), (0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hbase-master', version), sudo=True)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hbase-master', version), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(3, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
index 2cb05c0..9973825 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
@@ -491,7 +491,7 @@ class TestHbaseRegionServer(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'hbase-regionserver', version), sudo=True,)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hbase-regionserver', version), sudo=True,)
     self.assertNoMoreResources()
 
 
@@ -533,7 +533,7 @@ class TestHbaseRegionServer(RMFTestCase):
                        call_mocks = [(0, None), (0, None), (0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hbase-regionserver', version), sudo=True)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hbase-regionserver', version), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(3, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
index 0cfc2e3..51a9edc 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
@@ -413,6 +413,6 @@ class TestPhoenixQueryServer(RMFTestCase):
         mode = 0755,
         cd_access = 'a',
     )
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'phoenix-server', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'phoenix-server', '2.3.0.0-1234'), sudo=True)
 
     self.assertNoMoreResources()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
index d5a42f0..1b4bec6 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
@@ -451,7 +451,7 @@ class TestDatanode(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'hadoop-hdfs-datanode', version), sudo=True,)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-hdfs-datanode', version), sudo=True,)
     self.assertNoMoreResources()
 
 
@@ -472,7 +472,7 @@ class TestDatanode(RMFTestCase):
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-hdfs-datanode', version), sudo=True,)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-hdfs-datanode', version), sudo=True,)
 
     self.assertNoMoreResources()
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
index 4948d01..9177039 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
@@ -82,7 +82,7 @@ class Test(RMFTestCase):
                    hdp_stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
 
     # for now, it's enough that hdp-select is confirmed
 
@@ -204,7 +204,7 @@ class Test(RMFTestCase):
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-client', version), sudo=True,)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', version), sudo=True,)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)
@@ -228,5 +228,5 @@ class Test(RMFTestCase):
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-client', version), sudo=True,)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', version), sudo=True,)
     self.assertNoMoreResources()

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
index becc82b..364715d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
@@ -472,7 +472,7 @@ class TestJournalnode(RMFTestCase):
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-hdfs-journalnode', version), sudo=True,)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-hdfs-journalnode', version), sudo=True,)
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
@@ -493,7 +493,7 @@ class TestJournalnode(RMFTestCase):
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-hdfs-journalnode', version), sudo=True,)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-hdfs-journalnode', version), sudo=True,)
     self.assertNoMoreResources()
 
     self.assertEquals(

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index b9211a5..ada4d9b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -1300,7 +1300,7 @@ class TestNamenode(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'hadoop-hdfs-namenode', version), sudo=True)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-hdfs-namenode', version), sudo=True)
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
@@ -1320,7 +1320,7 @@ class TestNamenode(RMFTestCase):
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-hdfs-namenode', version), sudo=True)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-hdfs-namenode', version), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py
index 89b4762..ee85e4a 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py
@@ -401,5 +401,5 @@ class TestNFSGateway(RMFTestCase):
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None), (0, None), (0, None), (0, None)])
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'hadoop-hdfs-nfs3', version), sudo=True,)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-hdfs-nfs3', version), sudo=True,)
     self.assertNoMoreResources()

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py
index 79bcc73..db4b350 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py
@@ -188,7 +188,7 @@ class TestHiveClient(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'hadoop-client', version), sudo=True,)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', version), sudo=True,)
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
@@ -210,7 +210,7 @@ class TestHiveClient(RMFTestCase):
                        mocks_dict = mocks_dict)
 
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'hadoop-client', version), sudo=True,)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', version), sudo=True,)
     self.assertNoMoreResources()
 
     self.assertEquals(2, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
index e4ca82e..c2b845c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
@@ -733,7 +733,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
      call_mocks = call_side_effects
     )
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hive-server2', '2.2.1.0-2065'), sudo=True,)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hive-server2', '2.2.1.0-2065'), sudo=True,)
     self.assertResourceCalledByIndex(31, 'Execute', 'hive --config /usr/hdp/current/hive-server2/conf/conf.server --service hiveserver2 --deregister 1.2.1.2.3.0.0-2434',
       path=['/bin:/usr/hdp/current/hive-server2/bin:/usr/hdp/current/hadoop-client/bin'],
       tries=1, user='hive')
@@ -757,7 +757,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
      call_mocks = call_side_effects
     )
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hive-server2', '2.2.1.0-2065'), sudo=True,)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hive-server2', '2.2.1.0-2065'), sudo=True,)
     self.assertResourceCalledByIndex(33, 'Execute', 'hive --config /etc/hive/conf.server --service hiveserver2 --deregister 1.2.1.2.3.0.0-2434',
       path=['/bin:/usr/hdp/current/hive-server2/bin:/usr/hdp/current/hadoop-client/bin'],
       tries=1, user='hive')
@@ -773,7 +773,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
     except:
       pass
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hive-server2', '2.2.1.0-2065'), sudo=True,)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hive-server2', '2.2.1.0-2065'), sudo=True,)
     self.assertNoMoreResources()
 
   @patch("resource_management.libraries.functions.security_commons.build_expectations")
@@ -905,7 +905,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'hive-server2', version), sudo=True,)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hive-server2', version), sudo=True,)
 
     copy_to_hdfs_mock.assert_any_call("mapreduce", "hadoop", "hdfs", host_sys_prepped=False)
     copy_to_hdfs_mock.assert_any_call("tez", "hadoop", "hdfs", host_sys_prepped=False)
@@ -945,7 +945,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
 
     self.assertResourceCalled('Execute',
 
-                              ('hdp-select', 'set', 'hive-server2', version), sudo=True,)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hive-server2', version), sudo=True,)
     copy_to_hdfs_mock.assert_any_call("mapreduce", "hadoop", "hdfs", host_sys_prepped=False)
     copy_to_hdfs_mock.assert_any_call("tez", "hadoop", "hdfs", host_sys_prepped=False)
     self.assertEquals(2, copy_to_hdfs_mock.call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
index aca7664..bf8742e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
@@ -356,7 +356,7 @@ class TestWebHCatServer(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'hive-webhcat', version), sudo=True,)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hive-webhcat', version), sudo=True,)
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
@@ -385,7 +385,7 @@ class TestWebHCatServer(RMFTestCase):
     self.assertTrue("/usr/hdp/current/hive-webhcat/etc/webhcat" == sys.modules["params"].webhcat_conf_dir)
 
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'hive-webhcat', version), sudo=True,)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hive-webhcat', version), sudo=True,)
     self.assertNoMoreResources()
 
     self.assertEquals(2, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
index e8196d6..6d3e7d0 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
@@ -211,7 +211,7 @@ class TestOozieClient(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'oozie-client', version), sudo=True)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'oozie-client', version), sudo=True)
     self.assertNoMoreResources()
 
   
@@ -234,7 +234,7 @@ class TestOozieClient(RMFTestCase):
                        mocks_dict = mocks_dict)
 
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'oozie-client', version), sudo=True)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'oozie-client', version), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
index 9cf426f..798ffd1 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
@@ -1179,7 +1179,7 @@ class TestOozieServer(RMFTestCase):
       ('tar', '-zcvhf', '/tmp/oozie-upgrade-backup/oozie-conf-backup.tar', '/usr/hdp/current/oozie-server/conf/'),
       sudo = True)
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'oozie-server', u'2.2.1.0-2135'),
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'oozie-server', u'2.2.1.0-2135'),
       sudo = True )
 
     self.assertResourceCalled('Execute',
@@ -1247,7 +1247,7 @@ class TestOozieServer(RMFTestCase):
       ('tar', '-zcvhf', '/tmp/oozie-upgrade-backup/oozie-conf-backup.tar', '/usr/hdp/current/oozie-server/conf/'),
       sudo = True)
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'oozie-server', '2.3.0.0-1234'), sudo = True)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'oozie-server', '2.3.0.0-1234'), sudo = True)
 
     self.assertResourceCalled('Execute',
       ('tar', '-xvf', '/tmp/oozie-upgrade-backup/oozie-conf-backup.tar', '-C', '/usr/hdp/current/oozie-server/conf//'),
@@ -1305,7 +1305,7 @@ class TestOozieServer(RMFTestCase):
       ('tar', '-zcvhf', '/tmp/oozie-upgrade-backup/oozie-conf-backup.tar', '/usr/hdp/current/oozie-server/conf/'),
       sudo = True)
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'oozie-server', u'2.2.0.0-0000'), sudo = True)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'oozie-server', u'2.2.0.0-0000'), sudo = True)
 
     self.assertResourceCalled('Execute',
       ('tar', '-xvf', '/tmp/oozie-upgrade-backup/oozie-conf-backup.tar', '-C', '/usr/hdp/current/oozie-server/conf//'),
@@ -1505,7 +1505,7 @@ class TestOozieServer(RMFTestCase):
       ('tar', '-zcvhf', '/tmp/oozie-upgrade-backup/oozie-conf-backup.tar', '/usr/hdp/current/oozie-server/conf/'),
       sudo = True)
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'oozie-server', '2.3.0.0-1234'), sudo = True)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'oozie-server', '2.3.0.0-1234'), sudo = True)
 
     self.assertResourceCalled('Execute',
       ('tar', '-xvf', '/tmp/oozie-upgrade-backup/oozie-conf-backup.tar', '-C', '/usr/hdp/current/oozie-server/conf//'),

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py b/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py
index 2f42520..e363b2c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py
@@ -146,7 +146,7 @@ class TestPigClient(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'hadoop-client', version), sudo=True)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', version), sudo=True)
     self.assertNoMoreResources()
 
   def test_pre_rolling_restart_23(self):
@@ -167,7 +167,7 @@ class TestPigClient(RMFTestCase):
                        mocks_dict = mocks_dict)
 
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'hadoop-client', version), sudo=True)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', version), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(2, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py b/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py
index 2c0b8c8..e07139e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py
@@ -129,7 +129,7 @@ class TestSqoop(RMFTestCase):
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'sqoop-client', version), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'sqoop-client', version), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
index b6d5f42..1b2fae5 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
@@ -779,7 +779,7 @@ class TestHistoryServer(RMFTestCase):
                        call_mocks = [(0, None), (0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-mapreduce-historyserver', version), sudo=True)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-mapreduce-historyserver', version), sudo=True)
     copy_to_hdfs_mock.assert_called_with("tez", "hadoop", "hdfs", host_sys_prepped=False)
 
     self.assertResourceCalled('HdfsResource', None,

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py
index 0e52264..0a2480e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py
@@ -384,7 +384,7 @@ class TestMapReduce2Client(RMFTestCase):
     )
 
     # for now, it's enough that hdp-select is confirmed
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
 
 
   def test_pre_rolling_restart_23(self):
@@ -404,7 +404,7 @@ class TestMapReduce2Client(RMFTestCase):
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-client', version), sudo=True)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', version), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
index 2692420..262a2d1 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
@@ -713,7 +713,7 @@ class TestNodeManager(RMFTestCase):
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-yarn-nodemanager', version), sudo=True)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-yarn-nodemanager', version), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
index fb7d847..9e98601 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
@@ -640,7 +640,7 @@ class TestResourceManager(RMFTestCase):
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-yarn-resourcemanager', version), sudo=True)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-yarn-resourcemanager', version), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py
index 21fbb9d..4adf6e5 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py
@@ -551,7 +551,7 @@ class TestYarnClient(RMFTestCase):
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
 
     # for now, it's enough that hdp-select is confirmed
 
@@ -573,7 +573,7 @@ class TestYarnClient(RMFTestCase):
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-client', version), sudo=True)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', version), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py
index 79fd74b..309380c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py
@@ -167,7 +167,7 @@ class TestZookeeperClient(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'zookeeper-client', version), sudo=True)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'zookeeper-client', version), sudo=True)
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
@@ -191,7 +191,7 @@ class TestZookeeperClient(RMFTestCase):
                        mocks_dict = mocks_dict)
 
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'zookeeper-client', version), sudo=True)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'zookeeper-client', version), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
index afc4bc7..ac5311f 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
@@ -353,7 +353,7 @@ class TestZookeeperServer(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'zookeeper-server', version), sudo=True)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'zookeeper-server', version), sudo=True)
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
@@ -377,7 +377,7 @@ class TestZookeeperServer(RMFTestCase):
                        mocks_dict = mocks_dict)
 
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'zookeeper-server', version), sudo=True)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'zookeeper-server', version), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py
index 9b08a90..2667aaf 100644
--- a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py
+++ b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py
@@ -118,7 +118,7 @@ class TestFalconClient(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'falcon-client', version), sudo=True,)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'falcon-client', version), sudo=True,)
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
@@ -140,7 +140,7 @@ class TestFalconClient(RMFTestCase):
                        mocks_dict = mocks_dict)
 
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'falcon-client', version), sudo=True,)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'falcon-client', version), sudo=True,)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
index c9166a5..229dffa 100644
--- a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
+++ b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
@@ -220,7 +220,7 @@ class TestFalconServer(RMFTestCase):
      u'/hadoop/falcon'),
         sudo = True,
     )
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'falcon-server', u'2.2.1.0-2135'),
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'falcon-server', u'2.2.1.0-2135'),
         sudo = True,
     )
     self.assertResourceCalled('Execute', ('tar',
@@ -504,7 +504,7 @@ class TestFalconServer(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'falcon-server', version), sudo=True,)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'falcon-server', version), sudo=True,)
     self.printResources()
 
   @patch('os.path.isfile', new=MagicMock(return_value=True))
@@ -531,7 +531,7 @@ class TestFalconServer(RMFTestCase):
                        mocks_dict = mocks_dict)
 
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'falcon-server', version), sudo=True,)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'falcon-server', version), sudo=True,)
 
     self.assertResourceCalled('Execute', ('tar',
      '-xvf',

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py b/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
index 9d42b9f..0dcbd7d 100644
--- a/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
+++ b/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
@@ -496,7 +496,7 @@ class TestHiveMetastore(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'hive-metastore', version), sudo=True,)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hive-metastore', version), sudo=True,)
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
@@ -518,7 +518,7 @@ class TestHiveMetastore(RMFTestCase):
                        mocks_dict = mocks_dict)
 
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'hive-metastore', version), sudo=True,)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hive-metastore', version), sudo=True,)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)
@@ -581,7 +581,7 @@ class TestHiveMetastore(RMFTestCase):
      logoutput = True, environment = {'HIVE_CONF_DIR': '/usr/hdp/current/hive-server2/conf/conf.server'},
       tries = 1, user = 'hive')
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hive-metastore', version), sudo=True,)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hive-metastore', version), sudo=True,)
 
     self.assertNoMoreResources()
 
@@ -671,6 +671,6 @@ class TestHiveMetastore(RMFTestCase):
                               logoutput = True, environment = {'HIVE_CONF_DIR': '/usr/hdp/current/hive-server2/conf/conf.server'},
                               tries = 1, user = 'hive')
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hive-metastore', version), sudo=True,)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hive-metastore', version), sudo=True,)
 
     self.assertNoMoreResources()

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py
index 283c865..3282192 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py
@@ -150,7 +150,7 @@ class TestStormDrpcServer(TestStormBase):
                      hdp_stack_version = self.STACK_VERSION,
                      target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
 
   def test_pre_rolling_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.1/configs/default.json"
@@ -169,7 +169,7 @@ class TestStormDrpcServer(TestStormBase):
                      call_mocks = [(0, None), (0, None)],
                      mocks_dict = mocks_dict)
 
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
index bbcc15a..5a9c8bf 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
@@ -149,8 +149,8 @@ class TestStormNimbus(TestStormBase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-nimbus', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-nimbus', '2.2.1.0-2067'), sudo=True)
 
   def test_pre_rolling_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.1/configs/default.json"
@@ -169,8 +169,8 @@ class TestStormNimbus(TestStormBase):
                      call_mocks = [(0, None), (0, None)],
                      mocks_dict = mocks_dict)
 
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-nimbus', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-nimbus', '2.3.0.0-1234'), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus_prod.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus_prod.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus_prod.py
index 03f33f6..78b8866 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus_prod.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus_prod.py
@@ -114,8 +114,8 @@ class TestStormNimbus(TestStormBase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-nimbus', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-nimbus', '2.2.1.0-2067'), sudo=True)
 
   def test_pre_rolling_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.1/configs/default.json"
@@ -134,8 +134,8 @@ class TestStormNimbus(TestStormBase):
                      call_mocks = [(0, None), (0, None)],
                      mocks_dict = mocks_dict)
 
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-nimbus', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-nimbus', '2.3.0.0-1234'), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_rest_api_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_rest_api_service.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_rest_api_service.py
index b26913e..7513f35 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_rest_api_service.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_rest_api_service.py
@@ -149,7 +149,7 @@ class TestStormRestApi(TestStormBase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
 
   def test_pre_rolling_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.1/configs/default.json"
@@ -168,7 +168,7 @@ class TestStormRestApi(TestStormBase):
                      call_mocks = [(0, None), (0, None)],
                      mocks_dict = mocks_dict)
 
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor.py
index 84fb64c..628ea69 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor.py
@@ -194,8 +194,8 @@ class TestStormSupervisor(TestStormBase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-supervisor', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-supervisor', '2.2.1.0-2067'), sudo=True)
 
   def test_pre_rolling_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.1/configs/default.json"
@@ -214,8 +214,8 @@ class TestStormSupervisor(TestStormBase):
                      call_mocks = [(0, None), (0, None)],
                      mocks_dict = mocks_dict)
 
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-supervisor', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-supervisor', '2.3.0.0-1234'), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor_prod.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor_prod.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor_prod.py
index 649b716..aa3b0e6 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor_prod.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor_prod.py
@@ -163,8 +163,8 @@ class TestStormSupervisor(TestStormBase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-supervisor', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-supervisor', '2.2.1.0-2067'), sudo=True)
 
   def test_pre_rolling_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.1/configs/default.json"
@@ -183,8 +183,8 @@ class TestStormSupervisor(TestStormBase):
                      call_mocks = [(0, None), (0, None)],
                      mocks_dict = mocks_dict)
 
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-supervisor', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-supervisor', '2.3.0.0-1234'), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py
index 185c66d..af51303 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py
@@ -147,7 +147,7 @@ class TestStormUiServer(TestStormBase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
 
   def test_pre_rolling_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.1/configs/default.json"
@@ -166,7 +166,7 @@ class TestStormUiServer(TestStormBase):
                      call_mocks = [(0, None), (0, None)],
                      mocks_dict = mocks_dict)
 
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py b/ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py
index 89037eb..781105f 100644
--- a/ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py
+++ b/ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py
@@ -73,7 +73,7 @@ class TestTezClient(RMFTestCase):
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     get_hdp_version_mock.return_value = "2.2.1.0-2067"
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
 
     # for now, it's enough that hdp-select is confirmed
 
@@ -87,7 +87,7 @@ class TestTezClient(RMFTestCase):
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     get_hdp_version_mock.return_value = "2.2.1.0-2067"
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
 
     # for now, it's enough that hdp-select is confirmed
 
@@ -108,7 +108,7 @@ class TestTezClient(RMFTestCase):
                        call_mocks = [(0, None), (0, None), (0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-client', version), sudo=True)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', version), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(2, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py b/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
index cb7b06b..58573de 100644
--- a/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
+++ b/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
@@ -392,7 +392,7 @@ class TestAppTimelineServer(RMFTestCase):
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-yarn-timelineserver', version), sudo=True)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-yarn-timelineserver', version), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.2/ACCUMULO/test_accumulo_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/ACCUMULO/test_accumulo_client.py b/ambari-server/src/test/python/stacks/2.2/ACCUMULO/test_accumulo_client.py
index 1b518d4..139a7d3 100644
--- a/ambari-server/src/test/python/stacks/2.2/ACCUMULO/test_accumulo_client.py
+++ b/ambari-server/src/test/python/stacks/2.2/ACCUMULO/test_accumulo_client.py
@@ -44,7 +44,7 @@ class TestAccumuloClient(RMFTestCase):
       hdp_stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'accumulo-client', version), sudo=True,)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'accumulo-client', version), sudo=True,)
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
@@ -67,7 +67,7 @@ class TestAccumuloClient(RMFTestCase):
       call_mocks = [(0, None), (0, None)],
       mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'accumulo-client', version), sudo=True,)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'accumulo-client', version), sudo=True,)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.2/KAFKA/test_kafka_broker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/KAFKA/test_kafka_broker.py b/ambari-server/src/test/python/stacks/2.2/KAFKA/test_kafka_broker.py
index ffb5914..d62c2fd 100644
--- a/ambari-server/src/test/python/stacks/2.2/KAFKA/test_kafka_broker.py
+++ b/ambari-server/src/test/python/stacks/2.2/KAFKA/test_kafka_broker.py
@@ -113,7 +113,7 @@ class TestKafkaBroker(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'kafka-broker', version), sudo=True,)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'kafka-broker', version), sudo=True,)
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
@@ -135,7 +135,7 @@ class TestKafkaBroker(RMFTestCase):
                        mocks_dict = mocks_dict)
 
     self.assertResourceCalled('Execute',
-                              ('hdp-select', 'set', 'kafka-broker', version), sudo=True,)
+                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'kafka-broker', version), sudo=True,)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py b/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py
index 83bba1f..609c57e 100644
--- a/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py
+++ b/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py
@@ -248,7 +248,7 @@ class TestKnoxGateway(RMFTestCase):
      '/var/lib/knox/data'),
         sudo = True,
     )
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'knox-server', '2.2.1.0-3242'),
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'knox-server', '2.2.1.0-3242'),
         sudo = True,
     )
     self.assertNoMoreResources()
@@ -292,7 +292,7 @@ class TestKnoxGateway(RMFTestCase):
      '/var/lib/knox/data'),
         sudo = True,
     )
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'knox-server', version),
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'knox-server', version),
         sudo = True,
     )
     self.assertResourceCalled('Execute', ('cp',
@@ -364,7 +364,7 @@ class TestKnoxGateway(RMFTestCase):
      '/var/lib/knox/data'),
         sudo = True,
     )
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'knox-server', version),
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'knox-server', version),
         sudo = True,
     )
     self.assertResourceCalled('Execute', ('cp',
@@ -438,7 +438,7 @@ class TestKnoxGateway(RMFTestCase):
     )
 
     '''
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'knox-server', version),
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'knox-server', version),
         sudo = True,
     )
     self.assertResourceCalled('Execute', ('cp',

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_admin.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_admin.py b/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_admin.py
index 912f187..50d523c 100644
--- a/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_admin.py
+++ b/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_admin.py
@@ -211,7 +211,7 @@ class TestRangerAdmin(RMFTestCase):
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'ranger-admin', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'ranger-admin', '2.3.0.0-1234'), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_usersync.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_usersync.py b/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_usersync.py
index d10a9fc..c325709 100644
--- a/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_usersync.py
+++ b/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_usersync.py
@@ -125,7 +125,7 @@ class TestRangerUsersync(RMFTestCase):
                               environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_67'},
                               sudo = True
     )
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'ranger-usersync', '2.2.2.0-2399'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'ranger-usersync', '2.2.2.0-2399'), sudo=True)
 
   @patch("setup_ranger.setup_usersync")
   def test_upgrade_23(self, setup_usersync_mock):
@@ -148,7 +148,7 @@ class TestRangerUsersync(RMFTestCase):
     self.assertResourceCalled("Execute", ("/usr/bin/ranger-usersync-stop",),
                               environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_67'},
                               sudo = True)
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'ranger-usersync', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'ranger-usersync', '2.3.0.0-1234'), sudo=True)
 
     self.assertEquals(2, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.2/SLIDER/test_slider_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/SLIDER/test_slider_client.py b/ambari-server/src/test/python/stacks/2.2/SLIDER/test_slider_client.py
index 3af55f6..7d45a35 100644
--- a/ambari-server/src/test/python/stacks/2.2/SLIDER/test_slider_client.py
+++ b/ambari-server/src/test/python/stacks/2.2/SLIDER/test_slider_client.py
@@ -115,8 +115,8 @@ class TestSliderClient(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'slider-client', '2.2.1.0-2067'), sudo=True)
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'slider-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
     self.assertNoMoreResources()
 
 
@@ -136,8 +136,8 @@ class TestSliderClient(RMFTestCase):
                        call_mocks = [(0, None), (0, None), (0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'slider-client', '2.3.0.0-1234'), sudo=True)
-    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'hadoop-client', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'slider-client', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', '2.3.0.0-1234'), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(2, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py b/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
index 369fdf9..76e7653 100644
--- a/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
+++ b/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
@@ -307,7 +307,7 @@ class TestJobHistoryServer(RMFTestCase):
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'spark-historyserver', version), sudo=True)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'spark-historyserver', version), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py b/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py
index a414dda..fc4b596 100644
--- a/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py
+++ b/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py
@@ -144,7 +144,7 @@ class TestSparkClient(RMFTestCase):
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'spark-client', version), sudo=True)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'spark-client', version), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)

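The pattern running through all of the updated assertions above: hdp-select is no longer invoked by bare name but through ambari-python-wrap with an absolute path, so the script is guaranteed to run under a supported Python interpreter even when the system default is not one (the subject of AMBARI-13229). A minimal sketch of building that command tuple; the constants below are assumptions for illustration, not the actual resource_management API:

    AMBARI_PYTHON_WRAP = 'ambari-python-wrap'   # assumed wrapper that resolves a supported Python
    HDP_SELECT = '/usr/bin/hdp-select'          # absolute path, as the tests now assert

    def hdp_select_command(package, version):
        """Build the tuple the tests expect to be passed to Execute with sudo=True."""
        return (AMBARI_PYTHON_WRAP, HDP_SELECT, 'set', package, version)

    # hdp_select_command('storm-client', '2.3.0.0-1234')
    # -> ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-client', '2.3.0.0-1234')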

[08/50] [abbrv] ambari git commit: AMBARI-12879. Warn the user about HDFS Upgrade not finalized in Ambari Web UI (rzang)

Posted by nc...@apache.org.
AMBARI-12879. Warn the user about HDFS Upgrade not finalized in Ambari Web UI (rzang)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e1f697a8
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e1f697a8
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e1f697a8

Branch: refs/heads/branch-dev-patch-upgrade
Commit: e1f697a866a7b425dead3186b49ba5c281f4a001
Parents: 23bf111
Author: Richard Zang <rz...@apache.org>
Authored: Wed Sep 23 14:07:51 2015 -0700
Committer: Richard Zang <rz...@apache.org>
Committed: Thu Sep 24 17:21:31 2015 -0700

----------------------------------------------------------------------
 .../main/admin/stack_and_upgrade_controller.js      |  2 +-
 .../main/admin/stack_and_upgrade_controller_test.js | 16 ++++++++++------
 2 files changed, 11 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e1f697a8/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
index a7bfa98..880976f 100644
--- a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
+++ b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
@@ -849,7 +849,7 @@ App.MainAdminStackAndUpgradeController = Em.Controller.extend(App.LocalStorage,
    * @method updateFinalizeSuccessCallback
    */
   updateFinalizeSuccessCallback: function (data) {
-    var context = data ? Em.get(data, 'upgrade_groups.firstObject.upgrade_items.firstObject.UpgradeItem.context') : '';
+    var context = data ? Em.get(data, 'items.firstObject.upgrade_groups.firstObject.upgrade_items.firstObject.UpgradeItem.context') : '';
     this.set('isFinalizeItem', context === this.get('finalizeContext'));
   },
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e1f697a8/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js b/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
index 5a1a500..b034ac4 100644
--- a/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
+++ b/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
@@ -1169,14 +1169,18 @@ describe('App.MainAdminStackAndUpgradeController', function() {
 
     it('data exists and Finalize should be true', function() {
       var data = {
-        upgrade_groups: [
+        items: [
           {
-            upgrade_items: [
+            upgrade_groups: [
               {
-                UpgradeItem: {
-                  context: controller.get('finalizeContext'),
-                  status: "HOLDING"
-                }
+                upgrade_items: [
+                  {
+                    UpgradeItem: {
+                      context: controller.get('finalizeContext'),
+                      status: "HOLDING"
+                    }
+                  }
+                ]
               }
             ]
           }

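The fix above reflects a response-shape change: the upgrade groups are nested under a top-level items array, so the callback must drill one level deeper to find the finalize context. A hedged Python equivalent of that lookup (field names taken from the diff; the empty-string fallback roughly mirrors the Em.get behavior when a level is missing):

    def finalize_context(data):
        """Return items[0].upgrade_groups[0].upgrade_items[0].UpgradeItem.context, or ''."""
        try:
            return (data['items'][0]['upgrade_groups'][0]
                        ['upgrade_items'][0]['UpgradeItem']['context'])
        except (KeyError, IndexError, TypeError):
            return ''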

[15/50] [abbrv] ambari git commit: AMBARI-13235. Incorrect Hosts table filtering after navigating back from Host Summary (rzang)

Posted by nc...@apache.org.
AMBARI-13235. Incorrect Hosts table filtering after navigating back from Host Summary (rzang)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9a7ceb54
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9a7ceb54
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9a7ceb54

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 9a7ceb54ec7503f4f1fcad77b7a0a6a6f22b166c
Parents: 80e2f20
Author: Richard Zang <rz...@apache.org>
Authored: Fri Sep 25 11:25:11 2015 -0700
Committer: Richard Zang <rz...@apache.org>
Committed: Fri Sep 25 11:25:11 2015 -0700

----------------------------------------------------------------------
 ambari-web/app/mappers/hosts_mapper.js | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9a7ceb54/ambari-web/app/mappers/hosts_mapper.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mappers/hosts_mapper.js b/ambari-web/app/mappers/hosts_mapper.js
index 53f536c..3fcf295 100644
--- a/ambari-web/app/mappers/hosts_mapper.js
+++ b/ambari-web/app/mappers/hosts_mapper.js
@@ -94,6 +94,13 @@ App.hostsMapper = App.QuickDataMapper.create({
       var clusterName = App.get('clusterName');
       var advancedHostComponents = [];
 
+      // Create a map for quick access on existing hosts
+      var hosts = App.Host.find().toArray();
+      var hostsMap = {};
+      for (var p = 0; p < hosts.length; p++) {
+        hostsMap[hosts[p].get('hostName')] = hosts[p];
+      }
+
       // Use normal for loop instead of foreach to enhance performance
       for (var index = 0; index < json.items.length; index++) {
         var item = json.items[index];
@@ -153,10 +160,9 @@ App.hostsMapper = App.QuickDataMapper.create({
         var alertsSummary = item.alerts_summary;
         item.critical_warning_alerts_count = alertsSummary ? (alertsSummary.CRITICAL || 0) + (alertsSummary.WARNING || 0) : 0;
         item.cluster_id = clusterName;
-        var existingHost = App.Host.find().findProperty('hostName', component.host_name);
-        var fromHostDetail = App.router.get('currentState.parentState.name') == 'hostDetails';
+        var existingHost = hostsMap[component.host_name];
         // There is no need to override existing index in host detail view since old model(already have indexes) will not be cleared.
-        item.index = (existingHost && fromHostDetail)? existingHost.get('index'): index;
+        item.index = (existingHost && !json.itemTotal)? existingHost.get('index'): index;
 
         if (stackUpgradeSupport) {
           this.config = $.extend(this.config, {

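The mapper change above trades the repeated App.Host.find().findProperty() scan (linear per item, so quadratic over the payload) for a hostName-keyed map built once before the loop. The same optimization sketched in Python, with plain dicts standing in for the Ember models and field names simplified:

    def assign_indexes(items, existing_hosts, item_total):
        """One dict lookup per item instead of a linear scan per item."""
        hosts_map = {h['host_name']: h for h in existing_hosts}  # built once, O(n)
        for index, item in enumerate(items):
            existing = hosts_map.get(item['host_name'])          # O(1) lookup
            # keep the stored index for an already-known host outside a paged load,
            # mirroring the (existingHost && !json.itemTotal) condition in the diff
            item['index'] = existing['index'] if existing and not item_total else index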

[44/50] [abbrv] ambari git commit: AMBARI-13268. Adding HiveServer2 from service page causes JS-error (onechiporenko)

Posted by nc...@apache.org.
AMBARI-13268. Adding HiveServer2 from service page causes JS-error (onechiporenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7db1f10c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7db1f10c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7db1f10c

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 7db1f10c731c67db79214f975ce519d70658a50e
Parents: 9f5d297
Author: Oleg Nechiporenko <on...@apache.org>
Authored: Tue Sep 29 15:35:09 2015 +0300
Committer: Oleg Nechiporenko <on...@apache.org>
Committed: Tue Sep 29 15:42:52 2015 +0300

----------------------------------------------------------------------
 ambari-web/app/controllers/main/service/item.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7db1f10c/ambari-web/app/controllers/main/service/item.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/item.js b/ambari-web/app/controllers/main/service/item.js
index ae1a327..89f44ac 100644
--- a/ambari-web/app/controllers/main/service/item.js
+++ b/ambari-web/app/controllers/main/service/item.js
@@ -19,7 +19,7 @@
 var App = require('app');
 var batchUtils = require('utils/batch_scheduled_requests');
 
-App.MainServiceItemController = Em.Controller.extend(App.SupportClientConfigsDownload, {
+App.MainServiceItemController = Em.Controller.extend(App.SupportClientConfigsDownload, App.InstallComponent, {
   name: 'mainServiceItemController',
 
   /**


[26/50] [abbrv] ambari git commit: AMBARI-13253. HBase service start fails after manual upgrade due to missing HBASE_REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS class. (mahadev)

Posted by nc...@apache.org.
AMBARI-13253. HBase service start fails after manual upgrade due to missing HBASE_REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS class. (mahadev)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3e7308a1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3e7308a1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3e7308a1

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 3e7308a1d4cf14216288006955654baa7e867f32
Parents: 62b7fe8
Author: Mahadev Konar <ma...@apache.org>
Authored: Sun Sep 27 11:27:49 2015 -0700
Committer: Mahadev Konar <ma...@apache.org>
Committed: Sun Sep 27 11:27:49 2015 -0700

----------------------------------------------------------------------
 ambari-server/src/main/python/upgradeHelper.py  | 58 +++++++++++++-------
 .../catalog/UpgradeCatalog_2.1_to_2.3.json      | 16 +++++-
 .../catalog/UpgradeCatalog_2.2_to_2.3.json      | 16 +++++-
 3 files changed, 66 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/3e7308a1/ambari-server/src/main/python/upgradeHelper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/upgradeHelper.py b/ambari-server/src/main/python/upgradeHelper.py
index c09502f..1a6da53 100644
--- a/ambari-server/src/main/python/upgradeHelper.py
+++ b/ambari-server/src/main/python/upgradeHelper.py
@@ -262,6 +262,7 @@ class Options(Const):
   def initialize(cls):
     cls.ROOT_URL = '%s://%s:%s/api/v1' % (cls.API_PROTOCOL, cls.HOST, cls.API_PORT)
     cls.CLUSTER_URL = cls.ROOT_URL + "/clusters/%s" % cls.CLUSTER_NAME
+    cls.COMPONENTS_URL = cls.CLUSTER_URL + "/components?fields=ServiceComponentInfo/total_count"
     cls.COMPONENTS_FORMAT = cls.CLUSTER_URL + "/components/{0}"
     cls.TEZ_VIEW_URL = cls.ROOT_URL + "/views/TEZ"
     cls.STACKS_URL = cls.ROOT_URL + "/stacks"
@@ -282,8 +283,6 @@ class Options(Const):
       return True
     return False
 
-
-
   @classmethod
   def initialize_logger(cls, filename=None):
     cls.logger = logging.getLogger('UpgradeHelper')
@@ -350,6 +349,16 @@ class AmbariServer(object):
     Options.logger.info("Resolving Ambari server configuration ...")
     self._get_server_info()
     self._get_agents_info()
+    self._get_components()
+
+  def _get_components(self):
+    info = curl(Options.COMPONENTS_URL, parse=True)
+    self._components = []
+    if CatConst.ITEMS_TAG in info:
+      for item in info[CatConst.ITEMS_TAG]:
+        if "ServiceComponentInfo" in item and "total_count" in item["ServiceComponentInfo"] and \
+          int(item["ServiceComponentInfo"]["total_count"]) > 0 and "component_name" in item["ServiceComponentInfo"]:
+          self._components.append(item["ServiceComponentInfo"]["component_name"])
 
   def _get_server_info(self):
     info = curl(Options.AMBARI_SERVER_URL, parse=True)
@@ -371,6 +380,10 @@ class AmbariServer(object):
       self._agents = list(map(lambda x: x["RootServiceHostComponents"]["host_name"], agent_props))
 
   @property
+  def components(self):
+    return self._components
+
+  @property
   def server_version(self):
     return self._server_version
 
@@ -1419,23 +1432,6 @@ def get_kafka_listeners():
   return kafka_listeners
 
 
-def check_phoenix_component_existence():
-  try:
-    resultset = curl(Options.COMPONENTS_FORMAT.format(Options.PHOENIX_QUERY_SERVER), validate=False, parse=True)
-  except HTTPError as e:
-    raise TemplateProcessingException(str(e))
-
-  if "ServiceComponentInfo" in resultset and "total_count" in resultset["ServiceComponentInfo"]:
-    try:
-      component_count = int(resultset["ServiceComponentInfo"]["total_count"])
-      if component_count > 0:
-        return True
-    except ValueError:
-      return False
-
-  return False
-
-
 def get_ranger_xaaudit_hdfs_destination_directory():
   namenode_hostname="localhost"
   namenode_cfg = curl(Options.COMPONENTS_FORMAT.format(Options.NAMENODE), validate=False, parse=True)
@@ -1623,12 +1619,30 @@ def get_hbase_coprocessmaster_classes():
 
   return old_value
 
+
 def get_rpc_scheduler_factory_class():
-  if check_phoenix_component_existence():
+  if Options.PHOENIX_QUERY_SERVER in Options.ambari_server.components:
     return "org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory"
   else:
     return ""
 
+
+def get_hbase_rpc_controllerfactory_class():
+  if Options.PHOENIX_QUERY_SERVER in Options.ambari_server.components:
+    return "org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory"
+  else:
+    return ""
+
+
+def get_hbase_regionserver_wal_codec():
+  prop = "phoenix_sql_enabled"
+  scf = Options.server_config_factory
+  if "hbase-env" in scf.items():
+    if prop in scf.get_config("hbase-env").properties and scf.get_config("hbase-env").properties[prop].upper() == "TRUE":
+      return "org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec"
+  return "org.apache.hadoop.hbase.regionserver.wal.WALCellCodec"
+
+
 def get_hbase_coprocessor_region_classes():
   scf = Options.server_config_factory
   prop = "hbase.coprocessor.region.classes"
@@ -1666,8 +1680,12 @@ def _substitute_handler(upgrade_catalog, tokens, value):
       value = value.replace(token, get_jh_host(upgrade_catalog))
     elif token == "{RESOURCEMANAGER_HOST}":
       value = value.replace(token, get_jt_host(upgrade_catalog))
+    elif token == "{HBASE_REGIONSERVER_WAL_CODEC}":
+      value = value.replace(token, get_hbase_regionserver_wal_codec())
     elif token == "{HBASE_REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS}":
       value = value.replace(token, get_rpc_scheduler_factory_class())
+    elif token == "{HBASE_RPC_CONTROLLERFACTORY_CLASS}":
+      value = value.replace(token, get_hbase_rpc_controllerfactory_class())
     elif token == "{ZOOKEEPER_QUORUM}":
       value = value.replace(token, get_zookeeper_quorum())
     elif token == "{HBASE_COPROCESS_MASTER_CLASSES}":

http://git-wip-us.apache.org/repos/asf/ambari/blob/3e7308a1/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.3.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.3.json b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.3.json
index 25c728b..250f5b2 100644
--- a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.3.json
+++ b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.3.json
@@ -264,8 +264,20 @@
             ],
             "template": "yes"
           },
-          "hbase.rpc.controllerfactory.class": "org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory",
-          "hbase.regionserver.wal.codec": "org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec",
+          "hbase.rpc.controllerfactory.class": {
+            "value": "{HBASE_RPC_CONTROLLERFACTORY_CLASS}",
+            "template": "yes",
+            "required-services": [
+              "HBASE"
+            ]
+          },
+          "hbase.regionserver.wal.codec": {
+            "value": "{HBASE_REGIONSERVER_WAL_CODEC}",
+            "template": "yes",
+            "required-services": [
+              "HBASE"
+            ]
+          },
           "phoenix.functions.allowUserDefinedFunctions": "true",
           "fs.hdfs.impl": "org.apache.hadoop.hdfs.DistributedFileSystem",
           "hbase.bucketcache.percentage.in.combinedcache": {"remove": "yes"},

http://git-wip-us.apache.org/repos/asf/ambari/blob/3e7308a1/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.2_to_2.3.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.2_to_2.3.json b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.2_to_2.3.json
index eddc1a7..88772f9 100644
--- a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.2_to_2.3.json
+++ b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.2_to_2.3.json
@@ -393,8 +393,20 @@
             ],
             "template": "yes"
           },
-          "hbase.rpc.controllerfactory.class": "org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory",
-          "hbase.regionserver.wal.codec": "org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec",
+          "hbase.rpc.controllerfactory.class": {
+            "value": "{HBASE_RPC_CONTROLLERFACTORY_CLASS}",
+            "template": "yes",
+            "required-services": [
+              "HBASE"
+            ]
+          },
+          "hbase.regionserver.wal.codec": {
+            "value": "{HBASE_REGIONSERVER_WAL_CODEC}",
+            "template": "yes",
+            "required-services": [
+              "HBASE"
+            ]
+          },
           "phoenix.functions.allowUserDefinedFunctions": "true",
           "fs.hdfs.impl": "org.apache.hadoop.hdfs.DistributedFileSystem",
           "hbase.bucketcache.percentage.in.combinedcache": {

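The upgradeHelper change above replaces a per-check HTTP probe (the removed check_phoenix_component_existence) with a single components query cached at startup; the template handlers then test membership in that list. A hedged sketch of the same flow, using requests in place of the script's own curl() helper:

    import requests

    def get_components(cluster_url, auth=None):
        """Names of components with at least one instance (one API call, cached by the caller)."""
        url = cluster_url + "/components?fields=ServiceComponentInfo/total_count"
        info = requests.get(url, auth=auth).json()
        components = []
        for item in info.get("items", []):
            sci = item.get("ServiceComponentInfo", {})
            if "component_name" in sci and int(sci.get("total_count", 0)) > 0:
                components.append(sci["component_name"])
        return components

    def rpc_scheduler_factory_class(components):
        # membership test instead of an extra round-trip per template token
        if "PHOENIX_QUERY_SERVER" in components:
            return "org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory"
        return ""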

[42/50] [abbrv] ambari git commit: AMBARI-13265: [PluggableStackDefinition] Update templates and shell scripts (jluniya)

Posted by nc...@apache.org.
AMBARI-13265: [PluggableStackDefinition] Update templates and shell scripts (jluniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c8ea8573
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c8ea8573
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c8ea8573

Branch: refs/heads/branch-dev-patch-upgrade
Commit: c8ea8573a5b0c37dd467892be3e4774b4b01de20
Parents: 02c74c8
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Mon Sep 28 22:29:31 2015 -0700
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Mon Sep 28 22:29:31 2015 -0700

----------------------------------------------------------------------
 .../GenerateStackDefinition.py                     | 17 +++++++++++++++--
 .../pluggable_stack_definition/configs/PHD.json    |  2 +-
 .../pluggable_stack_definition/configs/SAPHD.json  |  2 +-
 3 files changed, 17 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c8ea8573/ambari-common/src/main/python/pluggable_stack_definition/GenerateStackDefinition.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/pluggable_stack_definition/GenerateStackDefinition.py b/ambari-common/src/main/python/pluggable_stack_definition/GenerateStackDefinition.py
index f1f9381..3038f90 100644
--- a/ambari-common/src/main/python/pluggable_stack_definition/GenerateStackDefinition.py
+++ b/ambari-common/src/main/python/pluggable_stack_definition/GenerateStackDefinition.py
@@ -369,6 +369,9 @@ def process_py_files(file_path, config_data, stack_version_changes):
 def process_xml_files(file_path, config_data, stack_version_changes):
   return process_replacements(file_path, config_data, stack_version_changes)
 
+def process_other_files(file_path, config_data, stack_version_changes):
+  return process_replacements(file_path, config_data, stack_version_changes)
+
 def process_config_xml(file_path, config_data):
   tree = ET.parse(file_path)
   root = tree.getroot()
@@ -520,6 +523,11 @@ class GeneratorHelper(object):
           ###################################################################
           target = process_py_files(target, self.config_data, self.stack_version_changes)
           return
+        ####################################################################
+        # Generic processing for all other types of files.
+        ####################################################################
+        if target.endswith(".j2") or target.endswith(".sh"):
+          process_other_files(target, self.config_data, self.stack_version_changes)
 
       copy_tree(original_stack, target_stack, ignored_files, post_copy=post_copy)
       # copy default stack advisor
@@ -542,12 +550,17 @@ class GeneratorHelper(object):
             # process configuration xml
             target = process_config_xml(target, self.config_data)
           # process generic xml
-          if target.endswith('.xml'):
-            process_xml_files(target, self.config_data, self.stack_version_changes)
+          process_xml_files(target, self.config_data, self.stack_version_changes)
+          return
         # process python files
         if target.endswith('.py'):
           process_py_files(target, self.config_data, self.stack_version_changes)
           return
+        ####################################################################
+        # Generic processing for all other types of files.
+        ####################################################################
+        if target.endswith(".j2") or target.endswith(".sh"):
+          process_other_files(target, self.config_data, self.stack_version_changes)
 
       copy_tree(source_folder, target_folder, ignored_files, post_copy=post_copy)
       if parent_services:

http://git-wip-us.apache.org/repos/asf/ambari/blob/c8ea8573/ambari-common/src/main/python/pluggable_stack_definition/configs/PHD.json
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/pluggable_stack_definition/configs/PHD.json b/ambari-common/src/main/python/pluggable_stack_definition/configs/PHD.json
index 45e45be..431a26c 100644
--- a/ambari-common/src/main/python/pluggable_stack_definition/configs/PHD.json
+++ b/ambari-common/src/main/python/pluggable_stack_definition/configs/PHD.json
@@ -5,7 +5,7 @@
   "textReplacements": [
     ["hdp-select", "distro-select"]
    ],
-  "preservedText": ["ext-2.2", "HDP-oozie", "hdp.version", "HDP_VERSION"],
+  "preservedText": ["ext-2.2", "HDP-oozie", "hdp.version", "HDP_VERSION", "192.168."],
   "uiMapping": [
     {
       "stackVersionNumber": "3.0",

http://git-wip-us.apache.org/repos/asf/ambari/blob/c8ea8573/ambari-common/src/main/python/pluggable_stack_definition/configs/SAPHD.json
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/pluggable_stack_definition/configs/SAPHD.json b/ambari-common/src/main/python/pluggable_stack_definition/configs/SAPHD.json
index 27c6995..de85ade 100644
--- a/ambari-common/src/main/python/pluggable_stack_definition/configs/SAPHD.json
+++ b/ambari-common/src/main/python/pluggable_stack_definition/configs/SAPHD.json
@@ -5,7 +5,7 @@
   "textReplacements": [
     ["hdp-select", "distro-select"]
    ],
-  "preservedText": ["ext-2.2", "HDP-oozie", "hdp.version", "HDP_VERSION"],
+  "preservedText": ["ext-2.2", "HDP-oozie", "hdp.version", "HDP_VERSION", "192.168."],
   "uiMapping": [
     {
       "stackVersionNumber": "1.0",

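The generator change above extends the generic text replacement to .j2 and .sh files, and the config change adds "192.168." to preservedText so literal address prefixes survive the version-string and hdp-select rewrites. A sketch of the replace-with-shielding pattern those preservedText entries imply; this is illustrative only, not a claim about how process_replacements() in GenerateStackDefinition.py is actually written:

    def replace_preserving(text, replacements, preserved):
        """Apply textReplacements while shielding preservedText tokens."""
        placeholders = {tok: '\x00%d\x00' % i for i, tok in enumerate(preserved)}
        for tok, ph in placeholders.items():
            text = text.replace(tok, ph)       # hide tokens that must survive untouched
        for old, new in replacements:
            text = text.replace(old, new)      # e.g. 'hdp-select' -> 'distro-select'
        for tok, ph in placeholders.items():
            text = text.replace(ph, tok)       # restore the preserved tokens
        return text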

[41/50] [abbrv] ambari git commit: AMBARI-13264. Sometimes properties are missing on config tab while adding Ranger (rzang)

Posted by nc...@apache.org.
AMBARI-13264. Sometimes properties are missing on config tab while adding Ranger (rzang)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/02c74c87
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/02c74c87
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/02c74c87

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 02c74c87ff35a1570369d0ee8b4acaeae2934cbe
Parents: 61540bb
Author: Richard Zang <rz...@apache.org>
Authored: Mon Sep 28 21:49:35 2015 -0700
Committer: Richard Zang <rz...@apache.org>
Committed: Mon Sep 28 21:50:51 2015 -0700

----------------------------------------------------------------------
 ambari-web/app/views/common/controls_view.js | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/02c74c87/ambari-web/app/views/common/controls_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/controls_view.js b/ambari-web/app/views/common/controls_view.js
index 523026c..17fab9f 100644
--- a/ambari-web/app/views/common/controls_view.js
+++ b/ambari-web/app/views/common/controls_view.js
@@ -447,10 +447,16 @@ App.ServiceConfigRadioButtons = Ember.View.extend(App.ServiceConfigCalculateId,
       minorVersion = version? version[2]: 0;
     // functionality added in HDP 2.3
     // remove DB_FLAVOR so it can handle DB Connection checks
-    if (App.get('currentStackName') == 'HDP' && majorVersion >= 2  && minorVersion>= 3) {
-      return ['ranger.authentication.method'];
+    // PHD-2.3 and SAPHD-1.0 is based on HDP-2.3
+    var supportFromMap = {
+      'HDP': 2.3,
+      'PHD': 3.3,
+      'SAPHD': 1.0
+    };
+    if (Number(majorVersion + '.' + minorVersion) < supportFromMap[App.get('currentStackName')]){
+      return ['DB_FLAVOR', 'authentication_method'];
     }
-    return ['DB_FLAVOR', 'authentication_method'];
+    return ['ranger.authentication.method'];
   }.property('App.currentStackName'),
 
   serviceConfig: null,

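The inverted condition above gates the Ranger property handling on a per-stack minimum version instead of an HDP-only check, since the PHD and SAPHD releases named in the comment are rebased on HDP 2.3. The same gate in Python, with the map values copied from the diff; note that the "major.minor" numeric comparison is only safe for single-digit minor versions like the ones in the map:

    SUPPORT_FROM = {'HDP': 2.3, 'PHD': 3.3, 'SAPHD': 1.0}

    def properties_to_remove(stack, major, minor):
        """Pre-2.3-era stacks keep DB_FLAVOR handling; newer ones drop it."""
        if float('%s.%s' % (major, minor)) < SUPPORT_FROM[stack]:
            return ['DB_FLAVOR', 'authentication_method']
        return ['ranger.authentication.method']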

[25/50] [abbrv] ambari git commit: AMBARI-13252. RU: Spark service check failed multiple times - non root server, agent U14 (aonishuk)

Posted by nc...@apache.org.
AMBARI-13252. RU: Spark service check failed multiple times - non root server, agent U14 (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/62b7fe87
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/62b7fe87
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/62b7fe87

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 62b7fe87fce2f4d052a358f156aa19ad5c260b8d
Parents: bb1491f
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Sun Sep 27 01:15:50 2015 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Sun Sep 27 01:15:50 2015 +0300

----------------------------------------------------------------------
 .../SPARK/1.2.0.2.2/package/scripts/setup_spark.py               | 4 +++-
 .../src/test/python/stacks/2.2/SPARK/test_job_history_server.py  | 4 ++++
 .../src/test/python/stacks/2.2/SPARK/test_spark_client.py        | 4 ++++
 .../src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py | 2 ++
 4 files changed, 13 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/62b7fe87/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/setup_spark.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/setup_spark.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/setup_spark.py
index 1044e6b..9969a9b 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/setup_spark.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/setup_spark.py
@@ -47,7 +47,9 @@ def setup_spark(env, type, action = None):
     
   PropertiesFile(format("{spark_conf}/spark-defaults.conf"),
     properties = params.config['configurations']['spark-defaults'],
-    key_value_delimiter = " ",               
+    key_value_delimiter = " ", 
+    owner=params.spark_user,
+    group=params.spark_group,              
   )
 
   # create spark-env.sh in etc/conf dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/62b7fe87/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py b/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
index 4b87531..369fdf9 100644
--- a/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
+++ b/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
@@ -195,7 +195,9 @@ class TestJobHistoryServer(RMFTestCase):
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
     )
     self.assertResourceCalled('PropertiesFile', '/usr/hdp/current/spark-client/conf/spark-defaults.conf',
+        owner = 'spark',
         key_value_delimiter = ' ',
+        group = 'spark',
         properties = self.getConfig()['configurations']['spark-defaults'],
     )
     self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/spark-env.sh',
@@ -260,7 +262,9 @@ class TestJobHistoryServer(RMFTestCase):
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
     )
     self.assertResourceCalled('PropertiesFile', '/usr/hdp/current/spark-client/conf/spark-defaults.conf',
+        owner = 'spark',
         key_value_delimiter = ' ',
+        group = 'spark',
         properties = self.getConfig()['configurations']['spark-defaults'],
     )
     self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/spark-env.sh',

http://git-wip-us.apache.org/repos/asf/ambari/blob/62b7fe87/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py b/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py
index 081db57..a414dda 100644
--- a/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py
+++ b/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py
@@ -63,7 +63,9 @@ class TestSparkClient(RMFTestCase):
         recursive = True,
     )
     self.assertResourceCalled('PropertiesFile', '/usr/hdp/current/spark-client/conf/spark-defaults.conf',
+        owner = 'spark',
         key_value_delimiter = ' ',
+        group = 'spark',
         properties = self.getConfig()['configurations']['spark-defaults'],
     )
     self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/spark-env.sh',
@@ -99,7 +101,9 @@ class TestSparkClient(RMFTestCase):
         recursive = True,
     )
     self.assertResourceCalled('PropertiesFile', '/usr/hdp/current/spark-client/conf/spark-defaults.conf',
+        owner = 'spark',
         key_value_delimiter = ' ',
+        group = 'spark',
         properties = self.getConfig()['configurations']['spark-defaults'],
     )
     self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/spark-env.sh',

http://git-wip-us.apache.org/repos/asf/ambari/blob/62b7fe87/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py b/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
index 031e0ac..9e41e11 100644
--- a/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
+++ b/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
@@ -118,7 +118,9 @@ class TestSparkThriftServer(RMFTestCase):
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
     )
     self.assertResourceCalled('PropertiesFile', '/usr/hdp/current/spark-client/conf/spark-defaults.conf',
+        owner = 'spark',
         key_value_delimiter = ' ',
+        group = 'spark',
         properties = self.getConfig()['configurations']['spark-defaults'],
     )
     self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/spark-env.sh',


[02/50] [abbrv] ambari git commit: AMBARI-13232. Remove wrong properties from 2.2 YARN config (dlysnichenko)

Posted by nc...@apache.org.
AMBARI-13232. Remove wrong properties from 2.2 YARN config (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d66b4de0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d66b4de0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d66b4de0

Branch: refs/heads/branch-dev-patch-upgrade
Commit: d66b4de05cab88b3cfb5425c1a11a4f8ba65196e
Parents: 5773340
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Thu Sep 24 21:11:32 2015 +0300
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Thu Sep 24 21:12:41 2015 +0300

----------------------------------------------------------------------
 .../services/YARN/configuration/capacity-scheduler.xml    | 10 ----------
 .../services/YARN/configuration/capacity-scheduler.xml    | 10 ----------
 .../upgrade/catalog/UpgradeCatalog_1.3_to_2.2.json        |  4 ++--
 .../upgrade/catalog/UpgradeCatalog_2.0_to_2.2.2.json      |  4 ++--
 .../upgrade/catalog/UpgradeCatalog_2.0_to_2.2.4.json      |  4 ++--
 .../upgrade/catalog/UpgradeCatalog_2.1_to_2.2.2.json      |  4 ++--
 .../upgrade/catalog/UpgradeCatalog_2.1_to_2.2.4.json      |  4 ++--
 7 files changed, 10 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d66b4de0/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/capacity-scheduler.xml
index f1d4b48..8e79024 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/capacity-scheduler.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/capacity-scheduler.xml
@@ -41,16 +41,6 @@
     <description></description>
   </property>
   <property>
-    <name>yarn.scheduler.capacity.root.accessible-node-labels.default.capacity</name>
-    <value>-1</value>
-    <description></description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity</name>
-    <value>-1</value>
-    <description></description>
-  </property>
-  <property>
     <name>yarn.scheduler.capacity.root.default-node-label-expression</name>
     <value> </value>
     <description></description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d66b4de0/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/capacity-scheduler.xml
index fd9980c..f680b5b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/capacity-scheduler.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/capacity-scheduler.xml
@@ -18,16 +18,6 @@
 <configuration supports_final="false">
 
   <property>
-    <name>yarn.scheduler.capacity.root.accessible-node-labels.default.capacity</name>
-    <deleted>true</deleted>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity</name>
-    <deleted>true</deleted>
-  </property>
-
-  <property>
     <name>yarn.scheduler.capacity.root.default-node-label-expression</name>
     <deleted>true</deleted>
   </property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d66b4de0/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_1.3_to_2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_1.3_to_2.2.json b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_1.3_to_2.2.json
index d5087c3..102271e 100644
--- a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_1.3_to_2.2.json
+++ b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_1.3_to_2.2.json
@@ -162,8 +162,8 @@
           "yarn.scheduler.capacity.root.default.state": "RUNNING",
           "yarn.scheduler.capacity.root.default.user-limit-factor": "1",
           "yarn.scheduler.capacity.root.queues": "default",
-          "yarn.scheduler.capacity.root.accessible-node-labels.default.capacity": "-1",
-          "yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity": "-1",
+          "yarn.scheduler.capacity.root.accessible-node-labels.default.capacity": {"remove": "yes"},
+          "yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity": {"remove": "yes"},
           "yarn.scheduler.capacity.resource-calculator": "org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator",
           "yarn.scheduler.capacity.default.minimum-user-limit-percent": "100",
           "yarn.scheduler.capacity.node-locality-delay": "40",

http://git-wip-us.apache.org/repos/asf/ambari/blob/d66b4de0/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.2.json b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.2.json
index cc4400a..0d3e892 100644
--- a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.2.json
+++ b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.2.json
@@ -61,8 +61,8 @@
                 "capacity-scheduler": {
                     "yarn.scheduler.capacity.resource-calculator": "org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator",
                     "yarn.scheduler.capacity.root.accessible-node-labels": "*",
-                    "yarn.scheduler.capacity.root.accessible-node-labels.default.capacity": "-1",
-                    "yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity": "-1",
+                    "yarn.scheduler.capacity.root.accessible-node-labels.default.capacity": {"remove": "yes"},
+                    "yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity": {"remove": "yes"},
                     "yarn.scheduler.capacity.root.default-node-label-expression": " "
                 },
                 "core-site": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/d66b4de0/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.4.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.4.json b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.4.json
index 89c732a..ad360b6 100755
--- a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.4.json
+++ b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.4.json
@@ -73,8 +73,8 @@
                 "capacity-scheduler": {
                     "yarn.scheduler.capacity.resource-calculator": "org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator",
                     "yarn.scheduler.capacity.root.accessible-node-labels": "*",
-                    "yarn.scheduler.capacity.root.accessible-node-labels.default.capacity": "-1",
-                    "yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity": "-1",
+                    "yarn.scheduler.capacity.root.accessible-node-labels.default.capacity": {"remove": "yes"},
+                    "yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity": {"remove": "yes"},
                     "yarn.scheduler.capacity.root.default-node-label-expression": " "
                 },
                 "cluster-env": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/d66b4de0/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.2.json b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.2.json
index 017fb73..de28827 100644
--- a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.2.json
+++ b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.2.json
@@ -73,8 +73,8 @@
         "capacity-scheduler": {
           "yarn.scheduler.capacity.resource-calculator": "org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator",
           "yarn.scheduler.capacity.root.accessible-node-labels": "*",
-          "yarn.scheduler.capacity.root.accessible-node-labels.default.capacity": "-1",
-          "yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity": "-1",
+          "yarn.scheduler.capacity.root.accessible-node-labels.default.capacity": {"remove": "yes"},
+          "yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity": {"remove": "yes"},
           "yarn.scheduler.capacity.root.default-node-label-expression": " "    
         },
 		"core-site": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/d66b4de0/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.4.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.4.json b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.4.json
index 17f9030..9a4d3e2 100644
--- a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.4.json
+++ b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.4.json
@@ -79,8 +79,8 @@
         "capacity-scheduler": {
           "yarn.scheduler.capacity.resource-calculator": "org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator",
           "yarn.scheduler.capacity.root.accessible-node-labels": "*",
-          "yarn.scheduler.capacity.root.accessible-node-labels.default.capacity": "-1",
-          "yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity": "-1",
+          "yarn.scheduler.capacity.root.accessible-node-labels.default.capacity": {"remove": "yes"},
+          "yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity": {"remove": "yes"},
           "yarn.scheduler.capacity.root.default-node-label-expression": " "
         },
 		"cluster-env": {


[50/50] [abbrv] ambari git commit: AMBARI-12813 Increase OS kernel parameters for hbase (Juanjo Marron via dsen)

Posted by nc...@apache.org.
AMBARI-12813 Increase OS kernel parameters for hbase (Juanjo Marron via dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/bff61b83
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/bff61b83
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/bff61b83

Branch: refs/heads/branch-dev-patch-upgrade
Commit: bff61b83e962dfc7a8c7c9f8198bd038f16b6ec7
Parents: b3d36fb
Author: Dmytro Sen <ds...@apache.org>
Authored: Wed Sep 30 17:41:48 2015 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Wed Sep 30 17:41:48 2015 +0300

----------------------------------------------------------------------
 .../0.96.0.2.0/configuration/hbase-env.xml      | 12 ++++++-
 .../HBASE/0.96.0.2.0/package/scripts/hbase.py   | 20 +++++++++--
 .../0.96.0.2.0/package/scripts/params_linux.py  |  4 +++
 .../0.96.0.2.0/package/scripts/status_params.py |  1 +
 .../0.96.0.2.0/package/templates/hbase.conf.j2  | 35 ++++++++++++++++++++
 .../stacks/2.0.6/HBASE/test_hbase_client.py     | 28 ++++++++++++++--
 .../stacks/2.0.6/HBASE/test_hbase_master.py     | 34 ++++++++++++++++++-
 .../2.0.6/HBASE/test_hbase_regionserver.py      | 33 ++++++++++++++++++
 .../2.0.6/HBASE/test_phoenix_queryserver.py     | 24 +++++++++++++-
 9 files changed, 182 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/bff61b83/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-env.xml b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-env.xml
index 242e9a7..f1dd64f 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-env.xml
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-env.xml
@@ -86,12 +86,22 @@ and the -Xmn ratio (hbase_regionserver_xmn_ratio) exceeds this value.
       <increment-step>256</increment-step>
     </value-attributes>
   </property>
-   <property>
+  <property>
     <name>hbase_user</name>
     <value>hbase</value>
     <property-type>USER</property-type>
     <description>HBase User Name.</description>
   </property>
+  <property>
+    <name>hbase_user_nofile_limit</name>
+    <value>32000</value>
+    <description>Max open files limit setting for HBASE user.</description>
+  </property>
+  <property>
+    <name>hbase_user_nproc_limit</name>
+    <value>16000</value>
+    <description>Max number of processes limit setting for HBASE user.</description>
+  </property>
 
   <!-- hbase-env.sh -->
   <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/bff61b83/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
index 991f4e7..6b5369e 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
@@ -55,7 +55,7 @@ def hbase(name=None):
       group = params.user_group,
       recursive = True
   )
-  
+ 
   parent_dir = os.path.dirname(params.tmp_dir)
   # In case if we have several placeholders in path
   while ("${" in parent_dir):
@@ -119,8 +119,22 @@ def hbase(name=None):
        owner = params.hbase_user,
        content=InlineTemplate(params.hbase_env_sh_template),
        group = params.user_group,
-  )     
-       
+  )
+  
+  # On some OSes this folder may not exist, so create it before writing files into it
+  Directory(params.limits_conf_dir,
+            recursive=True,
+            owner='root',
+            group='root'
+            )
+  
+  File(os.path.join(params.limits_conf_dir, 'hbase.conf'),
+       owner='root',
+       group='root',
+       mode=0644,
+       content=Template("hbase.conf.j2")
+       )
+    
   hbase_TemplateConfig( params.metric_prop_file_name,
     tag = 'GANGLIA-MASTER' if name == 'master' else 'GANGLIA-RS'
   )

http://git-wip-us.apache.org/repos/asf/ambari/blob/bff61b83/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index f22b035..635be5e 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -75,6 +75,10 @@ if Script.is_hdp_stack_greater_or_equal("2.2"):
 
 
 hbase_conf_dir = status_params.hbase_conf_dir
+limits_conf_dir = status_params.limits_conf_dir
+
+hbase_user_nofile_limit = default("/configurations/hbase-env/hbase_user_nofile_limit", "32000")
+hbase_user_nproc_limit = default("/configurations/hbase-env/hbase_user_nproc_limit", "16000")
 
 # no symlink for phoenix-server at this point
 phx_daemon_script = '/usr/hdp/current/phoenix-server/bin/queryserver.py'
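
[editor note] The default() calls above keep upgraded clusters working when hbase-env does not yet define the new keys. As a rough illustration of that fallback behavior (a toy reimplementation, not Ambari's helper, which reads the command config implicitly):

    # Illustrative only: return the configured value when the key path
    # exists in the config dict, else the supplied fallback.
    def default(config, path, fallback):
        node = config
        for part in path.strip("/").split("/"):
            if not isinstance(node, dict) or part not in node:
                return fallback
            node = node[part]
        return node

    conf = {"configurations": {"hbase-env": {}}}
    assert default(conf, "/configurations/hbase-env/hbase_user_nofile_limit", "32000") == "32000"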

http://git-wip-us.apache.org/repos/asf/ambari/blob/bff61b83/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/status_params.py
index 084ee06..014e8d7 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/status_params.py
@@ -50,5 +50,6 @@ else:
   tmp_dir = Script.get_tmp_dir()
 
   hbase_conf_dir = "/etc/hbase/conf"
+  limits_conf_dir = "/etc/security/limits.d"
   if Script.is_hdp_stack_greater_or_equal("2.2"):
     hbase_conf_dir = format("/usr/hdp/current/{component_directory}/conf")
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/bff61b83/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hbase.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hbase.conf.j2 b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hbase.conf.j2
new file mode 100644
index 0000000..3580db0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hbase.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{hbase_user}}   - nofile   {{hbase_user_nofile_limit}}
+{{hbase_user}}   - nproc    {{hbase_user_nproc_limit}}
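
[editor note] With the default hbase-env values introduced above, the rendered limits file contains one nofile and one nproc line for the hbase user. A quick sketch of the rendering, assuming a jinja2 install (the inline template mirrors the two lines above):

    # Hedged sketch: render the two limits lines with the default values.
    import jinja2

    template = jinja2.Template(
        "{{hbase_user}}   - nofile   {{hbase_user_nofile_limit}}\n"
        "{{hbase_user}}   - nproc    {{hbase_user_nproc_limit}}\n"
    )
    print(template.render(hbase_user="hbase",
                          hbase_user_nofile_limit="32000",
                          hbase_user_nproc_limit="16000"))
    # hbase   - nofile   32000
    # hbase   - nproc    16000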

http://git-wip-us.apache.org/repos/asf/ambari/blob/bff61b83/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
index 9959874..8d96849 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
@@ -90,6 +90,17 @@ class TestHBaseClient(RMFTestCase):
         owner = 'hbase',
         group = 'hadoop'
     )
+    self.assertResourceCalled('Directory', '/etc/security/limits.d',
+      owner = 'root',
+      group = 'root',
+      recursive = True,
+    )
+    self.assertResourceCalled('File', '/etc/security/limits.d/hbase.conf',
+      content = Template('hbase.conf.j2'),
+      owner = 'root',
+      group = 'root',
+      mode = 0644,
+    )
     self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
       owner = 'hbase',
       template_tag = 'GANGLIA-RS',
@@ -167,9 +178,20 @@ class TestHBaseClient(RMFTestCase):
       group = 'hadoop',
     )
     self.assertResourceCalled('File', '/etc/hbase/conf/hbase-env.sh',
-        content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']),
-        owner = 'hbase',
-        group = 'hadoop',
+      content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']),
+      owner = 'hbase',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('Directory', '/etc/security/limits.d',
+      owner = 'root',
+      group = 'root',
+      recursive = True,
+    )
+    self.assertResourceCalled('File', '/etc/security/limits.d/hbase.conf',
+      content = Template('hbase.conf.j2'),
+      owner = 'root',
+      group = 'root',
+      mode = 0644,
     )
     self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
       owner = 'hbase',

http://git-wip-us.apache.org/repos/asf/ambari/blob/bff61b83/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index 74b4aa6..4ca74a8 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -277,6 +277,17 @@ class TestHBaseMaster(RMFTestCase):
       content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']),
       group = 'hadoop',
     )
+    self.assertResourceCalled('Directory', '/etc/security/limits.d',
+      owner = 'root',
+      group = 'root',
+      recursive = True,
+    )
+    self.assertResourceCalled('File', '/etc/security/limits.d/hbase.conf',
+      content = Template('hbase.conf.j2'),
+      owner = 'root',
+      group = 'root',
+      mode = 0644,
+    )
     self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
       owner = 'hbase',
       template_tag = 'GANGLIA-MASTER',
@@ -393,6 +404,17 @@ class TestHBaseMaster(RMFTestCase):
       content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']),
       group = 'hadoop',
     )
+    self.assertResourceCalled('Directory', '/etc/security/limits.d',
+      owner = 'root',
+      group = 'root',
+      recursive = True,
+    )
+    self.assertResourceCalled('File', '/etc/security/limits.d/hbase.conf',
+      content = Template('hbase.conf.j2'),
+      owner = 'root',
+      group = 'root',
+      mode = 0644,
+    )
     self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
       owner = 'hbase',
       template_tag = 'GANGLIA-MASTER',
@@ -524,7 +546,17 @@ class TestHBaseMaster(RMFTestCase):
       content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']),
       group = 'hadoop'
     )
-
+    self.assertResourceCalled('Directory', '/etc/security/limits.d',
+      owner = 'root',
+      group = 'root',
+      recursive = True,
+    )
+    self.assertResourceCalled('File', '/etc/security/limits.d/hbase.conf',
+      content = Template('hbase.conf.j2'),
+      owner = 'root',
+      group = 'root',
+      mode = 0644,
+    )
     self.assertResourceCalled('TemplateConfig', '/usr/hdp/current/hbase-master/conf/hadoop-metrics2-hbase.properties',
       owner = 'hbase',
       template_tag = 'GANGLIA-MASTER')

http://git-wip-us.apache.org/repos/asf/ambari/blob/bff61b83/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
index fa134b4..e22584a 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
@@ -177,6 +177,17 @@ class TestHbaseRegionServer(RMFTestCase):
       content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']),
       group = 'hadoop',
     )
+    self.assertResourceCalled('Directory', '/etc/security/limits.d',
+      owner = 'root',
+      group = 'root',
+      recursive = True,
+    )
+    self.assertResourceCalled('File', '/etc/security/limits.d/hbase.conf',
+      content = Template('hbase.conf.j2'),
+      owner = 'root',
+      group = 'root',
+      mode = 0644,
+    )
     self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
       owner = 'hbase',
       template_tag = 'GANGLIA-RS',
@@ -254,6 +265,17 @@ class TestHbaseRegionServer(RMFTestCase):
       content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']),
       group = 'hadoop',
     )
+    self.assertResourceCalled('Directory', '/etc/security/limits.d',
+        owner = 'root',
+        group = 'root',
+        recursive = True,
+    )
+    self.assertResourceCalled('File', '/etc/security/limits.d/hbase.conf',
+        content = Template('hbase.conf.j2'),
+        owner = 'root',
+        group = 'root',
+        mode = 0644,
+    )
     self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
       owner = 'hbase',
       template_tag = 'GANGLIA-RS',
@@ -346,6 +368,17 @@ class TestHbaseRegionServer(RMFTestCase):
       content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']),
       group = 'hadoop'
     )
+    self.assertResourceCalled('Directory', '/etc/security/limits.d',
+      owner = 'root',
+      group = 'root',
+      recursive = True,
+    )
+    self.assertResourceCalled('File', '/etc/security/limits.d/hbase.conf',
+      content = Template('hbase.conf.j2'),
+      owner = 'root',
+      group = 'root',
+      mode = 0644,
+    )
 
     self.assertResourceCalled('TemplateConfig', '/usr/hdp/current/hbase-regionserver/conf/hadoop-metrics2-hbase.properties',
       owner = 'hbase',

http://git-wip-us.apache.org/repos/asf/ambari/blob/bff61b83/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
index 51a9edc..2cb535c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
@@ -271,6 +271,17 @@ class TestPhoenixQueryServer(RMFTestCase):
         self.getConfig()['configurations']['hbase-env']['content']),
       group = 'hadoop',
     )
+    self.assertResourceCalled('Directory', '/etc/security/limits.d',
+      owner = 'root',
+      group = 'root',
+      recursive = True,
+    )
+    self.assertResourceCalled('File', '/etc/security/limits.d/hbase.conf',
+      content = Template('hbase.conf.j2'),
+      owner = 'root',
+      group = 'root',
+      mode = 0644,
+    )    
     self.assertResourceCalled('TemplateConfig',
       '/usr/hdp/current/hbase-regionserver/conf/hadoop-metrics2-hbase.properties',
       owner = 'hbase',
@@ -361,6 +372,17 @@ class TestPhoenixQueryServer(RMFTestCase):
         self.getConfig()['configurations']['hbase-env']['content']),
       group = 'hadoop',
     )
+    self.assertResourceCalled('Directory', '/etc/security/limits.d',
+      owner = 'root',
+      group = 'root',
+      recursive = True,
+    )
+    self.assertResourceCalled('File', '/etc/security/limits.d/hbase.conf',
+      content = Template('hbase.conf.j2'),
+      owner = 'root',
+      group = 'root',
+      mode = 0644,
+    )
     self.assertResourceCalled('TemplateConfig',
       '/usr/hdp/current/hbase-regionserver/conf/hadoop-metrics2-hbase.properties',
       owner = 'hbase',
@@ -415,4 +437,4 @@ class TestPhoenixQueryServer(RMFTestCase):
     )
     self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'phoenix-server', '2.3.0.0-1234'), sudo=True)
 
-    self.assertNoMoreResources()
\ No newline at end of file
+    self.assertNoMoreResources()


[35/50] [abbrv] ambari git commit: AMBARI-13251. RU - HDFS_Client restart and hdp-select causes dfs_data_dir_mount.hist to be lost, move file to static location (alejandro)

Posted by nc...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.2/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/default.json b/ambari-server/src/test/python/stacks/2.2/configs/default.json
index dcd38d6..5a5554e 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/default.json
@@ -158,7 +158,6 @@
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"", 
             "hdfs_user": "hdfs", 
             "namenode_opt_newsize": "256m", 
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
             "hadoop_root_logger": "INFO,RFA", 
             "hadoop_heapsize": "1024", 
             "namenode_opt_maxpermsize": "256m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.2/configs/default_custom_path_config.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/default_custom_path_config.json b/ambari-server/src/test/python/stacks/2.2/configs/default_custom_path_config.json
index 2c9c918..a70f4c1 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/default_custom_path_config.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/default_custom_path_config.json
@@ -157,7 +157,6 @@
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"",
             "hdfs_user": "hdfs",
             "namenode_opt_newsize": "256m",
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist",
             "hadoop_root_logger": "INFO,RFA",
             "hadoop_heapsize": "1024",
             "namenode_opt_maxpermsize": "256m",

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.2/configs/falcon-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/falcon-upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/falcon-upgrade.json
index a4f9dc9..0a5f6e9 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/falcon-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/falcon-upgrade.json
@@ -214,7 +214,6 @@
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"", 
             "hdfs_user": "hdfs", 
             "namenode_opt_newsize": "200m", 
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
             "hadoop_root_logger": "INFO,RFA", 
             "hadoop_heapsize": "1024", 
             "namenode_opt_maxpermsize": "256m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
index 95456f9..99d0e83 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
@@ -323,7 +323,6 @@
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"",
             "hdfs_user": "hdfs",
             "namenode_opt_newsize": "200m",
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist",
             "hadoop_root_logger": "INFO,RFA",
             "hadoop_heapsize": "1024",
             "namenode_opt_maxpermsize": "256m",

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade-hdfs-secure.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade-hdfs-secure.json b/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade-hdfs-secure.json
index e06882a..8734556 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade-hdfs-secure.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade-hdfs-secure.json
@@ -740,7 +740,6 @@
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"", 
             "hdfs_user": "hdfs", 
             "namenode_opt_newsize": "256m", 
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
             "hadoop_root_logger": "INFO,RFA", 
             "hadoop_heapsize": "1024", 
             "namenode_opt_maxpermsize": "256m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade.json
index 1f23f11..8d9fb7b 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade.json
@@ -740,7 +740,6 @@
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"", 
             "hdfs_user": "hdfs", 
             "namenode_opt_newsize": "256m", 
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
             "hadoop_root_logger": "INFO,RFA", 
             "hadoop_heapsize": "1024", 
             "namenode_opt_maxpermsize": "256m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
index c77b3ec..41bfec4 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
@@ -111,7 +111,6 @@
             "namenode_opt_maxpermsize": "256m", 
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP
 _DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryEr
 ror=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.secur
 ity.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/
 $USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null
 `\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/usr/hdp/current/tez-client/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/usr/hdp/current/tez-client/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VE
 RSION $HADOOP_OPTS\"", 
             "namenode_heapsize": "1024m", 
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
             "namenode_opt_newsize": "128m", 
             "nfsgateway_heapsize": "1024", 
             "dtnode_heapsize": "1024m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.2/configs/oozie-downgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/oozie-downgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/oozie-downgrade.json
index e9018d8..40a6e95 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/oozie-downgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/oozie-downgrade.json
@@ -122,7 +122,6 @@
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"", 
             "hdfs_user": "hdfs", 
             "namenode_opt_newsize": "200m", 
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
             "hadoop_root_logger": "INFO,RFA", 
             "hadoop_heapsize": "1024", 
             "namenode_opt_maxpermsize": "256m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.2/configs/oozie-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/oozie-upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/oozie-upgrade.json
index fc4df58..66089c8 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/oozie-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/oozie-upgrade.json
@@ -122,7 +122,6 @@
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"", 
             "hdfs_user": "hdfs", 
             "namenode_opt_newsize": "200m", 
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
             "hadoop_root_logger": "INFO,RFA", 
             "hadoop_heapsize": "1024", 
             "namenode_opt_maxpermsize": "256m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json b/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json
index ad7a75a..2cc66a6 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json
@@ -343,7 +343,6 @@
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"", 
             "hdfs_user": "hdfs", 
             "namenode_opt_newsize": "256m", 
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
             "hadoop_root_logger": "INFO,RFA", 
             "hadoop_heapsize": "1024", 
             "namenode_opt_maxpermsize": "256m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-default.json b/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-default.json
index f012a17..f39df97 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-default.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-default.json
@@ -128,7 +128,6 @@
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"", 
             "hdfs_user": "hdfs", 
             "namenode_opt_newsize": "256m", 
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
             "hadoop_root_logger": "INFO,RFA", 
             "hadoop_heapsize": "1024", 
             "namenode_opt_maxpermsize": "256m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-upgrade.json
index f47a247..c04bdd1 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-upgrade.json
@@ -613,7 +613,6 @@
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"", 
             "hdfs_user": "hdfs", 
             "namenode_opt_newsize": "256m", 
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
             "hadoop_root_logger": "INFO,RFA", 
             "hadoop_heapsize": "1024", 
             "namenode_opt_maxpermsize": "256m", 


[22/50] [abbrv] ambari git commit: AMBARI-13248: Parallel library should process all futures even if one of them throws an exception (jluniya)

Posted by nc...@apache.org.
AMBARI-13248: Parallel library should process all futures even if one of them throws an exception (jluniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/474e4086
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/474e4086
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/474e4086

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 474e40862f0e66d12d333d83509453ab5c8728e6
Parents: 156afda
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Sat Sep 26 11:27:32 2015 -0700
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Sat Sep 26 11:27:32 2015 -0700

----------------------------------------------------------------------
 .../apache/ambari/server/utils/Parallel.java    | 30 ++++++++++------
 .../ambari/server/utils/TestParallel.java       | 37 +++++++++++++++++++-
 2 files changed, 56 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/474e4086/ambari-server/src/main/java/org/apache/ambari/server/utils/Parallel.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/utils/Parallel.java b/ambari-server/src/main/java/org/apache/ambari/server/utils/Parallel.java
index c6e2156..0a3e6c4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/utils/Parallel.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/Parallel.java
@@ -23,6 +23,7 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.Callable;
+import java.util.concurrent.CancellationException;
 import java.util.concurrent.CompletionService;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorCompletionService;
@@ -185,11 +186,16 @@ public class Parallel {
 
     boolean completed = true;
     R[] result = (R[]) new Object[futures.size()];
-    try {
-      for (int i = 0; i < futures.size(); i++) {
-        Future<ResultWrapper<R>> futureResult = completionService.poll(POLL_DURATION_MILLISECONDS, TimeUnit.MILLISECONDS);
+    for (int i = 0; i < futures.size(); i++) {
+      try {
+        Future<ResultWrapper<R>> futureResult = null;
+        try {
+          futureResult = completionService.poll(POLL_DURATION_MILLISECONDS, TimeUnit.MILLISECONDS);
+        } catch (InterruptedException e) {
+          LOG.error("Caught InterruptedException in Parallel.forLoop", e);
+        }
         if (futureResult == null) {
-          // Time out! no progress was made during the last poll duration. Abort the threads and cancel the threads.
+          // Timed out! No progress was made during the last poll duration. Abort and cancel the remaining tasks.
           LOG.error("Completion service in Parallel.forLoop timed out!");
           completed = false;
           for(int fIndex = 0; fIndex < futures.size(); fIndex++) {
@@ -204,6 +210,7 @@ public class Parallel {
               LOG.debug("    Task - {} successfully cancelled", fIndex);
             }
           }
+          // Stop processing; the remaining futures were cancelled
           break;
         } else {
           ResultWrapper<R> res = futureResult.get();
@@ -214,13 +221,16 @@ public class Parallel {
             completed = false;
           }
         }
+      } catch (InterruptedException e) {
+        LOG.error("Caught InterruptedException in Parallel.forLoop", e);
+        completed = false;
+      } catch (ExecutionException e) {
+        LOG.error("Caught ExecutionException in Parallel.forLoop", e);
+        completed = false;
+      } catch (CancellationException e) {
+        LOG.error("Caught CancellationException in Parallel.forLoop", e);
+        completed = false;
       }
-    } catch (InterruptedException e) {
-      LOG.error("Caught InterruptedException in Parallel.forLoop", e);
-      completed = false;
-    } catch (ExecutionException e) {
-      LOG.error("Caught ExecutionException in Parallel.forLoop", e);
-      completed = false;
     }
     // Return parallel loop result
     return new ParallelLoopResult<R>(completed, Arrays.asList(result));

http://git-wip-us.apache.org/repos/asf/ambari/blob/474e4086/ambari-server/src/test/java/org/apache/ambari/server/utils/TestParallel.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/utils/TestParallel.java b/ambari-server/src/test/java/org/apache/ambari/server/utils/TestParallel.java
index 0628f20..bfeb446 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/utils/TestParallel.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/utils/TestParallel.java
@@ -109,7 +109,7 @@ public class TestParallel {
    * @throws Exception
    */
   @Test
-  public void testNestedParallelForLoopIterationFailures() throws Exception {
+  public void testNestedParallelForLoop() throws Exception {
     final List<Integer> input = new LinkedList<Integer>();
     for(int i = 0; i < 10; i++) {
       input.add(i);
@@ -185,4 +185,39 @@ public class TestParallel {
       }
     }
   }
+
+  /**
+   * Tests {@link org.apache.ambari.server.utils.Parallel} forLoop iteration exceptions
+   * @throws Exception
+   */
+  @Test
+  public void testParallelForLoopIterationExceptions() throws Exception {
+    final List<Integer> input = new LinkedList<Integer>();
+    for(int i = 0; i < 10; i++) {
+      input.add(i);
+    }
+    final List<Integer> failForList = Arrays.asList(new Integer[] { 2, 5, 7});
+    ParallelLoopResult<Integer> loopResult = Parallel.forLoop(input, new LoopBody<Integer, Integer>() {
+      @Override
+      public Integer run(Integer in1) {
+        if(failForList.contains(in1)) {
+          throw new RuntimeException("Ignore this exception");
+        }
+        return in1 * in1;
+      }
+    });
+    Assert.assertFalse(loopResult.getIsCompleted());
+    Assert.assertNotNull(loopResult.getResult());
+    List<Integer> output = loopResult.getResult();
+    Assert.assertEquals(input.size(), output.size());
+
+    for(int i = 0; i < input.size(); i++) {
+      if(failForList.contains(i)) {
+        Assert.assertNull(output.get(i));
+        output.set(i, i * i);
+      } else {
+        Assert.assertEquals(i * i, (int) output.get(i));
+      }
+    }
+  }
 }
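
For context, the pattern after this change, reduced to a minimal self-contained sketch (illustrative only, not part of the commit; the task bodies and counts are made up): each future is drained inside its own try/catch, so an exception from one task is logged and recorded without abandoning the rest.

import java.util.concurrent.Callable;
import java.util.concurrent.CancellationException;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class ResilientForLoopSketch {
  public static void main(String[] args) throws InterruptedException {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    CompletionService<Integer> completionService =
        new ExecutorCompletionService<Integer>(pool);
    int taskCount = 10;
    for (int i = 0; i < taskCount; i++) {
      final int input = i;
      completionService.submit(new Callable<Integer>() {
        @Override
        public Integer call() {
          if (input % 3 == 0) {
            throw new RuntimeException("Ignore this exception"); // mirrors the new test
          }
          return input * input;
        }
      });
    }
    boolean completed = true;
    for (int i = 0; i < taskCount; i++) {
      // The try/catch now lives inside the loop, so one bad task no longer
      // short-circuits the remaining futures.
      try {
        Future<Integer> future = completionService.poll(5, TimeUnit.SECONDS);
        if (future == null) {          // no progress within the poll window
          completed = false;
          break;                       // the real code also cancels outstanding futures here
        }
        Integer value = future.get();  // throws ExecutionException for the failed tasks
        System.out.println("got " + value);
      } catch (ExecutionException e) {
        completed = false;             // record the failure, keep draining the rest
      } catch (CancellationException e) {
        completed = false;
      }
    }
    pool.shutdown();
    System.out.println("completed=" + completed);
  }
}

Note that a CompletionService yields results in completion order, not submission order, which is why the real Parallel.forLoop wraps each result with its input index (ResultWrapper) before storing it into the result array.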


[33/50] [abbrv] ambari git commit: AMBARI-13260. Storm service check fails after manual stack upgrade. (mpapirkovskyy)

Posted by nc...@apache.org.
AMBARI-13260. Storm service check fails after manual stack upgrade. (mpapirkovskyy)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4d44ec7c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4d44ec7c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4d44ec7c

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 4d44ec7cc4e150d88069c6870477234d8be721ff
Parents: a6ed363
Author: Myroslav Papirkovskyy <mp...@hortonworks.com>
Authored: Mon Sep 28 20:34:36 2015 +0300
Committer: Myroslav Papirkovskyy <mp...@hortonworks.com>
Committed: Mon Sep 28 20:34:36 2015 +0300

----------------------------------------------------------------------
 .../upgrade/catalog/UpgradeCatalog_2.1_to_2.3.json          | 9 +++++++++
 1 file changed, 9 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4d44ec7c/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.3.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.3.json b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.3.json
index 250f5b2..422b86b 100644
--- a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.3.json
+++ b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.3.json
@@ -47,10 +47,19 @@
           },
           "storm-site":{
             "merged-copy": "yes"
+          },
+          "storm-env": {
+            "merged-copy": "yes",
+            "required-services": [
+              "STORM"
+            ]
           }
         }
       },
       "properties": {
+        "storm-env": {
+          "nimbus_seeds_supported": "true"
+        },
         "storm-site": {
           "supervisor.childopts": "-Xmx256m _JAAS_PLACEHOLDER -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port={{jmxremote_port}} -javaagent:/usr/hdp/current/storm-supervisor/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-supervisor/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM",
           "nimbus.childopts": "-Xmx1024m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm-nimbus/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-nimbus/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM",


[40/50] [abbrv] ambari git commit: AMBARI-13263. Create a Ranger theme with Ranger Admin. (jaimin)

Posted by nc...@apache.org.
AMBARI-13263. Create a Ranger theme with Ranger Admin. (jaimin)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/61540bbb
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/61540bbb
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/61540bbb

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 61540bbb54d023b52ce7e59fee81a067a1cbdcc8
Parents: 2b34016
Author: Jaimin Jetly <ja...@hortonworks.com>
Authored: Mon Sep 28 20:22:26 2015 -0700
Committer: Jaimin Jetly <ja...@hortonworks.com>
Committed: Mon Sep 28 20:23:52 2015 -0700

----------------------------------------------------------------------
 .../server/state/ValueAttributesInfo.java       |  16 +
 .../server/state/theme/ConfigCondition.java     |  87 +++++
 .../server/state/theme/ConfigPlacement.java     |  26 ++
 .../ambari/server/state/theme/Subsection.java   |  28 ++
 .../ambari/server/state/theme/Widget.java       |  23 +-
 .../0.4.0/configuration/admin-properties.xml    |  12 +
 .../RANGER/0.4.0/configuration/ranger-env.xml   |   3 +-
 .../RANGER/configuration/admin-properties.xml   |  58 +++
 .../RANGER/configuration/ranger-env.xml         |  21 +-
 .../stacks/HDP/2.3/services/RANGER/metainfo.xml |   7 +
 .../HDP/2.3/services/RANGER/themes/theme.json   | 294 +++++++++++++++
 ambari-web/app/app.js                           |   2 +
 .../controllers/main/service/info/configs.js    |   9 +-
 .../app/controllers/wizard/step7_controller.js  |   2 +-
 ambari-web/app/data/HDP2.2/site_properties.js   |  13 +-
 ambari-web/app/data/HDP2.3/site_properties.js   |  44 +--
 ambari-web/app/mappers/configs/themes_mapper.js |  84 ++++-
 ambari-web/app/models.js                        |   1 +
 .../app/models/configs/config_condition.js      |  60 +++
 ambari-web/app/models/configs/section.js        |   6 +-
 .../app/models/configs/stack_config_property.js |  10 +
 ambari-web/app/models/configs/sub_section.js    |  21 +-
 .../configs/service_config_layout_tab.hbs       |  10 +-
 .../widgets/test_db_connection_widget.hbs       |  35 ++
 ambari-web/app/utils/config.js                  |  43 ++-
 ambari-web/app/views.js                         |   1 +
 .../configs/service_config_layout_tab_view.js   |  24 +-
 .../configs/widgets/config_widget_view.js       |  50 +++
 .../widgets/password_config_widget_view.js      |   1 +
 .../widgets/test_db_connection_widget_view.js   | 364 +++++++++++++++++++
 .../test/mappers/configs/themes_mapper_test.js  |   2 +
 ambari-web/test/models/configs/section_test.js  |  12 +-
 32 files changed, 1278 insertions(+), 91 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-server/src/main/java/org/apache/ambari/server/state/ValueAttributesInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ValueAttributesInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ValueAttributesInfo.java
index e8cd074..3f7f756 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ValueAttributesInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ValueAttributesInfo.java
@@ -43,6 +43,10 @@ public class ValueAttributesInfo {
   @JsonProperty("empty_value_valid")
   private Boolean emptyValueValid;
 
+  @XmlElement(name = "ui-only-property")
+  @JsonProperty("ui_only_property")
+  private Boolean uiOnlyProperty;
+
   @XmlElement(name = "read-only")
   @JsonProperty("read_only")
   private Boolean readOnly;
@@ -194,6 +198,14 @@ public class ValueAttributesInfo {
     this.showPropertyName = isPropertyNameVisible;
   }
 
+  public Boolean getUiOnlyProperty() {
+    return uiOnlyProperty;
+  }
+
+  public void setUiOnlyProperty(Boolean isUiOnlyProperty) {
+    this.uiOnlyProperty = isUiOnlyProperty;
+  }
+
   @Override
   public boolean equals(Object o) {
     if (this == o) return true;
@@ -216,6 +228,8 @@ public class ValueAttributesInfo {
       return false;
     if (showPropertyName != null ? !showPropertyName.equals(that.showPropertyName) : that.showPropertyName != null)
       return false;
+    if (uiOnlyProperty != null ? !uiOnlyProperty.equals(that.uiOnlyProperty) : that.uiOnlyProperty != null)
+      return false;
     if (maximum != null ? !maximum.equals(that.maximum) : that.maximum != null) return false;
     if (minimum != null ? !minimum.equals(that.minimum) : that.minimum != null) return false;
     if (selectionCardinality != null ? !selectionCardinality.equals(that.selectionCardinality) : that.selectionCardinality != null)
@@ -245,6 +259,7 @@ public class ValueAttributesInfo {
     result = 31 * result + (editableOnlyAtInstall != null ? editableOnlyAtInstall.hashCode() : 0);
     result = 31 * result + (overridable != null ? overridable.hashCode() : 0);
     result = 31 * result + (showPropertyName != null ? showPropertyName.hashCode() : 0);
+    result = 31 * result + (uiOnlyProperty != null ? uiOnlyProperty.hashCode() : 0);
     return result;
   }
 
@@ -263,6 +278,7 @@ public class ValueAttributesInfo {
       ", editableOnlyAtInstall='" + editableOnlyAtInstall + '\'' +
       ", overridable='" + overridable + '\'' +
       ", showPropertyName='" + showPropertyName + '\'' +
+      ", uiOnlyProperty='" + uiOnlyProperty + '\'' +
       ", incrementStep='" + incrementStep + '\'' +
       ", entriesEditable=" + entriesEditable +
       ", selectionCardinality='" + selectionCardinality + '\'' +

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-server/src/main/java/org/apache/ambari/server/state/theme/ConfigCondition.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/theme/ConfigCondition.java b/ambari-server/src/main/java/org/apache/ambari/server/state/theme/ConfigCondition.java
new file mode 100644
index 0000000..2d98660
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/theme/ConfigCondition.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.state.theme;
+
+import org.apache.ambari.server.state.ValueAttributesInfo;
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import org.codehaus.jackson.annotate.JsonProperty;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+import java.util.List;
+
+@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class ConfigCondition {
+  @JsonProperty("configs")
+  private List<String> configs;
+  @JsonProperty("if")
+  private String ifLabel;
+  @JsonProperty("then")
+  private ConfigConditionResult then;
+  @JsonProperty("else")
+  private ConfigConditionResult elseLabel;
+
+  public List<String> getConfigs() {
+    return configs;
+  }
+
+  public void setConfigs(List<String> configs) {
+    this.configs = configs;
+  }
+
+  public String getIfLabel() {
+    return ifLabel;
+  }
+
+  public void setIfLabel(String ifLabel) {
+    this.ifLabel = ifLabel;
+  }
+
+  public ConfigConditionResult getThen() {
+    return then;
+  }
+
+  public void setThen(ConfigConditionResult then) {
+    this.then = then;
+  }
+
+  public ConfigConditionResult getElseLabel() {
+    return elseLabel;
+  }
+
+  public void setElseLabel(ConfigConditionResult elseLabel) {
+    this.elseLabel = elseLabel;
+  }
+
+  @JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
+  @JsonIgnoreProperties(ignoreUnknown = true)
+  public class ConfigConditionResult {
+    @JsonProperty("property_value_attributes")
+    private ValueAttributesInfo propertyValueAttributes;
+
+    public ValueAttributesInfo getPropertyValueAttributes() {
+      return propertyValueAttributes;
+    }
+
+    public void setPropertyValueAttributes(ValueAttributesInfo propertyValueAttributes) {
+      this.propertyValueAttributes = propertyValueAttributes;
+    }
+  }
+
+}
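
For orientation, a small illustrative sketch (not part of the commit) of how one "depends-on" entry from the Ranger theme.json added later in this commit maps onto this model when built by hand. It assumes ValueAttributesInfo has a usable default constructor; the then/else results use the enclosing-instance syntax because ConfigConditionResult is a non-static inner class.

import java.util.Arrays;

import org.apache.ambari.server.state.ValueAttributesInfo;
import org.apache.ambari.server.state.theme.ConfigCondition;

public class ConfigConditionSketch {
  public static void main(String[] args) {
    // Mirrors: {"configs": ["ranger-env/create_db_dbuser"],
    //           "if": "${ranger-env/create_db_dbuser}", "then": ..., "else": ...}
    ConfigCondition condition = new ConfigCondition();
    condition.setConfigs(Arrays.asList("ranger-env/create_db_dbuser"));
    condition.setIfLabel("${ranger-env/create_db_dbuser}");

    // "then" result: attributes to apply when the condition evaluates to true.
    // The "visible" flag from the JSON would be set on ValueAttributesInfo here;
    // its setter is not shown in this diff, so it is omitted from the sketch.
    ConfigCondition.ConfigConditionResult shown = condition.new ConfigConditionResult();
    shown.setPropertyValueAttributes(new ValueAttributesInfo());
    condition.setThen(shown);

    // "else" result: attributes to apply otherwise.
    ConfigCondition.ConfigConditionResult hidden = condition.new ConfigConditionResult();
    hidden.setPropertyValueAttributes(new ValueAttributesInfo());
    condition.setElseLabel(hidden);

    System.out.println(condition.getIfLabel());
  }
}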

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-server/src/main/java/org/apache/ambari/server/state/theme/ConfigPlacement.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/theme/ConfigPlacement.java b/ambari-server/src/main/java/org/apache/ambari/server/state/theme/ConfigPlacement.java
index c20cd8e..56d2ea2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/theme/ConfigPlacement.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/theme/ConfigPlacement.java
@@ -18,10 +18,13 @@
 
 package org.apache.ambari.server.state.theme;
 
+import org.apache.ambari.server.state.ValueAttributesInfo;
 import org.codehaus.jackson.annotate.JsonIgnoreProperties;
 import org.codehaus.jackson.annotate.JsonProperty;
 import org.codehaus.jackson.map.annotate.JsonSerialize;
 
+import java.util.List;
+
 @JsonSerialize(include= JsonSerialize.Inclusion.NON_NULL)
 @JsonIgnoreProperties(ignoreUnknown = true)
 public class ConfigPlacement {
@@ -30,6 +33,13 @@ public class ConfigPlacement {
 	@JsonProperty("subsection-name")
 	private String subsectionName;
 
+  @JsonProperty("property_value_attributes")
+  private ValueAttributesInfo propertyValueAttributes;
+
+  @JsonProperty("depends-on")
+  private List<ConfigCondition> dependsOn;
+
+
   public String getConfig() {
     return config;
   }
@@ -46,6 +56,22 @@ public class ConfigPlacement {
     this.subsectionName = subsectionName;
   }
 
+  public ValueAttributesInfo getPropertyValueAttributes() {
+    return propertyValueAttributes;
+  }
+
+  public void setPropertyValueAttributes(ValueAttributesInfo propertyValueAttributes) {
+    this.propertyValueAttributes = propertyValueAttributes;
+  }
+
+  public List<ConfigCondition> getDependsOn() {
+    return dependsOn;
+  }
+
+  public void setDependsOn(List<ConfigCondition> dependsOn) {
+    this.dependsOn = dependsOn;
+  }
+
   public boolean isRemoved() {
     return subsectionName == null;
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-server/src/main/java/org/apache/ambari/server/state/theme/Subsection.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/theme/Subsection.java b/ambari-server/src/main/java/org/apache/ambari/server/state/theme/Subsection.java
index b86b51f..0397545 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/theme/Subsection.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/theme/Subsection.java
@@ -23,6 +23,8 @@ import org.codehaus.jackson.annotate.JsonIgnoreProperties;
 import org.codehaus.jackson.annotate.JsonProperty;
 import org.codehaus.jackson.map.annotate.JsonSerialize;
 
+import java.util.List;
+
 
 @JsonSerialize(include= JsonSerialize.Inclusion.NON_NULL)
 @JsonIgnoreProperties(ignoreUnknown = true)
@@ -41,6 +43,10 @@ public class Subsection {
 	private String columnIndex;
   @JsonProperty("border")
 	private String border;
+  @JsonProperty("left-vertical-splitter")
+  private Boolean leftVerticalSplitter;
+  @JsonProperty("depends-on")
+  private List<ConfigCondition> dependsOn;
 
 
   public String getRowIndex() {
@@ -99,6 +105,22 @@ public class Subsection {
     this.border = border;
   }
 
+  public Boolean getLeftVerticalSplitter() {
+    return leftVerticalSplitter;
+  }
+
+  public void setLeftVerticalSplitter(Boolean leftVerticalSplitter) {
+    this.leftVerticalSplitter = leftVerticalSplitter;
+  }
+
+  public List<ConfigCondition> getDependsOn() {
+    return dependsOn;
+  }
+
+  public void setDependsOn(List<ConfigCondition> dependsOn) {
+    this.dependsOn = dependsOn;
+  }
+
   public boolean isRemoved() {
     return rowIndex == null && rowSpan == null && columnIndex == null && columnSpan == null;
   }
@@ -122,5 +144,11 @@ public class Subsection {
     if (border == null) {
       border = parent.border;
     }
+    if (leftVerticalSplitter == null) {
+      leftVerticalSplitter = parent.leftVerticalSplitter;
+    }
+    if (dependsOn == null) {
+      dependsOn = parent.dependsOn;
+    }
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-server/src/main/java/org/apache/ambari/server/state/theme/Widget.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/theme/Widget.java b/ambari-server/src/main/java/org/apache/ambari/server/state/theme/Widget.java
index 7b1e09c..c8176ee 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/theme/Widget.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/theme/Widget.java
@@ -24,6 +24,7 @@ import org.codehaus.jackson.annotate.JsonProperty;
 import org.codehaus.jackson.map.annotate.JsonSerialize;
 
 import java.util.List;
+import java.util.Map;
 
 
 @JsonSerialize(include=JsonSerialize.Inclusion.NON_NULL)
@@ -33,6 +34,10 @@ public class Widget{
 	private String type;
 	@JsonProperty("units")
 	private List<Unit> units;
+  @JsonProperty("required-properties")
+  private Map<String,String> requiredProperties;
+  @JsonProperty("display-name")
+  private String displayName;
 
   public String getType() {
     return type;
@@ -49,4 +54,20 @@ public class Widget{
   public void setUnits(List<Unit> units) {
     this.units = units;
   }
-}
\ No newline at end of file
+
+  public Map<String, String> getRequiredProperties() {
+    return requiredProperties;
+  }
+
+  public void setRequiredProperties(Map<String, String> requiredProperties) {
+    this.requiredProperties = requiredProperties;
+  }
+
+  public String getDisplayName() {
+    return displayName;
+  }
+
+  public void setDisplayName(String displayName) {
+    this.displayName = displayName;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/admin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/admin-properties.xml b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/admin-properties.xml
index 936c332..c7e3ff9 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/admin-properties.xml
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/admin-properties.xml
@@ -28,6 +28,18 @@
     <description>The database type to be used (mysql/oracle)</description>
     <value-attributes>
       <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>MYSQL</value>
+          <label>MYSQL</label>
+        </entry>
+        <entry>
+          <value>ORACLE</value>
+          <label>ORACLE</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
     </value-attributes>
   </property>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/ranger-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/ranger-env.xml b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/ranger-env.xml
index 95c3b50..97c2b9f 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/ranger-env.xml
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/ranger-env.xml
@@ -58,6 +58,7 @@
     <name>ranger_admin_username</name>
     <value>amb_ranger_admin</value>
     <property-type>TEXT</property-type>
+    <display-name>Ranger Admin username for Ambari</display-name>
     <description>This is the ambari user created for creating repositories and policies in Ranger Admin for each plugin</description>
   </property>
 
@@ -102,6 +103,6 @@
     <name>ranger_pid_dir</name>
     <value>/var/run/ranger</value>
     <description></description>
-  </property>    
+  </property>
 
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/configuration/admin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/configuration/admin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/configuration/admin-properties.xml
index 114c3ab..5d7f7ce 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/configuration/admin-properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/configuration/admin-properties.xml
@@ -22,6 +22,64 @@
 <configuration supports_final="false">
 
   <property>
+    <name>DB_FLAVOR</name>
+    <value>MYSQL</value>
+    <display-name>DB FLAVOR</display-name>
+    <description>The database type to be used (mysql/oracle)</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>MYSQL</value>
+          <label>MYSQL</label>
+        </entry>
+        <entry>
+          <value>ORACLE</value>
+          <label>ORACLE</label>
+        </entry>
+        <entry>
+          <value>POSTGRES</value>
+          <label>POSTGRES</label>
+        </entry>
+        <entry>
+          <value>MSSQL</value>
+          <label>MSSQL</label>
+        </entry>
+        <entry>
+          <value>SQLA</value>
+          <label>SQL Anywhere</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>db_root_user</name>
+    <value>root</value>
+    <display-name>Ranger DB root user</display-name>
+    <description>Database admin user</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+  </property>
+
+  <property require-input="true">
+    <name>db_root_password</name>
+    <value></value>
+    <property-type>PASSWORD</property-type>
+    <display-name>Ranger DB root password</display-name>
+    <description>Database password for the database admin user-id</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+  </property>
+
+
+  <property>
     <name>policymgr_http_enabled</name>
     <deleted>true</deleted>
   </property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/configuration/ranger-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/configuration/ranger-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/configuration/ranger-env.xml
index 8308865..7f3e6e0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/configuration/ranger-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/configuration/ranger-env.xml
@@ -28,9 +28,24 @@
 
   <property>
     <name>create_db_dbuser</name>
-    <value>true</value>
-    <display-name>Setup DB and DB user</display-name>
-    <description>Setup Ranger Database and Database User?</description>
+    <value>false</value>
+    <display-name>Setup Database and Database User</display-name>
+    <description>If set to Yes, Ranger will set up the database and database user. This requires specifying the database root user and password</description>
+    <value-attributes>
+      <type>value-list</type>
+      <overridable>false</overridable>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Yes</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>No</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
   </property>
 
   <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/metainfo.xml
index a13fabf..69d908b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/metainfo.xml
@@ -52,6 +52,13 @@
         </osSpecific>
       </osSpecifics>
 
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
       <configuration-dependencies>
         <config-type>ranger-admin-site</config-type>
         <config-type>ranger-ugsync-site</config-type>

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/themes/theme.json b/ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/themes/theme.json
new file mode 100644
index 0000000..7160a4f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/themes/theme.json
@@ -0,0 +1,294 @@
+{
+  "name": "default",
+  "description": "Default theme for Ranger service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "ranger_admin_settings",
+            "display-name": "Ranger Admin",
+            "layout": {
+              "tab-columns": "2",
+              "tab-rows": "2",
+              "sections": [
+                {
+                  "name": "section-ranger-admin",
+                  "display-name": "Ranger Admin",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "4",
+                  "column-span": "2",
+                  "section-columns": "2",
+                  "section-rows": "4",
+                  "subsections": [
+                    {
+                      "name": "subsection-ranger-db-row1-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-ranger-db-row1-col2",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-ranger-db-row2-col1",
+                      "row-index": "1",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-ranger-db-row2-col2",
+                      "row-index": "1",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1",
+                      "left-vertical-splitter": false
+                    },
+                    {
+                      "name": "subsection-ranger-db-row3",
+                      "row-index": "2",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "2"
+                    },
+                    {
+                      "name": "subsection-ranger-db-row4-col1",
+                      "row-index": "3",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1",
+                      "depends-on": [
+                        {
+                          "configs":[
+                            "ranger-env/create_db_dbuser"
+                          ],
+                          "if": "${ranger-env/create_db_dbuser}",
+                          "then": {
+                            "property_value_attributes": {
+                              "visible": true
+                            }
+                          },
+                          "else": {
+                            "property_value_attributes": {
+                              "visible": false
+                            }
+                          }
+                        }
+                      ]
+                    },
+                    {
+                      "name": "subsection-ranger-db-row4-col2",
+                      "row-index": "3",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1",
+                      "depends-on": [
+                        {
+                          "configs":[
+                            "ranger-env/create_db_dbuser"
+                          ],
+                          "if": "${ranger-env/create_db_dbuser}",
+                          "then": {
+                            "property_value_attributes": {
+                              "visible": true
+                            }
+                          },
+                          "else": {
+                            "property_value_attributes": {
+                              "visible": false
+                            }
+                          }
+                        }
+                      ],
+                      "left-vertical-splitter": false
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "admin-properties/DB_FLAVOR",
+          "subsection-name": "subsection-ranger-db-row1-col1"
+        },
+        {
+          "config": "admin-properties/db_name",
+          "subsection-name": "subsection-ranger-db-row1-col1"
+        },
+        {
+          "config": "ranger-admin-site/ranger.jpa.jdbc.url",
+          "subsection-name": "subsection-ranger-db-row1-col1"
+        },
+        {
+          "config": "admin-properties/db_host",
+          "subsection-name": "subsection-ranger-db-row1-col2"
+        },
+        {
+          "config": "ranger-admin-site/ranger.jpa.jdbc.driver",
+          "subsection-name": "subsection-ranger-db-row1-col2"
+        },
+        {
+          "config": "admin-properties/db_user",
+          "subsection-name": "subsection-ranger-db-row2-col1"
+        },
+        {
+          "config": "admin-properties/db_password",
+          "subsection-name": "subsection-ranger-db-row2-col2"
+        },
+        {
+          "config": "ranger-env/test_db_connection",
+          "subsection-name": "subsection-ranger-db-row2-col2",
+          "property_value_attributes": {
+            "ui_only_property": true
+          },
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/create_db_dbuser"
+              ],
+              "if": "${ranger-env/create_db_dbuser}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-env/create_db_dbuser",
+          "subsection-name": "subsection-ranger-db-row3"
+        },
+        {
+          "config": "admin-properties/db_root_user",
+          "subsection-name": "subsection-ranger-db-row4-col1"
+        },
+        {
+          "config": "admin-properties/db_root_password",
+          "subsection-name": "subsection-ranger-db-row4-col2"
+        },
+        {
+          "config": "ranger-env/test_root_db_connection",
+          "subsection-name": "subsection-ranger-db-row4-col2",
+          "property_value_attributes": {
+            "ui_only_property": true
+          }
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "admin-properties/DB_FLAVOR",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "admin-properties/db_user",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "admin-properties/db_name",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-admin-site/ranger.jpa.jdbc.url",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-admin-site/ranger.jpa.jdbc.driver",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "admin-properties/db_host",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "admin-properties/db_password",
+        "widget": {
+          "type": "password"
+        }
+      },
+      {
+        "config": "ranger-env/test_db_connection",
+        "widget": {
+          "type": "test-db-connection",
+          "display-name": "Test Connection",
+          "required-properties": {
+            "jdbc.driver.class": "ranger-admin-site/ranger.jpa.jdbc.driver",
+            "jdbc.driver.url": "ranger-admin-site/ranger.jpa.jdbc.url",
+            "db.connection.source.host": "ranger-site/ranger_admin_hosts",
+            "db.type": "admin-properties/DB_FLAVOR",
+            "db.connection.destination.host": "admin-properties/db_host",
+            "db.connection.user": "admin-properties/db_user",
+            "db.connection.password": "admin-properties/db_password"
+          }
+        }
+      },
+      {
+        "config": "ranger-env/create_db_dbuser",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "admin-properties/db_root_user",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "admin-properties/db_root_password",
+        "widget": {
+          "type": "password"
+        }
+      },
+      {
+        "config": "ranger-env/test_root_db_connection",
+        "widget": {
+          "type": "test-db-connection",
+          "display-name": "Test Connection",
+          "required-properties": {
+            "jdbc.driver.class": "ranger-admin-site/ranger.jpa.jdbc.driver",
+            "jdbc.driver.url": "ranger-admin-site/ranger.jpa.jdbc.url",
+            "db.connection.source.host": "ranger-site/ranger_admin_hosts",
+            "db.type": "admin-properties/DB_FLAVOR",
+            "db.connection.destination.host": "admin-properties/db_host",
+            "db.connection.user": "admin-properties/db_root_user",
+            "db.connection.password": "admin-properties/db_root_password"
+          }
+        }
+      }
+    ]
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-web/app/app.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/app.js b/ambari-web/app/app.js
index f7c86b1..16bcc28 100644
--- a/ambari-web/app/app.js
+++ b/ambari-web/app/app.js
@@ -184,6 +184,8 @@ module.exports = Em.Application.create({
 
   allHostNames: [],
 
+  uiOnlyConfigDerivedFromTheme: [],
+
   currentStackVersionNumber: function () {
     var regExp = new RegExp(this.get('currentStackName') + '-');
     return (this.get('currentStackVersion') || this.get('defaultStackVersion')).replace(regExp, '');

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-web/app/controllers/main/service/info/configs.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/info/configs.js b/ambari-web/app/controllers/main/service/info/configs.js
index db5f1ed..3252fa3 100644
--- a/ambari-web/app/controllers/main/service/info/configs.js
+++ b/ambari-web/app/controllers/main/service/info/configs.js
@@ -135,7 +135,7 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ConfigsLoader, A
     }).filter(function(config) {
       return !config.get('isValid') || (config.get('overrides') || []).someProperty('isValid', false);
     }).filterProperty('isVisible').length;
-  }.property('selectedService.configs.@each.isValid', 'selectedService.configs.@each.overrideErrorTrigger'),
+  }.property('selectedService.configs.@each.isValid', 'selectedService.configs.@each.isVisible', 'selectedService.configs.@each.overrideErrorTrigger'),
 
   /**
    * Determines if Save-button should be disabled
@@ -302,17 +302,18 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ConfigsLoader, A
    * @method loadStep
    */
   loadStep: function () {
+    var self = this;
     var serviceName = this.get('content.serviceName');
     this.clearStep();
     this.set('dependentServiceNames', App.StackService.find(serviceName).get('dependentServiceNames'));
     if (App.get('isClusterSupportsEnhancedConfigs')) {
       this.loadConfigTheme(serviceName).always(function() {
         App.themesMapper.generateAdvancedTabs([serviceName]);
+        // Theme mapper has UI-only configs that need to be merged with the current service version configs
+        // This requires calling `loadCurrentVersions` after the theme has loaded
+        self.loadCurrentVersions();
       });
     }
-    if (!this.get('preSelectedConfigVersion')) {
-      this.loadCurrentVersions();
-    }
     this.loadServiceConfigVersions();
   },
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-web/app/controllers/wizard/step7_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step7_controller.js b/ambari-web/app/controllers/wizard/step7_controller.js
index bedd164..ff0e2ce 100644
--- a/ambari-web/app/controllers/wizard/step7_controller.js
+++ b/ambari-web/app/controllers/wizard/step7_controller.js
@@ -128,7 +128,7 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
     }).filter(function(config) {
       return !config.get('isValid') || (config.get('overrides') || []).someProperty('isValid', false);
     }).filterProperty('isVisible').length;
-  }.property('selectedService.configs.@each.isValid', 'selectedService.configs.@each.overrideErrorTrigger'),
+  }.property('selectedService.configs.@each.isValid', 'selectedService.configs.@each.isVisible','selectedService.configs.@each.overrideErrorTrigger'),
 
   /**
    * Should Next-button be disabled

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-web/app/data/HDP2.2/site_properties.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/HDP2.2/site_properties.js b/ambari-web/app/data/HDP2.2/site_properties.js
index 7341387..397e748 100644
--- a/ambari-web/app/data/HDP2.2/site_properties.js
+++ b/ambari-web/app/data/HDP2.2/site_properties.js
@@ -210,16 +210,25 @@ hdp22properties.push(
   },
   /**********************************************RANGER***************************************/
   {
+    "name": "ranger_admin_username",
+    "serviceName": "RANGER",
+    "filename": "ranger-env.xml",
+    "category": "RANGER_ADMIN",
+    "index": 0
+  },
+  {
     "name": "ranger_admin_password",
     "serviceName": "RANGER",
     "filename": "ranger-env.xml",
-    "category": "RANGER_ADMIN"
+    "category": "RANGER_ADMIN",
+    "index": 1
   },
   {
     "name": "SQL_CONNECTOR_JAR",
     "serviceName": "RANGER",
     "filename": "admin-properties.xml",
-    "category": "RANGER_ADMIN"
+    "category": "RANGER_ADMIN",
+    "index": 2
   },
   {
     "name": "DB_FLAVOR",

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-web/app/data/HDP2.3/site_properties.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/HDP2.3/site_properties.js b/ambari-web/app/data/HDP2.3/site_properties.js
index adf8cae..8041bc6 100644
--- a/ambari-web/app/data/HDP2.3/site_properties.js
+++ b/ambari-web/app/data/HDP2.3/site_properties.js
@@ -23,6 +23,8 @@ var hdp22properties = require('data/HDP2.2/site_properties').configProperties;
 var excludedConfigs = [
   'DB_FLAVOR',
   'db_name',
+  'db_user',
+  'db_password',
   'db_root_user',
   'db_root_password',
   'nimbus.host',
@@ -67,48 +69,8 @@ var hdp23properties = hdp22properties.filter(function (item) {
 });
 
 hdp23properties.push({
-    "name": "DB_FLAVOR",
-    "options": [
-      {
-        displayName: 'MYSQL'
-      },
-      {
-        displayName: 'ORACLE'
-      },
-      {
-        displayName: 'POSTGRES'
-      },
-      {
-        displayName: 'MSSQL'
-      },
-      {
-        displayName: 'SQLA',
-        hidden: App.get('currentStackName') !== 'SAPHD' && App.get('currentStackName') !== 'HDP'
-      }
-    ],
-    "displayType": "radio button",
-    "radioName": "RANGER DB_FLAVOR",
-    "serviceName": "RANGER",
-    "filename": "admin-properties.xml",
-    "category": "DBSettings",
-    "index": 1
-  },
-  {
-    "name": "db_host",
-    "serviceName": "RANGER",
-    "filename": "admin-properties.xml",
-    "category": "DBSettings",
-    "index": 2
-  },
-  {
-    "name": "create_db_dbuser",
-    "displayType": "checkbox",
-    "filename": "ranger-env.xml",
-    "category": "Advanced ranger-env",
-    "serviceName": "RANGER"
-  },
   /**************************************** RANGER - HDFS Plugin ***************************************/
-  {
+
     "name": "xasecure.audit.destination.db",
     "displayType": "checkbox",
     "filename": "ranger-hdfs-audit.xml",

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-web/app/mappers/configs/themes_mapper.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mappers/configs/themes_mapper.js b/ambari-web/app/mappers/configs/themes_mapper.js
index e632860..b55a695 100644
--- a/ambari-web/app/mappers/configs/themes_mapper.js
+++ b/ambari-web/app/mappers/configs/themes_mapper.js
@@ -21,6 +21,7 @@ App.themesMapper = App.QuickDataMapper.create({
   tabModel: App.Tab,
   sectionModel: App.Section,
   subSectionModel: App.SubSection,
+  configConditionModel: App.ConfigCondition,
 
   tabConfig: {
     "id": "name",
@@ -55,7 +56,9 @@ App.themesMapper = App.QuickDataMapper.create({
     "column_span": "column-span",
     "row_span": "row-span",
     "configProperties": "config_properties",
-    "section_id": "section_id"
+    "section_id": "section_id",
+    "depends_on": "depends-on",
+    "left_vertical_splitter": "left-vertical-splitter"
   },
 
   map: function (json) {
@@ -123,22 +126,85 @@ App.themesMapper = App.QuickDataMapper.create({
    * @param {Object} json - json to parse
    */
   mapThemeConfigs: function(json) {
+    var serviceName = Em.get(json, "ThemeInfo.service_name");
     Em.getWithDefault(json, "ThemeInfo.theme_data.Theme.configuration.placement.configs", []).forEach(function(configLink) {
       var configId = this.getConfigId(configLink);
       var subSectionId = configLink["subsection-name"];
       var subSection = App.SubSection.find(subSectionId);
       var configProperty = App.StackConfigProperty.find(configId);
+      var subSectionDependsOnConfigs = subSection.get('dependsOn');
+      var configDependsOnOtherConfigs =  configLink["depends-on"] || [];
+      var dependsOnConfigs = configDependsOnOtherConfigs.concat(subSectionDependsOnConfigs);
 
-      if (configProperty && subSection) {
+      if (configProperty.get('id') && subSection) {
         subSection.get('configProperties').pushObject(configProperty);
         configProperty.set('subSection', subSection);
       } else {
-        console.warn('there is no such property: ' + configId + '. Or subsection: ' + subSectionId);
+        console.log('there is no such property: ' + configId + '. Or subsection: ' + subSectionId);
+        var valueAttributes = configLink["property_value_attributes"];
+        if (valueAttributes) {
+          var isUiOnlyProperty = valueAttributes["ui_only_property"];
+          // UI only configs are mentioned in the themes for supporting widgets that is not intended for setting a value
+          // And thus is affiliated witha fake config peperty termed as ui only config property
+          if (isUiOnlyProperty && subSection) {
+            var split = configLink.config.split("/");
+            var fileName =  split[0] + '.xml';
+            var configName = split[1];
+            var uiOnlyConfig = App.uiOnlyConfigDerivedFromTheme.filterProperty('filename', fileName).findProperty('name', configName);
+            if (!uiOnlyConfig) {
+              var coreObject = {
+                id: configName + '_' + split[0],
+                isRequiredByAgent: false,
+                showLabel: false,
+                isOverridable: false,
+                recommendedValue: true,
+                name: configName,
+                isUserProperty: false,
+                filename: fileName,
+                serviceName: serviceName,
+                subSection: subSection
+              };
+              var uiOnlyConfigDerivedFromTheme = Em.Object.create(App.config.createDefaultConfig(configName, serviceName, fileName, false, coreObject));
+              App.uiOnlyConfigDerivedFromTheme.pushObject(uiOnlyConfigDerivedFromTheme);
+            }
+          }
+        }
+      }
+
+      // map all the configs which conditionally affect the value attributes of a config
+      if (dependsOnConfigs && dependsOnConfigs.length) {
+        this.mapThemeConfigConditions(dependsOnConfigs, uiOnlyConfigDerivedFromTheme || configProperty);
       }
+
     }, this);
   },
 
   /**
+   *
+   * @param configConditions: Array
+   * @param configProperty: DS.Model Object (App.StackConfigProperty)
+   */
+  mapThemeConfigConditions: function(configConditions, configProperty) {
+    var configConditionsCopy = [];
+    configConditions.forEach(function(_configCondition, index){
+      var configCondition = $.extend({},_configCondition);
+      configCondition.id = configProperty.get('id') + '_' + index;
+      configCondition.config_name =  configProperty.get('name');
+      configCondition.file_name =  configProperty.get('filename');
+      configCondition.configs =  _configCondition.configs.map(function(item) {
+        var result = {};
+        result.fileName = item.split('/')[0] + '.xml';
+        result.configName = item.split('/')[1];
+        return result;
+      });
+      configConditionsCopy.pushObject(configCondition);
+    }, this);
+
+    App.store.loadMany(this.get("configConditionModel"), configConditionsCopy);
+    App.store.commit();
+  },
+
+  /**
    * add widget object to <code>stackConfigProperty<code>
    *
    * @param {Object} json - json to parse
@@ -148,10 +214,18 @@ App.themesMapper = App.QuickDataMapper.create({
       var configId = this.getConfigId(widget);
       var configProperty = App.StackConfigProperty.find(configId);
 
-      if (configProperty) {
+      if (configProperty.get('id')) {
         configProperty.set('widget', widget.widget);
       } else {
-        console.warn('there is no such property: ' + configId);
+        var split = widget.config.split("/");
+        var fileName =  split[0] + '.xml';
+        var configName = split[1];
+        var uiOnlyProperty = App.uiOnlyConfigDerivedFromTheme.filterProperty('filename',fileName).findProperty('name',configName);
+        if (uiOnlyProperty) {
+          uiOnlyProperty.set('widget', widget.widget);
+        } else {
+          console.warn('there is no such property: ' + configId);
+        }
       }
     }, this);
   },

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-web/app/models.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models.js b/ambari-web/app/models.js
index 77918e5..1b34b0c 100644
--- a/ambari-web/app/models.js
+++ b/ambari-web/app/models.js
@@ -60,6 +60,7 @@ require('models/master_component');
 require('models/host_stack_version');
 require('models/root_service');
 require('models/upgrade_entity');
+require('models/configs/config_condition');
 require('models/configs/service_config_version');
 require('models/configs/stack_config_property');
 require('models/configs/config_group');

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-web/app/models/configs/config_condition.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/configs/config_condition.js b/ambari-web/app/models/configs/config_condition.js
new file mode 100644
index 0000000..26cf219
--- /dev/null
+++ b/ambari-web/app/models/configs/config_condition.js
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * THIS IS NOT USED FOR NOW
+ * FOR CONFIG GROUPS WE ARE USING OLD MODELS AND LOGIC
+ */
+
+var App = require('app');
+
+App.ConfigCondition = DS.Model.extend({
+  /**
+   * unique id generated as <code>config_name</code><code>filename</code>
+   * @property {string}
+   */
+  id: DS.attr('string'),
+
+  /**
+   * Name of the config that is being affected with the condition
+   */
+  configName: DS.attr('string'),
+
+  /**
+   * File name to which the config getting affected belongs
+   */
+  fileName: DS.attr('string'),
+
+  /**
+   * List of configs whose values affect the config
+   * Each Object in an array consists of configName and fileName
+   */
+  configs: DS.attr('array', {defaultValue: []}),
+
+  /**
+   * conditional string that can be evaluated to a boolean result.
+   * If the evaluated result of this string is true, the statement provided by the `then` attribute is used.
+   * Otherwise the statement provided by the `else` attribute is used
+   */
+  if: DS.attr('string'),
+  then: DS.attr('object', {defaultValue: null}),
+  else: DS.attr('object', {defaultValue: null})
+
+});
+
+App.ConfigCondition.FIXTURES = [];

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-web/app/models/configs/section.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/configs/section.js b/ambari-web/app/models/configs/section.js
index 8f45757..c04665e 100644
--- a/ambari-web/app/models/configs/section.js
+++ b/ambari-web/app/models/configs/section.js
@@ -77,9 +77,9 @@ App.Section = DS.Model.extend({
    * @type {number}
    */
   errorsCount: function () {
-    var errors = this.get('subSections').mapProperty('errorsCount');
+    var errors = this.get('subSections').filterProperty('isSectionVisible').mapProperty('errorsCount');
     return errors.length ? errors.reduce(Em.sum) : 0;
-  }.property('subSections.@each.errorsCount'),
+  }.property('subSections.@each.errorsCount', 'subSections.@each.isSectionVisible'),
 
   /**
    * @type {boolean}
@@ -128,7 +128,7 @@ App.Section = DS.Model.extend({
    * @type {boolean}
    */
   isHiddenByFilter: function () {
-    return this.get('subSections').everyProperty('isHiddenByFilter', true);
+    return !this.get('subSections').someProperty('isSectionVisible', true);
   }.property('subSections.@each.isHiddenByFilter')
 
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-web/app/models/configs/stack_config_property.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/configs/stack_config_property.js b/ambari-web/app/models/configs/stack_config_property.js
index 1289662..76e3b5f 100644
--- a/ambari-web/app/models/configs/stack_config_property.js
+++ b/ambari-web/app/models/configs/stack_config_property.js
@@ -284,3 +284,13 @@ App.StackConfigProperty = DS.Model.extend({
 
 
 App.StackConfigProperty.FIXTURES = [];
+
+App.StackConfigValAttributesMap = {
+  'overridable': 'isOverridable' ,
+  'visible': 'isVisible' ,
+  'empty_value_valid':'isRequired' ,
+  'editable_only_at_install': 'isReconfigurable' ,
+  'show_property_name': 'showLabel',
+  'read_only': 'isEditable',
+  'ui_only_property': 'isRequiredByAgent'
+};

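A short sketch of how this map is consumed (see configValueObserver further below): the keys are the attribute names a theme condition may set under property_value_attributes, the values are the corresponding UI property names:

    // `valueAttributes` comes from the `then`/`else` branch of a condition
    var valueAttributes = {visible: false};
    for (var key in valueAttributes) {
      if (valueAttributes.hasOwnProperty(key)) {
        var uiAttribute = App.StackConfigValAttributesMap[key] || key; // 'visible' -> 'isVisible'
        config.set(uiAttribute, valueAttributes[key]);
      }
    }
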
http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-web/app/models/configs/sub_section.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/configs/sub_section.js b/ambari-web/app/models/configs/sub_section.js
index d33fbb9..b7abb4f 100644
--- a/ambari-web/app/models/configs/sub_section.js
+++ b/ambari-web/app/models/configs/sub_section.js
@@ -67,6 +67,13 @@ App.SubSection = DS.Model.extend({
    */
   configProperties: DS.hasMany('App.StackConfigProperty'),
 
+  dependsOn: DS.attr('array', {defaultValue: []}),
+
+  /**
+   * @type {boolean}
+   */
+  leftVerticalSplitter: DS.attr('boolean', {defaultValue: true}),
+
   /**
    * @type {App.ServiceConfigProperty[]}
    */
@@ -86,8 +93,8 @@ App.SubSection = DS.Model.extend({
    * @type {boolean}
    */
   addLeftVerticalSplitter: function() {
-    return !this.get('isFirstColumn');
-  }.property('isFirstColumn'),
+    return !this.get('isFirstColumn') && this.get('leftVerticalSplitter');
+  }.property('isFirstColumn', 'leftVerticalSplitter'),
 
   /**
    * @type {boolean}
@@ -153,7 +160,15 @@ App.SubSection = DS.Model.extend({
   isHiddenByFilter: function () {
     var configs = this.get('configs');
     return configs.length ? configs.everyProperty('isHiddenByFilter', true) : false;
-  }.property('configs.@each.isHiddenByFilter')
+  }.property('configs.@each.isHiddenByFilter'),
+
+  /**
+   * Determines if subsection is visible
+   * @type {boolean}
+   */
+  isSectionVisible: function () {
+    return !this.get('isHiddenByFilter') && this.get('configs').someProperty('isVisible', true);
+  }.property('isHiddenByFilter', 'configs.@each.isVisible')
 });
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-web/app/templates/common/configs/service_config_layout_tab.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/common/configs/service_config_layout_tab.hbs b/ambari-web/app/templates/common/configs/service_config_layout_tab.hbs
index ac1dcc7..69de315 100644
--- a/ambari-web/app/templates/common/configs/service_config_layout_tab.hbs
+++ b/ambari-web/app/templates/common/configs/service_config_layout_tab.hbs
@@ -29,7 +29,7 @@
                 {{#each subRow in section.subsectionRows}}
                   <tr>
                     {{#each subsection in subRow}}
-                      <td {{bindAttr class="subsection.isHiddenByFilter:invisible subsection.showTopSplitter:top-horizontal-splitter:no-horizontal-splitter :config-subsection" colspan="subsection.columnSpan" rowspan="subsection.rowSpan"}}>
+                      <td {{bindAttr class="subsection.isSectionVisible::invisible subsection.showTopSplitter:top-horizontal-splitter:no-horizontal-splitter :config-subsection" colspan="subsection.columnSpan" rowspan="subsection.rowSpan"}}>
                         <div {{bindAttr class="subsection.addLeftVerticalSplitter:vertical-splitter-l"}}>
                           <div {{bindAttr class="subsection.border:with-border"}}>
                             <h5 class="subsection-display-name">
@@ -40,9 +40,11 @@
                             </h5>
                             {{#each config in subsection.configs}}
                               {{#if config.widget}}
-                                {{#unless config.isHiddenByFilter}}
-                                  {{view config.widget configBinding="config" canEditBinding="view.canEdit" sectionBinding="section" subSectionBinding="subsection" tabBinding="tab"}}
-                                {{/unless}}
+                                {{#if config.isVisible}}
+                                  {{#unless config.isHiddenByFilter}}
+                                    {{view config.widget configBinding="config" canEditBinding="view.canEdit" sectionBinding="section" subSectionBinding="subsection" tabBinding="tab"}}
+                                  {{/unless}}
+                                {{/if}}
                               {{/if}}
                             {{/each}}
                           </div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-web/app/templates/common/configs/widgets/test_db_connection_widget.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/common/configs/widgets/test_db_connection_widget.hbs b/ambari-web/app/templates/common/configs/widgets/test_db_connection_widget.hbs
new file mode 100644
index 0000000..1cd4aaf
--- /dev/null
+++ b/ambari-web/app/templates/common/configs/widgets/test_db_connection_widget.hbs
@@ -0,0 +1,35 @@
+{{!
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+}}
+
+<div class="entry-row db-connection">
+  <span class="control-label"></span>
+
+  <div class="controls">
+    <div class="control-group">
+      <div class="span9">
+        <span {{bindAttr class=":pull-left :btn :btn-primary view.isBtnDisabled:disabled"}} {{action connectToDatabase target="view"}}>{{view.btnCaption}}</span>
+
+        <div class="pull-left connection-result mll">
+          <a {{bindAttr class="view.isConnectionSuccess:mute:action"}} {{action showLogsPopup target="view"}}>{{view.responseCaption}}</a>
+        </div>
+        <div {{bindAttr class=":spinner :mll :pull-left view.isConnecting::hide"}}></div>
+        <i {{bindAttr class=":pull-right view.isConnectionSuccess:icon-ok-sign:icon-warning-sign view.isRequestResolved::hide"}}></i>
+      </div>
+    </div>
+  </div>
+</div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-web/app/utils/config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/config.js b/ambari-web/app/utils/config.js
index 51dfd8c..fc47221 100644
--- a/ambari-web/app/utils/config.js
+++ b/ambari-web/app/utils/config.js
@@ -208,16 +208,17 @@ App.config = Em.Object.create({
 
   /**
    * generates config objects
-   * @param configCategories
+   * @param configGroups
    * @param serviceName
    * @param selectedConfigGroup
    * @param canEdit
    * @returns {Array}
    */
-  mergePredefinedWithSaved: function (configCategories, serviceName, selectedConfigGroup, canEdit) {
+  mergePredefinedWithSaved: function (configGroups, serviceName, selectedConfigGroup, canEdit) {
     var configs = [];
+    var serviceConfigProperty;
 
-    configCategories.forEach(function (siteConfig) {
+    configGroups.forEach(function (siteConfig) {
       var service = this.getServiceByConfigType(siteConfig.type);
       if (service && serviceName != 'MISC') {
         serviceName = service.get('serviceName');
@@ -227,11 +228,22 @@ App.config = Em.Object.create({
       var finalAttributes = attributes.final || {};
       var properties = siteConfig.properties || {};
 
+      var uiOnlyConfigsObj = {};
+      var uiOnlyConfigDerivedFromTheme = App.uiOnlyConfigDerivedFromTheme.toArray();
+      uiOnlyConfigDerivedFromTheme.forEach(function(item) {
+        if (filename === item.filename) {
+          uiOnlyConfigsObj[item.name] = item.value;
+        }
+      });
+      properties = $.extend({}, properties, uiOnlyConfigsObj);
+
       for (var index in properties) {
         var id = this.configId(index, siteConfig.type);
-        var configsPropertyDef = this.get('preDefinedSitePropertiesMap')[id];
+        var preDefinedPropertyDef = this.get('preDefinedSitePropertiesMap')[id];
+        var uiOnlyConfigFromTheme = uiOnlyConfigDerivedFromTheme.findProperty('name', index);
+        var configsPropertyDef =  preDefinedPropertyDef  || uiOnlyConfigFromTheme;
         var advancedConfig = App.StackConfigProperty.find(id);
-        var isStackProperty = !!advancedConfig.get('id') || !!configsPropertyDef;
+        var isStackProperty = !!advancedConfig.get('id') || !!preDefinedPropertyDef;
         var template = this.createDefaultConfig(index, serviceName, filename, isStackProperty, configsPropertyDef);
         var serviceConfigObj = isStackProperty ? this.mergeStaticProperties(template, advancedConfig) : template;
 
@@ -340,8 +352,10 @@ App.config = Em.Object.create({
   mergeStaticProperties: function(coreObject, stackProperty, preDefined, propertiesToSkip) {
     propertiesToSkip = propertiesToSkip || ['name', 'filename', 'value', 'savedValue', 'isFinal', 'savedIsFinal'];
     for (var k in coreObject) {
-      if (!propertiesToSkip.contains(k)) {
-        coreObject[k] = this.getPropertyIfExists(k, coreObject[k], stackProperty, preDefined);
+      if (coreObject.hasOwnProperty(k)) {
+        if (!propertiesToSkip.contains(k)) {
+          coreObject[k] = this.getPropertyIfExists(k, coreObject[k], stackProperty, preDefined);
+        }
       }
     }
     return coreObject;
@@ -522,19 +536,22 @@ App.config = Em.Object.create({
     }).reduce(function(p,c) { return p.concat(c); }).concat(['cluster-env', 'alert_notification'])
       .uniq().compact().filter(function(configType) { return !!configType; });
 
+    // UI-only configs derived from the theme are required to show config-less widgets (widgets that are not related to a config)
     var predefinedIds = Object.keys(this.get('preDefinedSitePropertiesMap'));
+    var uiOnlyConfigDerivedFromTheme =  App.uiOnlyConfigDerivedFromTheme.mapProperty('name');
     var stackIds = App.StackConfigProperty.find().filterProperty('isValueDefined').mapProperty('id');
 
-    var configIds = stackIds.concat(predefinedIds).uniq();
+    var configIds = stackIds.concat(predefinedIds).concat(uiOnlyConfigDerivedFromTheme).uniq();
 
     configIds.forEach(function(id) {
 
       var preDefined = this.get('preDefinedSitePropertiesMap')[id];
+      var isUIOnlyFromTheme = App.uiOnlyConfigDerivedFromTheme.findProperty('name',id);
       var advanced = App.StackConfigProperty.find(id);
 
-      var name = preDefined ? preDefined.name : advanced.get('name');
-      var filename = preDefined ? preDefined.filename : advanced.get('filename');
-      var isUIOnly = Em.getWithDefault(preDefined || {}, 'isRequiredByAgent', true) === false;
+      var name = preDefined ? preDefined.name : isUIOnlyFromTheme ? isUIOnlyFromTheme.get('name') : advanced.get('name');
+      var filename = preDefined ? preDefined.filename : isUIOnlyFromTheme ? isUIOnlyFromTheme.get('filename') : advanced.get('filename');
+      var isUIOnly = (Em.getWithDefault(preDefined || {}, 'isRequiredByAgent', true) === false) || isUIOnlyFromTheme;
       /*
         Take properties that:
           - UI specific only, marked with <code>isRequiredByAgent: false</code>
@@ -546,9 +563,9 @@ App.config = Em.Object.create({
       if (!(uiPersistentProperties.contains(id) || isUIOnly || advanced.get('id')) && filename != 'alert_notification') {
         return;
       }
-      var serviceName = preDefined ? preDefined.serviceName : advanced.get('serviceName');
+      var serviceName = preDefined ? preDefined.serviceName : isUIOnlyFromTheme ? isUIOnlyFromTheme.get('serviceName') : advanced.get('serviceName');
       if (configTypes.contains(this.getConfigTagFromFileName(filename))) {
-        var configData = this.createDefaultConfig(name, serviceName, filename, true, preDefined || {});
+        var configData = this.createDefaultConfig(name, serviceName, filename, true, preDefined || isUIOnlyFromTheme || {});
         if (configData.recommendedValue) {
           configData.value = configData.recommendedValue;
         }

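Assuming a theme declared a single UI-only property (the names below are hypothetical), the merge above makes it flow through the same loop as a saved site property:

    // App.uiOnlyConfigDerivedFromTheme holds Em.Objects shaped roughly like:
    //   {name: 'db_connection_check', filename: 'hive-env.xml',
    //    serviceName: 'HIVE', isRequiredByAgent: false, value: ...}
    // for a siteConfig with filename === 'hive-env.xml' this yields:
    uiOnlyConfigsObj['db_connection_check'] = item.value;
    properties = $.extend({}, properties, uiOnlyConfigsObj);
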
http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-web/app/views.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views.js b/ambari-web/app/views.js
index 3e12998..33403a3 100644
--- a/ambari-web/app/views.js
+++ b/ambari-web/app/views.js
@@ -67,6 +67,7 @@ require('views/common/configs/widgets/string_config_widget_view');
 require('views/common/configs/widgets/textfield_config_widget_view');
 require('views/common/configs/widgets/time_interval_spinner_view');
 require('views/common/configs/widgets/toggle_config_widget_view');
+require('views/common/configs/widgets/test_db_connection_widget_view');
 require('views/common/configs/widgets/overrides/config_widget_override_view');
 require('views/common/configs/widgets/comparison/config_widget_comparison_view');
 require('views/common/configs/service_config_layout_tab_view');

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-web/app/views/common/configs/service_config_layout_tab_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/configs/service_config_layout_tab_view.js b/ambari-web/app/views/common/configs/service_config_layout_tab_view.js
index b3d69ee..6815a0d 100644
--- a/ambari-web/app/views/common/configs/service_config_layout_tab_view.js
+++ b/ambari-web/app/views/common/configs/service_config_layout_tab_view.js
@@ -64,7 +64,8 @@ App.ServiceConfigLayoutTabView = Em.View.extend(App.ConfigOverridable, {
     'text-field': App.TextFieldConfigWidgetView,
     'time-interval-spinner': App.TimeIntervalSpinnerView,
     toggle: App.ToggleConfigWidgetView,
-    'text-area': App.StringConfigWidgetView
+    'text-area': App.StringConfigWidgetView,
+    'test-db-connection': App.TestDbConnectionWidgetView
   },
 
   /**
@@ -83,8 +84,11 @@ App.ServiceConfigLayoutTabView = Em.View.extend(App.ConfigOverridable, {
       row.forEach(function (section) {
         section.get('subsectionRows').forEach(function (subRow) {
           subRow.forEach(function (subsection) {
+            var subsectionName = subsection.get('name');
+            var uiOnlyConfigs = App.uiOnlyConfigDerivedFromTheme.filterProperty('subSection.name', subsectionName);
+
             subsection.set('configs', []);
-            subsection.get('configProperties').forEach(function (config) {
+            subsection.get('configProperties').toArray().concat(uiOnlyConfigs).forEach(function (config) {
 
               var service = self.get('controller.stepConfigs').findProperty('serviceName', serviceName);
               if (!service) return;
@@ -95,10 +99,22 @@ App.ServiceConfigLayoutTabView = Em.View.extend(App.ConfigOverridable, {
               var configWidgetType = config.get('widget.type');
               var widget = widgetTypeMap[configWidgetType];
               Em.assert('Unknown config widget view for config ' + configProperty.get('id') + ' with type ' + configWidgetType, widget);
-              configProperty.setProperties({
+
+              var additionalProperties = {
                 widget: widget,
                 stackConfigProperty: config
-              });
+              };
+
+              var configConditions = App.ConfigCondition.find().filter(function(_configCondition){
+                var conditionalConfigs = _configCondition.get('configs').filterProperty('fileName', config.get('filename')).filterProperty('configName', config.get('name'));
+                return (conditionalConfigs && conditionalConfigs.length);
+              }, this);
+
+              if (configConditions && configConditions.length) {
+                additionalProperties.configConditions = configConditions;
+              }
+              configProperty.setProperties(additionalProperties);
+
               if (configProperty.get('overrides')) {
                 configProperty.get('overrides').setEach('stackConfigProperty', config);
               }

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-web/app/views/common/configs/widgets/config_widget_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/configs/widgets/config_widget_view.js b/ambari-web/app/views/common/configs/widgets/config_widget_view.js
index 354a44e..30c942b 100644
--- a/ambari-web/app/views/common/configs/widgets/config_widget_view.js
+++ b/ambari-web/app/views/common/configs/widgets/config_widget_view.js
@@ -375,6 +375,56 @@ App.ConfigWidgetView = Em.View.extend(App.SupportsDependentConfigs, App.WidgetPo
     this.initIncompatibleWidgetAsTextBox();
   },
 
+  willInsertElement: function() {
+    var configConditions = this.get('config.configConditions');
+    if (configConditions && configConditions.length) {
+      this.configValueObserver();
+      this.addObserver('config.value', this, this.configValueObserver);
+    }
+  },
+
+  willDestroyElement: function() {
+    if (this.get('config.configConditions')) {
+      this.removeObserver('config.value', this, this.configValueObserver);
+    }
+  },
+
+  configValueObserver: function() {
+    var configConditions = this.get('config.configConditions');
+    var serviceName = this.get('config.serviceName');
+    var serviceConfigs = this.get('controller.stepConfigs').findProperty('serviceName',serviceName).get('configs');
+    configConditions.forEach(function(configCondition){
+      var ifCondition =  configCondition.get("if");
+      var conditionalConfigName = configCondition.get("configName");
+      var conditionalConfigFileName = configCondition.get("fileName");
+      var parseIfConditionVal = ifCondition;
+      var regex = /\$\{.*?\}/g;
+      var configStrings = ifCondition.match(regex);
+      configStrings.forEach(function(_configString){
+        var configObject = _configString.substring(2, _configString.length-1).split("/");
+        var config = serviceConfigs.filterProperty('filename',configObject[0] + '.xml').findProperty('name', configObject[1]);
+        if (config) {
+          var configValue = config.get('value');
+          parseIfConditionVal = parseIfConditionVal.replace(_configString, configValue);
+        }
+      }, this);
+
+      var isConditionTrue =  Boolean(window.eval(parseIfConditionVal));
+      var action = isConditionTrue ? configCondition.get("then") : configCondition.get("else");
+      var valueAttributes = action.property_value_attributes;
+      for (var key in valueAttributes) {
+        if (valueAttributes.hasOwnProperty(key)) {
+          var valueAttribute = App.StackConfigValAttributesMap[key] || key;
+          var conditionalConfig = serviceConfigs.filterProperty('filename',conditionalConfigFileName).findProperty('name', conditionalConfigName);
+          if (conditionalConfig) {
+            conditionalConfig.set(valueAttribute, valueAttributes[key]);
+          }
+        }
+      }
+    }, this);
+  },
+
+
   /**
    * set widget value same as config value
    * useful for widgets that work with intermediate config value, not original

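The `if` condition is resolved by plain text substitution followed by eval; a minimal sketch with an illustrative condition string:

    var ifCondition = "${hive-env/hive_database} === 'Existing MySQL Database'";
    // each ${file/name} token is matched by /\$\{.*?\}/g and replaced with the
    // current value of that config, producing e.g.
    //   "'Existing MySQL Database' === 'Existing MySQL Database'"
    // window.eval then reduces this string to a boolean, which selects the
    // `then` or `else` branch of the condition.
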
http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-web/app/views/common/configs/widgets/password_config_widget_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/configs/widgets/password_config_widget_view.js b/ambari-web/app/views/common/configs/widgets/password_config_widget_view.js
index d33cd13..fe3cf89 100644
--- a/ambari-web/app/views/common/configs/widgets/password_config_widget_view.js
+++ b/ambari-web/app/views/common/configs/widgets/password_config_widget_view.js
@@ -30,6 +30,7 @@ App.PasswordConfigWidgetView = App.ConfigWidgetView.extend({
   }),
 
   didInsertElement: function() {
+    this._super();
     this.set('config.displayType', this.get('config.stackConfigProperty.widget.type'));
   }
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-web/app/views/common/configs/widgets/test_db_connection_widget_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/configs/widgets/test_db_connection_widget_view.js b/ambari-web/app/views/common/configs/widgets/test_db_connection_widget_view.js
new file mode 100644
index 0000000..d22cb1f
--- /dev/null
+++ b/ambari-web/app/views/common/configs/widgets/test_db_connection_widget_view.js
@@ -0,0 +1,364 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+require('views/common/controls_view');
+
+var App = require('app');
+
+App.TestDbConnectionWidgetView = App.ConfigWidgetView.extend({
+  templateName: require('templates/common/configs/widgets/test_db_connection_widget'),
+  classNames: ['widget'],
+
+  /** @property {string} btnCaption - text for button **/
+  btnCaption: function () {
+    return this.get('config.stackConfigProperty.widget.display-name');
+  }.property('config.stackConfigProperty.widget.display-name'),
+  /** @property {string} responseCaption - text for status link **/
+  responseCaption: null,
+  /** @property {boolean} isConnecting - is request to server activated **/
+  isConnecting: false,
+  /** @property {boolean} isValidationPassed - check validation for required fields **/
+  isValidationPassed: null,
+  /** @property {string} db_type - name of current database **/
+  db_type: null,
+  /** @property {boolean} isRequestResolved - check for finished request to server **/
+  isRequestResolved: false,
+  /** @property {boolean} isConnectionSuccess - check for successful connection to database **/
+  isConnectionSuccess: null,
+  /** @property {string} responseFromServer - message from server response **/
+  responseFromServer: null,
+  /** @property {Object} ambariRequiredProperties - properties that need for custom action request **/
+  ambariRequiredProperties: null,
+  /** @property {Number} currentRequestId - current custom action request id **/
+  currentRequestId: null,
+  /** @property {Number} currentTaskId - current custom action task id **/
+  currentTaskId: null,
+  /** @property {jQuery.Deferred} request - current $.ajax request **/
+  request: null,
+  /** @property {Number} pollInterval - timeout interval for ajax polling **/
+  pollInterval: 3000,
+  /** @property {Object} logsPopup - popup with DB connection check info **/
+  logsPopup: null,
+  /** @property {Array|String} masterHostName - name(s) of the host(s) from which the db connection check is performed **/
+  masterHostName: null,
+  /** @property {String} db_connection_url - the JDBC url used for performing the db connection check **/
+  db_connection_url: null,
+  /** @property {String} user_name - the user name to be used for performing the db connection check **/
+  user_name: null,
+  /** @property {String} user_passwd - password for the user name used for performing the db connection check **/
+  user_passwd: null,
+
+  /** @property {boolean} isBtnDisabled - disable button on failed validation or active request **/
+  isBtnDisabled: function () {
+    return !this.get('requiredProperties').everyProperty('isValid') || this.get('isConnecting');
+  }.property('requiredProperties.@each.isValid', 'isConnecting'),
+  /** @property {object} requiredProperties - properties that necessary for database connection **/
+  requiredProperties: [],
+
+  /** Check validation and load ambari properties **/
+  didInsertElement: function () {
+    var requiredProperties = this.get('config.stackConfigProperty.widget.required-properties');
+    var serviceName = this.get('config.serviceName');
+    var serviceConfigs = this.get('controller.stepConfigs').findProperty('serviceName',serviceName).get('configs');
+    var requiredServiceConfigs = Object.keys(requiredProperties).map(function(key){
+      var split = requiredProperties[key].split('/');
+      var fileName =  split[0] + '.xml';
+      var configName = split[1];
+      return serviceConfigs.filterProperty('filename',fileName).findProperty('name', configName);
+    }, this);
+
+    this.set('requiredProperties', requiredServiceConfigs);
+    this.setDbProperties(requiredProperties);
+    this.getAmbariProperties();
+  },
+
+  /** On view destroy **/
+  willDestroyElement: function () {
+    this.set('isConnecting', false);
+    this._super();
+  },
+
+
+  /**
+   * Sets the database type, master host name, connection url and user credentials from the required properties
+   * @param requiredProperties
+   */
+  setDbProperties: function(requiredProperties) {
+    var dbInfo = require('data/db_properties_info');
+    var dbProperties = {
+      'db.connection.source.host' : 'masterHostName',
+      'db.type' : 'db_type',
+      'db.connection.user': 'user_name',
+      'db.connection.password': 'user_passwd',
+      'jdbc.driver.url': 'db_connection_url'
+    };
+
+    for (var key in dbProperties) {
+      var masterHostNameProperty = requiredProperties[key];
+      var split = masterHostNameProperty.split('/');
+      var fileName =  split[0] + '.xml';
+      var configName =  split[1];
+      var dbConfig = this.get('requiredProperties').filterProperty('filename',fileName).findProperty('name', configName);
+      if (key === 'db.type') {
+        dbConfig = dbInfo.dpPropertiesMap[dbConfig.value].db_type.toUpperCase();
+      }
+      this.set(dbProperties[key], dbConfig);
+    }
+  },
+
+
+  /**
+   * Set up ambari properties required for custom action request
+   *
+   * @method getAmbariProperties
+   **/
+  getAmbariProperties: function () {
+    var clusterController = App.router.get('clusterController');
+    var _this = this;
+    if (!App.isEmptyObject(App.db.get('tmp', 'ambariProperties')) && !this.get('ambariProperties')) {
+      this.set('ambariProperties', App.db.get('tmp', 'ambariProperties'));
+      return;
+    }
+    if (App.isEmptyObject(clusterController.get('ambariProperties'))) {
+      clusterController.loadAmbariProperties().done(function (data) {
+        _this.formatAmbariProperties(data.RootServiceComponents.properties);
+      });
+    } else {
+      this.formatAmbariProperties(clusterController.get('ambariProperties'));
+    }
+  },
+
+  formatAmbariProperties: function (properties) {
+    var defaults = {
+      threshold: "60",
+      ambari_server_host: location.hostname,
+      check_execute_list: "db_connection_check"
+    };
+    var properties = App.permit(properties, ['jdk.name', 'jdk_location', 'java.home']);
+    var renameKey = function (oldKey, newKey) {
+      if (properties[oldKey]) {
+        defaults[newKey] = properties[oldKey];
+        delete properties[oldKey];
+      }
+    };
+    renameKey('java.home', 'java_home');
+    renameKey('jdk.name', 'jdk_name');
+    $.extend(properties, defaults);
+    App.db.set('tmp', 'ambariProperties', properties);
+    this.set('ambariProperties', properties);
+  },
+  /**
+   * `Action` method for starting connect to current database.
+   *
+   * @method connectToDatabase
+   **/
+  connectToDatabase: function () {
+    if (this.get('isBtnDisabled')) return;
+    this.set('isRequestResolved', false);
+    this.setConnectingStatus(true);
+    if (App.get('testMode')) {
+      this.startPolling();
+    } else {
+      this.runCheckConnection();
+    }
+  },
+
+  /**
+   * runs check connections methods depending on service
+   * @return {void}
+   * @method runCheckConnection
+   */
+  runCheckConnection: function () {
+    this.createCustomAction();
+  },
+
+
+  /**
+   * Run custom action for database connection.
+   *
+   * @method createCustomAction
+   **/
+  createCustomAction: function () {
+    var connectionProperties = this.getProperties('db_connection_url','user_name', 'user_passwd');
+    for (var key in connectionProperties) {
+      if (connectionProperties.hasOwnProperty(key)) {
+        connectionProperties[key] = connectionProperties[key].value;
+      }
+    }
+    var params = $.extend(true, {}, {db_name: this.get('db_type').toLowerCase()}, connectionProperties, this.get('ambariProperties'));
+    var filteredHosts =  Array.isArray(this.get('masterHostName.value')) ? this.get('masterHostName.value') : [this.get('masterHostName.value')];
+    App.ajax.send({
+      name: 'custom_action.create',
+      sender: this,
+      data: {
+        requestInfo: {
+          parameters: params
+        },
+        filteredHosts: filteredHosts
+      },
+      success: 'onCreateActionSuccess',
+      error: 'onCreateActionError'
+    });
+  },
+  /**
+   * Run updater if task is created successfully.
+   *
+   * @method onCreateActionSuccess
+   **/
+  onCreateActionSuccess: function (data) {
+    this.set('currentRequestId', data.Requests.id);
+    App.ajax.send({
+      name: 'custom_action.request',
+      sender: this,
+      data: {
+        requestId: this.get('currentRequestId')
+      },
+      success: 'setCurrentTaskId'
+    });
+  },
+
+  setCurrentTaskId: function (data) {
+    this.set('currentTaskId', data.items[0].Tasks.id);
+    this.startPolling();
+  },
+
+  startPolling: function () {
+    if (this.get('isConnecting'))
+      this.getTaskInfo();
+  },
+
+  getTaskInfo: function () {
+    var request = App.ajax.send({
+      name: 'custom_action.request',
+      sender: this,
+      data: {
+        requestId: this.get('currentRequestId'),
+        taskId: this.get('currentTaskId')
+      },
+      success: 'getTaskInfoSuccess'
+    });
+    this.set('request', request);
+  },
+
+  getTaskInfoSuccess: function (data) {
+    var task = data.Tasks;
+    this.set('responseFromServer', {
+      stderr: task.stderr,
+      stdout: task.stdout
+    });
+    if (task.status === 'COMPLETED') {
+      var structuredOut = task.structured_out.db_connection_check;
+      if (structuredOut.exit_code != 0) {
+        this.set('responseFromServer', {
+          stderr: task.stderr,
+          stdout: task.stdout,
+          structuredOut: structuredOut.message
+        });
+        this.setResponseStatus('failed');
+      } else {
+        this.setResponseStatus('success');
+      }
+    }
+    if (task.status === 'FAILED') {
+      this.setResponseStatus('failed');
+    }
+    if (/PENDING|QUEUED|IN_PROGRESS/.test(task.status)) {
+      Em.run.later(this, function () {
+        this.startPolling();
+      }, this.get('pollInterval'));
+    }
+  },
+
+  onCreateActionError: function (jqXhr, status, errorMessage) {
+    this.setResponseStatus('failed');
+    this.set('responseFromServer', errorMessage);
+  },
+
+  setResponseStatus: function (isSuccess) {
+    var isSuccess = isSuccess == 'success';
+    this.setConnectingStatus(false);
+    this.set('responseCaption', isSuccess ? Em.I18n.t('services.service.config.database.connection.success') : Em.I18n.t('services.service.config.database.connection.failed'));
+    this.set('isConnectionSuccess', isSuccess);
+    this.set('isRequestResolved', true);
+    if (this.get('logsPopup')) {
+      var statusString = isSuccess ? 'common.success' : 'common.error';
+      this.set('logsPopup.header', Em.I18n.t('services.service.config.connection.logsPopup.header').format(this.get('db_type'), Em.I18n.t(statusString)));
+    }
+  },
+  /**
+   * Switch captions and statuses for active/non-active request.
+   *
+   * @method setConnectingStatus
+   * @param {Boolean} [active]
+   */
+  setConnectingStatus: function (active) {
+    if (active) {
+      this.set('responseCaption', Em.I18n.t('services.service.config.database.connection.inProgress'));
+    }
+    this.set('controller.testConnectionInProgress', !!active);
+    this.set('btnCaption', !!active ? Em.I18n.t('services.service.config.database.btn.connecting') : Em.I18n.t('services.service.config.database.btn.idle'));
+    this.set('isConnecting', !!active);
+  },
+  /**
+   * Set view to init status.
+   *
+   * @method restore
+   **/
+  restore: function () {
+    if (this.get('request')) {
+      this.get('request').abort();
+      this.set('request', null);
+    }
+    this.set('responseCaption', null);
+    this.set('responseFromServer', null);
+    this.setConnectingStatus(false);
+    this.set('isRequestResolved', false);
+  },
+  /**
+   * `Action` method for showing response from server in popup.
+   *
+   * @method showLogsPopup
+   **/
+  showLogsPopup: function () {
+    if (this.get('isConnectionSuccess')) return;
+    var _this = this;
+    var statusString = this.get('isRequestResolved') ? 'common.error' : 'common.testing';
+    var popup = App.showAlertPopup(Em.I18n.t('services.service.config.connection.logsPopup.header').format(this.get('db_type'), Em.I18n.t(statusString)), null, function () {
+      _this.set('logsPopup', null);
+    });
+    popup.reopen({
+      onClose: function () {
+        this._super();
+        _this.set('logsPopup', null);
+      }
+    });
+    if (typeof this.get('responseFromServer') == 'object') {
+      popup.set('bodyClass', Em.View.extend({
+        checkDBConnectionView: _this,
+        templateName: require('templates/common/error_log_body'),
+        openedTask: function () {
+          return this.get('checkDBConnectionView.responseFromServer');
+        }.property('checkDBConnectionView.responseFromServer.stderr', 'checkDBConnectionView.responseFromServer.stdout', 'checkDBConnectionView.responseFromServer.structuredOut')
+      }));
+    } else {
+      popup.set('body', this.get('responseFromServer'));
+    }
+    this.set('logsPopup', popup);
+    return popup;
+  }
+});

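For context, a sketch of the theme widget definition this view consumes; the keys under required-properties match the ones read in setDbProperties, while the concrete 'file/name' paths on the right are illustrative:

    "widget": {
      "type": "test-db-connection",
      "display-name": "Test Connection",
      "required-properties": {
        "db.connection.source.host": "hive-site/hive.metastore.host",
        "db.type": "hive-env/hive_database",
        "db.connection.user": "hive-site/javax.jdo.option.ConnectionUserName",
        "db.connection.password": "hive-site/javax.jdo.option.ConnectionPassword",
        "jdbc.driver.url": "hive-site/javax.jdo.option.ConnectionURL"
      }
    }
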
http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-web/test/mappers/configs/themes_mapper_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/mappers/configs/themes_mapper_test.js b/ambari-web/test/mappers/configs/themes_mapper_test.js
index 7e6d35f..99456e0 100644
--- a/ambari-web/test/mappers/configs/themes_mapper_test.js
+++ b/ambari-web/test/mappers/configs/themes_mapper_test.js
@@ -188,6 +188,8 @@ describe('App.themeMapper', function () {
         "row_index": "0",
         "row_span": "1",
         "column_index": "0",
+        "depends_on": [],
+        "left_vertical_splitter": true,
         "column_span": "1",
         "section_id": "Section-1"
       });

http://git-wip-us.apache.org/repos/asf/ambari/blob/61540bbb/ambari-web/test/models/configs/section_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/models/configs/section_test.js b/ambari-web/test/models/configs/section_test.js
index c8f2ebf..055c532 100644
--- a/ambari-web/test/models/configs/section_test.js
+++ b/ambari-web/test/models/configs/section_test.js
@@ -56,24 +56,24 @@ describe('App.Section', function () {
         },
         {
           subSections: [
-            App.SubSection.createRecord({configs: [{isHiddenByFilter: false}, {isHiddenByFilter: false}]}),
-            App.SubSection.createRecord({configs: [{isHiddenByFilter: false}, {isHiddenByFilter: false}]})
+            App.SubSection.createRecord({configs: [{isHiddenByFilter: false, isVisible: true}, {isHiddenByFilter: false, isVisible: true}]}),
+            App.SubSection.createRecord({configs: [{isHiddenByFilter: false, isVisible: true}, {isHiddenByFilter: false, isVisible: true}]})
           ],
           m: 'no subsections are hidden',
           e: false
         },
         {
           subSections: [
-            App.SubSection.createRecord({configs: [{isHiddenByFilter: true}, {isHiddenByFilter: true}]}),
-            App.SubSection.createRecord({configs: [{isHiddenByFilter: false}, {isHiddenByFilter: false}]})
+            App.SubSection.createRecord({configs: [{isHiddenByFilter: true, isVisible: true}, {isHiddenByFilter: true, isVisible: true}]}),
+            App.SubSection.createRecord({configs: [{isHiddenByFilter: false, isVisible: true}, {isHiddenByFilter: false, isVisible: true}]})
           ],
           m: 'one subsection is hidden',
           e: false
         },
         {
           subSections: [
-            App.SubSection.createRecord({configs: [{isHiddenByFilter: true}, {isHiddenByFilter: true}]}),
-            App.SubSection.createRecord({configs: [{isHiddenByFilter: true}, {isHiddenByFilter: true}]})
+            App.SubSection.createRecord({configs: [{isHiddenByFilter: true, isVisible: true}, {isHiddenByFilter: true, isVisible: true}]}),
+            App.SubSection.createRecord({configs: [{isHiddenByFilter: true, isVisible: true}, {isHiddenByFilter: true, isVisible: true}]})
           ],
           m: 'all subsections are hidden',
           e: true


[20/50] [abbrv] ambari git commit: AMBARI-12510. ambari-server failed to properly detect of postgresql database status on RHEL 7.x (Tuong Truong via smohanty)

Posted by nc...@apache.org.
AMBARI-12510. ambari-server failed to properly detect of postgresql database status on RHEL 7.x (Tuong Truong via smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7e2a1c0e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7e2a1c0e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7e2a1c0e

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 7e2a1c0e24ed9bf1656a31b80b6e2d752739a350
Parents: 67b9d48
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Fri Sep 25 15:38:07 2015 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Fri Sep 25 15:38:07 2015 -0700

----------------------------------------------------------------------
 .../main/python/ambari_server/dbConfiguration_linux.py  | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7e2a1c0e/ambari-server/src/main/python/ambari_server/dbConfiguration_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/dbConfiguration_linux.py b/ambari-server/src/main/python/ambari_server/dbConfiguration_linux.py
index 0abd28e..b2e9508 100644
--- a/ambari-server/src/main/python/ambari_server/dbConfiguration_linux.py
+++ b/ambari-server/src/main/python/ambari_server/dbConfiguration_linux.py
@@ -551,10 +551,14 @@ class PGConfig(LinuxDBMSConfig):
   @staticmethod
   def _get_postgre_status():
     retcode, out, err = run_os_command(PGConfig.PG_ST_CMD)
-    try:
-      pg_status = re.search('(stopped|running)', out, re.IGNORECASE).group(0).lower()
-    except AttributeError:
-      pg_status = None
+    # on RHEL and SUSE PG_ST_CMD returns RC 0 for running and 3 for stopped
+    if retcode == 0:
+      pg_status = PGConfig.PG_STATUS_RUNNING
+    else:
+      if retcode == 3:
+        pg_status = "stopped"
+      else:
+        pg_status = None
     return pg_status, retcode, out, err
 
   @staticmethod


[05/50] [abbrv] ambari git commit: AMBARI-13222. kdc_type lost when updating kerberos-env via Kerberos service configuration page. (rlevas via yusaku)

Posted by nc...@apache.org.
AMBARI-13222. kdc_type lost when updating kerberos-env via Kerberos service configuration page. (rlevas via yusaku)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4515215a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4515215a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4515215a

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 4515215a24c066f91218b2815838e920edf5b01a
Parents: a830156
Author: Yusaku Sako <yu...@hortonworks.com>
Authored: Thu Sep 24 14:07:35 2015 -0700
Committer: Yusaku Sako <yu...@hortonworks.com>
Committed: Thu Sep 24 14:07:35 2015 -0700

----------------------------------------------------------------------
 ambari-web/app/mixins/common/configs/configs_saver.js | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4515215a/ambari-web/app/mixins/common/configs/configs_saver.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/common/configs/configs_saver.js b/ambari-web/app/mixins/common/configs/configs_saver.js
index 2b97f59..6352dec 100644
--- a/ambari-web/app/mixins/common/configs/configs_saver.js
+++ b/ambari-web/app/mixins/common/configs/configs_saver.js
@@ -444,6 +444,7 @@ App.ConfigsSaverMixin = Em.Mixin.create({
   formatValueBeforeSave: function(property) {
     var name = property.get('name');
     var value = property.get('value');
+    var kdcTypesMap = App.router.get('mainAdminKerberosController.kdcTypesValues');
     //TODO check for core-site
     if (this.get('heapsizeRegExp').test(name) && !this.get('heapsizeException').contains(name) && !(value).endsWith("m")) {
       return value += "m";
@@ -453,7 +454,9 @@ App.ConfigsSaverMixin = Em.Mixin.create({
     }
     switch (name) {
       case 'kdc_type':
-        return App.router.get('mainAdminKerberosController.kdcTypesValues')[property.get('value')];
+        return Em.keys(kdcTypesMap).filter(function(key) {
+            return kdcTypesMap[key] === property.get('value');
+        })[0];
       case 'storm.zookeeper.servers':
       case 'nimbus.seeds':
         if (Em.isArray(value)) {

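The fix replaces a forward map lookup with a reverse one; a minimal sketch with illustrative map contents:

    var kdcTypesMap = {'mit-kdc': 'Existing MIT KDC', 'active-directory': 'Existing Active Directory'};
    // old (lossy): kdcTypesMap[property.get('value')] is undefined when the
    // property already holds the display value 'Existing MIT KDC'
    // new: find the key whose display value matches what is stored
    var key = Em.keys(kdcTypesMap).filter(function (k) {
      return kdcTypesMap[k] === 'Existing MIT KDC';
    })[0]; // -> 'mit-kdc'
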

[30/50] [abbrv] ambari git commit: AMBARI-13254 Components filter works incorrectly on Hosts page. (atkach)

Posted by nc...@apache.org.
AMBARI-13254 Components filter works incorrectly on Hosts page. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/291b7cbf
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/291b7cbf
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/291b7cbf

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 291b7cbf5852db3fa37f4f180158d0958241e05b
Parents: a00b06e
Author: Andrii Tkach <at...@hortonworks.com>
Authored: Mon Sep 28 14:06:06 2015 +0300
Committer: Andrii Tkach <at...@hortonworks.com>
Committed: Mon Sep 28 14:06:06 2015 +0300

----------------------------------------------------------------------
 ambari-web/app/controllers/global/update_controller.js | 6 ------
 1 file changed, 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/291b7cbf/ambari-web/app/controllers/global/update_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/global/update_controller.js b/ambari-web/app/controllers/global/update_controller.js
index 084425d..69deb74 100644
--- a/ambari-web/app/controllers/global/update_controller.js
+++ b/ambari-web/app/controllers/global/update_controller.js
@@ -339,12 +339,6 @@ App.UpdateController = Em.Controller.extend({
     var hostNames = data.items.mapProperty('Hosts.host_name');
     var skipCall = hostNames.length === 0;
 
-    /**
-     * exclude pagination parameters as they were applied in previous call
-     * to obtain hostnames of filtered hosts
-     */
-    preLoadKeys = preLoadKeys.concat(this.get('paginationKeys'));
-
     var itemTotal = parseInt(data.itemTotal);
     if (!isNaN(itemTotal)) {
       App.router.set('mainHostController.filteredCount', itemTotal);


[03/50] [abbrv] ambari git commit: AMBARI-13231: Change default values from SQLA to SQL Anywhere (jluniya)

Posted by nc...@apache.org.
AMBARI-13231: Change default values from SQLA to SQL Anywhere (jluniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/eb2e6158
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/eb2e6158
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/eb2e6158

Branch: refs/heads/branch-dev-patch-upgrade
Commit: eb2e61585e491555d214103ecbef53b2d0819fe0
Parents: d66b4de
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Thu Sep 24 12:00:29 2015 -0700
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Thu Sep 24 12:00:29 2015 -0700

----------------------------------------------------------------------
 .../pluggable_stack_definition/configs/SAPHD.json       |  4 ++--
 .../ambari/server/topology/BlueprintValidatorImpl.java  |  8 ++++----
 .../HDP/2.3/services/HIVE/configuration/hive-site.xml   |  2 +-
 .../resources/stacks/HDP/2.3/services/stack_advisor.py  |  8 ++++----
 .../server/topology/BlueprintValidatorImplTest.java     |  8 ++++----
 .../stacks/2.0.6/configs/oozie_existing_sqla.json       |  2 +-
 .../test/python/stacks/2.1/HIVE/test_hive_metastore.py  |  4 ++--
 ambari-web/app/data/HDP2.3/site_properties.js           |  2 +-
 ambari-web/app/data/db_properties_info.js               |  7 +++++++
 ambari-web/app/views/common/controls_view.js            | 12 ++++++++----
 10 files changed, 34 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/eb2e6158/ambari-common/src/main/python/pluggable_stack_definition/configs/SAPHD.json
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/pluggable_stack_definition/configs/SAPHD.json b/ambari-common/src/main/python/pluggable_stack_definition/configs/SAPHD.json
index 3ef46d8..27c6995 100644
--- a/ambari-common/src/main/python/pluggable_stack_definition/configs/SAPHD.json
+++ b/ambari-common/src/main/python/pluggable_stack_definition/configs/SAPHD.json
@@ -34,7 +34,7 @@
               "name": "hive-env",
               "properties": {
                 "hive_database_type": "sqlanywhere",
-                "hive_database": "Existing SQLA Database"
+                "hive_database": "Existing SQL Anywhere Database"
               }
             },
             {
@@ -57,7 +57,7 @@
             {
               "name": "oozie-env",
               "properties": {
-                "oozie_database": "Existing SQLA Database"
+                "oozie_database": "Existing SQL Anywhere Database"
               }
             },
             {

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb2e6158/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorImpl.java
index 1b3a910..9e8f163 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorImpl.java
@@ -103,10 +103,10 @@ public class BlueprintValidatorImpl implements BlueprintValidator {
         if (component.equals("HIVE_METASTORE")) {
           Map<String, String> hiveEnvConfig = clusterConfigurations.get("hive-env");
           if (hiveEnvConfig != null && !hiveEnvConfig.isEmpty() && hiveEnvConfig.get("hive_database") !=null
-                  && hiveEnvConfig.get("hive_database").equals("Existing SQLA Database")
+                  && hiveEnvConfig.get("hive_database").equals("Existing SQL Anywhere Database")
                   && VersionUtils.compareVersions(stack.getVersion(), "2.3.0.0") < 0
                   && stack.getName().equalsIgnoreCase("HDP")) {
-            throw new InvalidTopologyException("Incorrect configuration: SQLA db is available only for stack HDP-2.3+ " +
+            throw new InvalidTopologyException("Incorrect configuration: SQL Anywhere db is available only for stack HDP-2.3+ " +
                     "and repo version 2.3.2+!");
           }
         }
@@ -114,10 +114,10 @@ public class BlueprintValidatorImpl implements BlueprintValidator {
         if (component.equals("OOZIE_SERVER")) {
           Map<String, String> oozieEnvConfig = clusterConfigurations.get("oozie-env");
           if (oozieEnvConfig != null && !oozieEnvConfig.isEmpty() && oozieEnvConfig.get("oozie_database") !=null
-                  && oozieEnvConfig.get("oozie_database").equals("Existing SQLA Database")
+                  && oozieEnvConfig.get("oozie_database").equals("Existing SQL Anywhere Database")
                   && VersionUtils.compareVersions(stack.getVersion(), "2.3.0.0") < 0
                   && stack.getName().equalsIgnoreCase("HDP")) {
-            throw new InvalidTopologyException("Incorrect configuration: SQLA db is available only for stack HDP-2.3+ " +
+            throw new InvalidTopologyException("Incorrect configuration: SQL Anywhere db is available only for stack HDP-2.3+ " +
                     "and repo version 2.3.2+!");
           }
         }

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb2e6158/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/hive-site.xml
index 700c958..5d0f5e0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/hive-site.xml
@@ -31,7 +31,7 @@ limitations under the License.
 
   <property>
     <name>datanucleus.rdbms.datastoreAdapterClassName</name>
-    <description>Datanucleus Class, This property used only when hive db is SQLA</description>
+    <description>Datanucleus Class, This property used only when hive db is SQL Anywhere</description>
     <depends-on>
       <property>
         <type>hive-env</type>

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb2e6158/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
index 16fb7a5..adb6689 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
@@ -182,7 +182,7 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
 
     # if hive using sqla db, then we should add DataNucleus property
     sqla_db_used = 'hive-env' in services['configurations'] and 'hive_database' in services['configurations']['hive-env']['properties'] and \
-                   services['configurations']['hive-env']['properties']['hive_database'] == 'Existing SQLA Database'
+                   services['configurations']['hive-env']['properties']['hive_database'] == 'Existing SQL Anywhere Database'
     if sqla_db_used:
       putHiveSiteProperty('datanucleus.rdbms.datastoreAdapterClassName','org.datanucleus.store.rdbms.adapter.SQLAnywhereAdapter')
     else:
@@ -309,19 +309,19 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
     hive_env_properties = getSiteProperties(configurations, "hive-env")
     validationItems = []
     sqla_db_used = "hive_database" in hive_env_properties and \
-                   hive_env_properties['hive_database'] == 'Existing SQLA Database'
+                   hive_env_properties['hive_database'] == 'Existing SQL Anywhere Database'
     prop_name = "datanucleus.rdbms.datastoreAdapterClassName"
     prop_value = "org.datanucleus.store.rdbms.adapter.SQLAnywhereAdapter"
     if sqla_db_used:
       if not prop_name in hive_site:
         validationItems.append({"config-name": prop_name,
                               "item": self.getWarnItem(
-                              "If Hive using SQLA db." \
+                              "If Hive using SQL Anywhere db." \
                               " {0} needs to be added with value {1}".format(prop_name,prop_value))})
       elif prop_name in hive_site and hive_site[prop_name] != "org.datanucleus.store.rdbms.adapter.SQLAnywhereAdapter":
         validationItems.append({"config-name": prop_name,
                                 "item": self.getWarnItem(
-                                  "If Hive using SQLA db." \
+                                  "If Hive using SQL Anywhere db." \
                                   " {0} needs to be set to {1}".format(prop_name,prop_value))})
     return self.toConfigurationValidationProblems(validationItems, "hive-site")
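
The two warn branches above enforce one rule: when hive_database is SQL Anywhere, datanucleus.rdbms.datastoreAdapterClassName must be present and set to the SQLAnywhereAdapter class. A minimal plain-JavaScript sketch of that rule (hiveEnv and hiveSite are illustrative plain objects, not the advisor API):

    function validateSqlAnywhereAdapter(hiveEnv, hiveSite) {
      var propName = 'datanucleus.rdbms.datastoreAdapterClassName';
      var propValue = 'org.datanucleus.store.rdbms.adapter.SQLAnywhereAdapter';
      var items = [];
      if (hiveEnv['hive_database'] === 'Existing SQL Anywhere Database') {
        if (!(propName in hiveSite)) {
          items.push({ 'config-name': propName,
                       item: propName + ' needs to be added with value ' + propValue });
        } else if (hiveSite[propName] !== propValue) {
          items.push({ 'config-name': propName,
                       item: propName + ' needs to be set to ' + propValue });
        }
      }
      return items;
    }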
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb2e6158/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintValidatorImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintValidatorImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintValidatorImplTest.java
index cc2b189..8ab4ba1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintValidatorImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintValidatorImplTest.java
@@ -181,8 +181,8 @@ public class BlueprintValidatorImplTest{
 
   @Test(expected=InvalidTopologyException.class)
   public void testValidateRequiredProperties_SqlaInHiveStackHdp22() throws Exception {
-    Map<String, String> hiveEnvConfig = new HashMap<>();
-    hiveEnvConfig.put("hive_database","Existing SQLA Database");
+    Map<String, String> hiveEnvConfig = new HashMap<String, String>();
+    hiveEnvConfig.put("hive_database","Existing SQL Anywhere Database");
     configProperties.put("hive-env", hiveEnvConfig);
 
     group1Components.add("HIVE_METASTORE");
@@ -204,8 +204,8 @@ public class BlueprintValidatorImplTest{
 
   @Test(expected=InvalidTopologyException.class)
   public void testValidateRequiredProperties_SqlaInOozieStackHdp22() throws Exception {
-    Map<String, String> hiveEnvConfig = new HashMap<>();
-    hiveEnvConfig.put("oozie_database","Existing SQLA Database");
+    Map<String, String> hiveEnvConfig = new HashMap<String, String>();
+    hiveEnvConfig.put("oozie_database","Existing SQL Anywhere Database");
     configProperties.put("oozie-env", hiveEnvConfig);
 
     group1Components.add("OOZIE_SERVER");

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb2e6158/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json b/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
index 30adf0e..01856df 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
@@ -553,7 +553,7 @@
             "content": "\n#!/bin/bash\n\nif [ -d \"/usr/lib/bigtop-tomcat\" ]; then\n  export OOZIE_CONFIG=${OOZIE_CONFIG:-/etc/oozie/conf}\n  export CATALINA_BASE=${CATALINA_BASE:-/var/lib/oozie/oozie-server}\n  export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}\n  export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat\nfi\n\n#Set JAVA HOME\nexport JAVA_HOME={{java_home}}\n\nexport JRE_HOME=${JAVA_HOME}\n\n# Set Oozie specific environment variables here.\n\n# Settings for the Embedded Tomcat that runs Oozie\n# Java System properties for Oozie should be specified in this variable\n#\n# export CATALINA_OPTS=\n\n# Oozie configuration file to load from Oozie configuration directory\n#\n# export OOZIE_CONFIG_FILE=oozie-site.xml\n\n# Oozie logs directory\n#\nexport OOZIE_LOG={{oozie_log_dir}}\n\n# Oozie pid directory\n#\nexport CATALINA_PID={{pid_file}}\n\n#Location of the data for oozie\nexport OOZIE_DATA={{oozie_data_dir}}\n\n# Oozie Log4J configuration file to load from Oozie config
 uration directory\n#\n# export OOZIE_LOG4J_FILE=oozie-log4j.properties\n\n# Reload interval of the Log4J configuration file, in seconds\n#\n# export OOZIE_LOG4J_RELOAD=10\n\n# The port Oozie server runs\n#\nexport OOZIE_HTTP_PORT={{oozie_server_port}}\n\n# The admin port Oozie server runs\n#\nexport OOZIE_ADMIN_PORT={{oozie_server_admin_port}}\n\n# The host name Oozie server runs on\n#\n# export OOZIE_HTTP_HOSTNAME=`hostname -f`\n\n# The base URL for callback URLs to Oozie\n#\n# export OOZIE_BASE_URL=\"http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie\"\nexport JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64",
             "oozie_user": "oozie",
             "oozie_admin_users": "{oozie_user}, {oozie_user}-admin",
-            "oozie_database": "Existing SQLA Database",
+            "oozie_database": "Existing SQL Anywhere Database",
             "oozie_data_dir": "/hadoop/oozie/data",
             "oozie_log_dir": "/var/log/oozie"
         },

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb2e6158/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py b/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
index 8c8795c..c5566a8 100644
--- a/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
+++ b/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
@@ -607,7 +607,7 @@ class TestHiveMetastore(RMFTestCase):
     # trigger the code to think it needs to copy the JAR
     json_content['configurations']['hive-site']['javax.jdo.option.ConnectionDriverName'] = "sap.jdbc4.sqlanywhere.IDriver"
     json_content['configurations']['hive-env']['hive_database'] = "Existing"
-    json_content['configurations']['hive-env']['hive_database_type'] = "sqla"
+    json_content['configurations']['hive-env']['hive_database_type'] = "sqlanywhere"
 
     mocks_dict = {}
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_metastore.py",
@@ -667,7 +667,7 @@ class TestHiveMetastore(RMFTestCase):
                               mode = 0644,
                               )
 
-    self.assertResourceCalled('Execute', "/usr/hdp/2.3.0.0-1234/hive/bin/schematool -dbType sqla -upgradeSchema",
+    self.assertResourceCalled('Execute', "/usr/hdp/2.3.0.0-1234/hive/bin/schematool -dbType sqlanywhere -upgradeSchema",
                               logoutput = True, environment = {'HIVE_CONF_DIR': '/usr/hdp/current/hive-server2/conf/conf.server'},
                               tries = 1, user = 'hive')
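
The reworked assertions track a single token: the -dbType value handed to schematool. A hypothetical helper (not Ambari code) showing how that command line is put together:

    function schematoolUpgradeCommand(hdpVersion, dbType) {
      return '/usr/hdp/' + hdpVersion + '/hive/bin/schematool' +
             ' -dbType ' + dbType + ' -upgradeSchema';
    }

    console.log(schematoolUpgradeCommand('2.3.0.0-1234', 'sqlanywhere'));
    // /usr/hdp/2.3.0.0-1234/hive/bin/schematool -dbType sqlanywhere -upgradeSchema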
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb2e6158/ambari-web/app/data/HDP2.3/site_properties.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/HDP2.3/site_properties.js b/ambari-web/app/data/HDP2.3/site_properties.js
index 4ea661d..adf8cae 100644
--- a/ambari-web/app/data/HDP2.3/site_properties.js
+++ b/ambari-web/app/data/HDP2.3/site_properties.js
@@ -82,7 +82,7 @@ hdp23properties.push({
         displayName: 'MSSQL'
       },
       {
-        displayName: 'SQL Anywhere',
+        displayName: 'SQLA',
         hidden: App.get('currentStackName') !== 'SAPHD' && App.get('currentStackName') !== 'HDP'
       }
     ],

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb2e6158/ambari-web/app/data/db_properties_info.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/db_properties_info.js b/ambari-web/app/data/db_properties_info.js
index 6ee7770..7d6d2f2 100644
--- a/ambari-web/app/data/db_properties_info.js
+++ b/ambari-web/app/data/db_properties_info.js
@@ -98,6 +98,13 @@ module.exports = {
       'sql_jar_connector': '/usr/share/java/sqljdbc4.jar',
       'db_type': 'mssql'
     },
+    /** TODO: Remove SQLA from the list of databases once Ranger DB_FLAVOR=SQLA is replaced with SQL Anywhere */
+    'SQLA': {
+      'connection_url': 'jdbc:sqlanywhere:host={0};database={1}',
+      'driver': 'sap.jdbc4.sqlanywhere.IDriver',
+      'sql_jar_connector': '/path_to_driver/sqla-client-jdbc.tar.gz',
+      'db_type': 'sqlanywhere'
+    },
     'ANYWHERE': {
       'connection_url': 'jdbc:sqlanywhere:host={0};database={1}',
       'driver': 'sap.jdbc4.sqlanywhere.IDriver',
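
The {0} and {1} tokens in connection_url above are positional placeholders for host and database name. A small illustrative formatter (format is not an Ambari helper, and the host/database values below are made up):

    function format(template) {
      var args = Array.prototype.slice.call(arguments, 1);
      return template.replace(/\{(\d+)\}/g, function (m, i) { return args[i]; });
    }

    console.log(format('jdbc:sqlanywhere:host={0};database={1}', 'db-host.example.com', 'ranger'));
    // jdbc:sqlanywhere:host=db-host.example.com;database=ranger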

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb2e6158/ambari-web/app/views/common/controls_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/controls_view.js b/ambari-web/app/views/common/controls_view.js
index e079b57..8d1365a 100644
--- a/ambari-web/app/views/common/controls_view.js
+++ b/ambari-web/app/views/common/controls_view.js
@@ -481,7 +481,8 @@ App.ServiceConfigRadioButtons = Ember.View.extend(App.ServiceConfigCalculateId,
    */
   getDbTypeFromRadioValue: function() {
     var currentValue = this.get('serviceConfig.value');
-    var databases = /MySQL|Postgres|Oracle|Derby|MSSQL|Anywhere/gi;
+    /** TODO: Remove SQLA from the list of databases once Ranger DB_FLAVOR=SQLA is replaced with SQL Anywhere */
+    var databases = /MySQL|Postgres|Oracle|Derby|MSSQL|SQLA|Anywhere/gi;
     if (this.get('inMSSQLWithIA')) {
       return 'MSSQL2';
     } else {
@@ -649,9 +650,11 @@ App.ServiceConfigRadioButtons = Ember.View.extend(App.ServiceConfigCalculateId,
     }
     var handledProperties = ['oozie_database', 'hive_database', 'DB_FLAVOR'];
     var currentValue = this.get('serviceConfig.value');
-    var databases = /MySQL|PostgreSQL|Postgres|Oracle|Derby|MSSQL|Anywhere/gi;
+    /** TODO: Remove SQLA from the list of databases once Ranger DB_FLAVOR=SQLA is replaced with SQL Anywhere */
+    var databases = /MySQL|PostgreSQL|Postgres|Oracle|Derby|MSSQL|SQLA|Anywhere/gi;
     var currentDB = currentValue.match(databases)[0];
-    var databasesTypes = /MySQL|Postgres|Oracle|Derby|MSSQL|Anywhere/gi;
+    /** TODO: Remove SQLA from the list of databases once Ranger DB_FLAVOR=SQLA is replaced with SQL Anywhere */
+    var databasesTypes = /MySQL|Postgres|Oracle|Derby|MSSQL|SQLA|Anywhere/gi;
     var currentDBType = currentValue.match(databasesTypes)[0];
     var checkDatabase = /existing/gi.test(currentValue);
     // db connection check button show up if existed db selected
@@ -1110,7 +1113,8 @@ App.CheckDBConnectionView = Ember.View.extend({
 
     if (this.get('parentView.service.serviceName') === 'RANGER') {
       var dbFlavor = this.get('parentView.categoryConfigsAll').findProperty('name','DB_FLAVOR').get('value'),
-        databasesTypes = /MYSQL|POSTGRES|ORACLE|MSSQL|Anywhere/gi,
+        /** TODO: Remove SQLA from the list of databases once Ranger DB_FLAVOR=SQLA is replaced with SQL Anywhere */
+        databasesTypes = /MYSQL|POSTGRES|ORACLE|MSSQL|SQLA|Anywhere/gi,
         dbType = dbFlavor.match(databasesTypes)?dbFlavor.match(databasesTypes)[0].toLowerCase():'';
 
       if (dbType==='oracle') {
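
The temporary SQLA alternative is needed because Ranger's DB_FLAVOR value has no space, so the Anywhere branch never matches it. A quick plain-JS check of the extended pattern:

    var databases = /MySQL|PostgreSQL|Postgres|Oracle|Derby|MSSQL|SQLA|Anywhere/gi;
    ['Existing SQLA Database', 'Existing SQL Anywhere Database'].forEach(function (v) {
      console.log(v, '->', v.match(databases)[0]);
    });
    // Existing SQLA Database -> SQLA
    // Existing SQL Anywhere Database -> Anywhere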


[48/50] [abbrv] ambari git commit: AMBARI-13275. Comparing of config versions works incorrectly for DBs (onechiporenko)

Posted by nc...@apache.org.
AMBARI-13275. Comparing of config versions works incorrectly for DBs (onechiporenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e203fae4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e203fae4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e203fae4

Branch: refs/heads/branch-dev-patch-upgrade
Commit: e203fae4d8b37f1438fea93c9da7c72b504d613e
Parents: 50099dd
Author: Oleg Nechiporenko <on...@apache.org>
Authored: Wed Sep 30 15:14:02 2015 +0300
Committer: Oleg Nechiporenko <on...@apache.org>
Committed: Wed Sep 30 15:14:02 2015 +0300

----------------------------------------------------------------------
 .../common/configs/compare_property.hbs         |  2 +-
 ambari-web/app/views/common/controls_view.js    | 14 +++-
 .../test/views/common/controls_view_test.js     | 71 ++++++++++++++++++++
 3 files changed, 85 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e203fae4/ambari-web/app/templates/common/configs/compare_property.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/common/configs/compare_property.hbs b/ambari-web/app/templates/common/configs/compare_property.hbs
index f593825..29c6ed7 100644
--- a/ambari-web/app/templates/common/configs/compare_property.hbs
+++ b/ambari-web/app/templates/common/configs/compare_property.hbs
@@ -18,7 +18,7 @@
 
 {{#each compareConfig in view.serviceConfigProperty.compareConfigs}}
     <div {{bindAttr class=":control-group :overrideField"}}>
-      {{view compareConfig.viewClass serviceConfigBinding="compareConfig" categoryConfigsAllBinding="view.parentView.categoryConfigsAll"}}
+      {{view compareConfig.viewClass serviceConfigBinding="compareConfig" versionBinding="compareConfig.serviceVersion.version" categoryConfigsAllBinding="view.parentView.categoryConfigsAll"}}
       <span class="label label-info">{{compareConfig.serviceVersion.versionText}}</span>
       {{#if compareConfig.serviceVersion.isCurrent}}
         <span class="label label-success">{{t common.current}}</span>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e203fae4/ambari-web/app/views/common/controls_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/controls_view.js b/ambari-web/app/views/common/controls_view.js
index 17fab9f..28d3090 100644
--- a/ambari-web/app/views/common/controls_view.js
+++ b/ambari-web/app/views/common/controls_view.js
@@ -520,7 +520,19 @@ App.ServiceConfigRadioButtons = Ember.View.extend(App.ServiceConfigCalculateId,
     }
   }.observes('databaseProperty.value', 'hostNameProperty.value', 'serviceConfig.value'),
 
-  nameBinding: 'serviceConfig.radioName',
+  name: function () {
+    var name = this.get('serviceConfig.radioName');
+    if (!this.get('serviceConfig.isOriginalSCP')) {
+      if (this.get('serviceConfig.isComparison')) {
+        var version = this.get('serviceConfig.compareConfigs') ? this.get('controller.selectedVersion') : this.get('version');
+        name += '-v' + version;
+      } else {
+        var group = this.get('serviceConfig.group.name');
+        name += '-' + group;
+      }
+    }
+    return name;
+  }.property('serviceConfig.radioName'),
 
   /**
    * Just property object for database name
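
Restating the computed name above in plain JavaScript (cfg is an illustrative object standing in for serviceConfig): keeping the radio-group name unique per compared version or per config group stops the browser from treating buttons that belong to different versions as one group.

    function radioGroupName(cfg, selectedVersion, version) {
      var name = cfg.radioName;
      if (!cfg.isOriginalSCP) {
        if (cfg.isComparison) {
          name += '-v' + (cfg.compareConfigs ? selectedVersion : version);
        } else {
          name += '-' + cfg.group.name;
        }
      }
      return name;
    }

    console.log(radioGroupName({ radioName: 'n1', isOriginalSCP: false,
      isComparison: true, compareConfigs: [] }, 1, 2)); // n1-v1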

http://git-wip-us.apache.org/repos/asf/ambari/blob/e203fae4/ambari-web/test/views/common/controls_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/common/controls_view_test.js b/ambari-web/test/views/common/controls_view_test.js
index fdf3739..b1914c0 100644
--- a/ambari-web/test/views/common/controls_view_test.js
+++ b/ambari-web/test/views/common/controls_view_test.js
@@ -333,6 +333,77 @@ describe('App.ServiceConfigRadioButtons', function () {
     });
 
   });
+
+  describe('#name', function () {
+
+    var cases = [
+      {
+        serviceConfig: {
+          radioName: 'n0',
+          isOriginalSCP: true,
+          isComparison: false
+        },
+        name: 'n0',
+        title: 'original value'
+      },
+      {
+        serviceConfig: {
+          radioName: 'n1',
+          isOriginalSCP: false,
+          isComparison: true,
+          compareConfigs: []
+        },
+        controller: {
+          selectedVersion: 1
+        },
+        name: 'n1-v1',
+        title: 'comparison view, original value'
+      },
+      {
+        serviceConfig: {
+          radioName: 'n2',
+          isOriginalSCP: false,
+          isComparison: true,
+          compareConfigs: null
+        },
+        version: 2,
+        name: 'n2-v2',
+        title: 'comparison view, value to be compared with'
+      },
+      {
+        serviceConfig: {
+          radioName: 'n3',
+          isOriginalSCP: false,
+          isComparison: false,
+          group: {
+            name: 'g'
+          }
+        },
+        name: 'n3-g',
+        title: 'override value'
+      }
+    ];
+
+    beforeEach(function () {
+      view.reopen({
+        serviceConfig: Em.Object.create()
+      });
+    });
+
+    cases.forEach(function (item) {
+      it(item.title, function () {
+        if (item.controller) {
+          view.reopen({
+            controller: item.controller
+          });
+        }
+        view.set('version', item.version);
+        view.get('serviceConfig').setProperties(item.serviceConfig);
+        expect(view.get('name')).to.equal(item.name);
+      });
+    });
+
+  });
 });
 
 describe('App.ServiceConfigRadioButton', function () {


[06/50] [abbrv] ambari git commit: AMBARI-13203. Ambari Pig View - Explain PigScript - Script Details - Shows the pig command instead of pig script. (Nitiraj Singh Rathore via yusaku)

Posted by nc...@apache.org.
AMBARI-13203. Ambari Pig View - Explain PigScript - Script Details - Shows the pig command instead of pig script. (Nitiraj Singh Rathore via yusaku)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/93f86a48
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/93f86a48
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/93f86a48

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 93f86a487e133ce2a04cd3d30ac2a02a6ff63b7a
Parents: 4515215
Author: Yusaku Sako <yu...@hortonworks.com>
Authored: Thu Sep 24 15:22:17 2015 -0700
Committer: Yusaku Sako <yu...@hortonworks.com>
Committed: Thu Sep 24 15:22:17 2015 -0700

----------------------------------------------------------------------
 .../ui/pig-web/app/controllers/scriptJob.js     | 28 +++++++++++---------
 1 file changed, 16 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/93f86a48/contrib/views/pig/src/main/resources/ui/pig-web/app/controllers/scriptJob.js
----------------------------------------------------------------------
diff --git a/contrib/views/pig/src/main/resources/ui/pig-web/app/controllers/scriptJob.js b/contrib/views/pig/src/main/resources/ui/pig-web/app/controllers/scriptJob.js
index d868823..2d8226b 100644
--- a/contrib/views/pig/src/main/resources/ui/pig-web/app/controllers/scriptJob.js
+++ b/contrib/views/pig/src/main/resources/ui/pig-web/app/controllers/scriptJob.js
@@ -21,18 +21,22 @@ var App = require('app');
 App.ScriptJobController = Em.ObjectController.extend(App.FileHandler,{
   fullscreen:false,
   scriptContents:function () {
-    var promise = new Ember.RSVP.Promise(function(resolve,reject){
-      return this.get('content.pigScript').then(function (pigScript) {
-        return resolve(pigScript);
-      },function (error) {
-        var response = (error.responseJSON)?error.responseJSON:{};
-        reject(response.message);
-        if (error.status != 404) {
-          controller.send('showAlert', {'message': Em.I18n.t('job.alert.promise_error',
-            {status:response.status, message:response.message}), status:'error', trace: response.trace});
-        }
-      }.bind(this));
-    }.bind(this));
+    var job = this.get('content'),
+        controller = this,
+        promise = new Ember.RSVP.Promise(function (resolve,reject){
+          var file = (job.get('jobType') !== 'explain') ? job.get('pigScript') : job.store.find('file',[job.get('statusDir'),'source.pig'].join('/'));
+
+          return file.then(function (data) {
+            resolve(data);
+          },function (error) {
+            var response = (error.responseJSON)?error.responseJSON:{};
+            reject(response.message);
+            if (error.status != 404) {
+              controller.send('showAlert', {'message': Em.I18n.t('job.alert.promise_error',
+                {status:response.status, message:response.message}), status:'error', trace: response.trace});
+            }
+          });
+        });
     return Ember.ObjectProxy.extend(Ember.PromiseProxyMixin).create({
       promise: promise
     });
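
The fix keys the displayed source off the job type: explain jobs read back the source.pig written to their status directory instead of the synthetic explain command. A plain-JS restatement (job and store stand in for the Ember Data objects):

    function scriptFileFor(job, store) {
      return job.jobType !== 'explain'
        ? job.pigScript  // regular jobs keep their own script record
        : store.find('file', [job.statusDir, 'source.pig'].join('/'));
    }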


[28/50] [abbrv] ambari git commit: AMBARI-13229. Detect non-compliant python versions and do not attempt to start Ambari Agent (aonishuk)

Posted by nc...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_client.py b/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_client.py
index a44c5af..17eaa61 100644
--- a/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_client.py
+++ b/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_client.py
@@ -76,7 +76,7 @@ class TestMahoutClient(RMFTestCase):
       hdp_stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'mahout-client', '2.2.1.0-3242'), sudo=True)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'mahout-client', '2.2.1.0-3242'), sudo=True)
     self.assertNoMoreResources()
 
   def test_pre_rolling_restart_23(self):
@@ -103,7 +103,7 @@ class TestMahoutClient(RMFTestCase):
       call_mocks = itertools.cycle([(0, None)]),
       mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'mahout-client', '2.3.0.0-1234'),
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'mahout-client', '2.3.0.0-1234'),
         sudo = True,
     )
     self.assertNoMoreResources()

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00b06ea/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py b/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
index 9e41e11..6e36508 100644
--- a/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
+++ b/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
@@ -167,7 +167,7 @@ class TestSparkThriftServer(RMFTestCase):
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'spark-thriftserver', version), sudo=True)
+    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'spark-thriftserver', version), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)
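
Both assertions now expect hdp-select to be launched through ambari-python-wrap, so the tool runs under a supported interpreter even when the system default python is not. Illustrative only:

    var cmd = ['ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'mahout-client', '2.3.0.0-1234'];
    console.log(cmd.join(' '));
    // ambari-python-wrap /usr/bin/hdp-select set mahout-client 2.3.0.0-1234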


[32/50] [abbrv] ambari git commit: AMBARI-13258. Kerberos Wizard: next button on Confirm Configuration doesn't work after page refresh (akovalenko)

Posted by nc...@apache.org.
AMBARI-13258. Kerberos Wizard: next button on Confirm Configuration doesn't work after page refresh (akovalenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a6ed3633
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a6ed3633
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a6ed3633

Branch: refs/heads/branch-dev-patch-upgrade
Commit: a6ed3633de472ac9dfc159629eb0fefb8322cb33
Parents: f67543c
Author: Aleksandr Kovalenko <ak...@hortonworks.com>
Authored: Mon Sep 28 18:59:37 2015 +0300
Committer: Aleksandr Kovalenko <ak...@hortonworks.com>
Committed: Mon Sep 28 19:10:07 2015 +0300

----------------------------------------------------------------------
 ambari-web/app/routes/add_kerberos_routes.js | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a6ed3633/ambari-web/app/routes/add_kerberos_routes.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/routes/add_kerberos_routes.js b/ambari-web/app/routes/add_kerberos_routes.js
index ab4752f..817f4ce 100644
--- a/ambari-web/app/routes/add_kerberos_routes.js
+++ b/ambari-web/app/routes/add_kerberos_routes.js
@@ -292,8 +292,6 @@ module.exports = App.WizardRoute.extend({
 
     next: function (router) {
       var kerberosWizardController = router.get('kerberosWizardController');
-      var kerberosWizardStep4Controller = router.get('kerberosWizardStep4Controller');
-      kerberosWizardController.saveServiceConfigProperties(kerberosWizardStep4Controller);
       kerberosWizardController.setDBProperties({
         tasksStatuses: null,
         tasksRequestIds: null
@@ -321,14 +319,11 @@ module.exports = App.WizardRoute.extend({
     back: Em.Router.transitionTo('step4'),
     next: function (router) {
       var kerberosWizardController = router.get('kerberosWizardController');
-      var callback = function () {
-        kerberosWizardController.setDBProperties({
-          tasksStatuses: null,
-          tasksRequestIds: null
-        });
-        router.transitionTo('step7');
-      };
-      callback();
+      kerberosWizardController.setDBProperties({
+        tasksStatuses: null,
+        tasksRequestIds: null
+      });
+      router.transitionTo('step7');
     }
   }),
 


[18/50] [abbrv] ambari git commit: AMBARI-13245. RU cluster in hung state while trying to perform downgrade (ncole)

Posted by nc...@apache.org.
AMBARI-13245. RU cluster in hung state while trying to perform downgrade (ncole)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/631ea43d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/631ea43d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/631ea43d

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 631ea43d6a82ebf13002ca26bfd57c7083a6996c
Parents: e3b0c36
Author: Nate Cole <nc...@hortonworks.com>
Authored: Fri Sep 25 15:33:10 2015 -0400
Committer: Nate Cole <nc...@hortonworks.com>
Committed: Fri Sep 25 16:23:06 2015 -0400

----------------------------------------------------------------------
 .../apache/ambari/server/utils/Parallel.java    | 34 ++++++++++++--------
 1 file changed, 20 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/631ea43d/ambari-server/src/main/java/org/apache/ambari/server/utils/Parallel.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/utils/Parallel.java b/ambari-server/src/main/java/org/apache/ambari/server/utils/Parallel.java
index a67ee5c..9ca039b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/utils/Parallel.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/Parallel.java
@@ -19,22 +19,21 @@ package org.apache.ambari.server.utils;
 
 import java.util.Arrays;
 import java.util.Collections;
-
-import java.util.concurrent.Callable;
-import java.util.concurrent.atomic.AtomicInteger;
+import java.util.LinkedList;
+import java.util.List;
 import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
-import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.ThreadFactory;
-import java.util.List;
-import java.util.LinkedList;
-import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -48,12 +47,14 @@ public class Parallel {
   /**
    * Max pool size
    */
-  private static final int MAX_POOL_SIZE = Math.max(2, Runtime.getRuntime().availableProcessors());
+  private static final int MAX_POOL_SIZE = Math.max(8, Runtime.getRuntime().availableProcessors());
 
   /**
-   * Keep alive time (1 sec)
+   * Keep alive time (15 min)
    */
-  private static final int KEEP_ALIVE_TIME_MILLISECONDS = 1000;
+  // !!! changed from 1 second because EclipseLink was making threads idle and
+  // they kept timing out
+  private static final int KEEP_ALIVE_TIME_MINUTES = 15;
 
   /**
    * Poll duration (10 secs)
@@ -61,6 +62,11 @@ public class Parallel {
   private static final int POLL_DURATION_MILLISECONDS = 10000;
 
   /**
+   * Core pool size
+   */
+  private static final int CORE_POOL_SIZE = 2;
+
+  /**
    * Logger
    */
   private static final Logger LOG = LoggerFactory.getLogger(Parallel.class);
@@ -81,10 +87,10 @@ public class Parallel {
 
     // Create thread pool
     ThreadPoolExecutor threadPool = new ThreadPoolExecutor(
-        0,                                        // Core pool size
+        CORE_POOL_SIZE,                           // Core pool size
         MAX_POOL_SIZE,                            // Max pool size
-        KEEP_ALIVE_TIME_MILLISECONDS,             // Keep alive time for idle threads
-        TimeUnit.MILLISECONDS,
+        KEEP_ALIVE_TIME_MINUTES,                  // Keep alive time for idle threads
+        TimeUnit.MINUTES,
         blockingQueue,                            // Using synchronous queue
         new ParallelLoopsThreadFactory(),         // Thread pool factory to use
         new ThreadPoolExecutor.CallerRunsPolicy() // Rejected tasks will run on calling thread.
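
For reference, the revised pool sizing restated outside Java (Node's os module used purely for illustration):

    var os = require('os');
    var CORE_POOL_SIZE = 2;                             // was 0
    var MAX_POOL_SIZE = Math.max(8, os.cpus().length);  // was max(2, cores)
    var KEEP_ALIVE_TIME_MINUTES = 15;                   // was 1 second
    console.log(CORE_POOL_SIZE, MAX_POOL_SIZE, KEEP_ALIVE_TIME_MINUTES);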


[23/50] [abbrv] ambari git commit: AMBARI-13250. "Customize Services" page loses Smart Configs after clicking "Back" and "Next" buttons. YARN CS config becomes empty and required (srimanth)

Posted by nc...@apache.org.
AMBARI-13250. "Customize Services" page loses Smart Configs after clicking "Back" and "Next" buttons. YARN CS config becomes empty and required (srimanth)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/94baa2f0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/94baa2f0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/94baa2f0

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 94baa2f02540f1d0cb4d2e0cc7fdb3bf6b17fccf
Parents: 474e408
Author: Srimanth Gunturi <sg...@hortonworks.com>
Authored: Sat Sep 26 12:57:36 2015 -0700
Committer: Srimanth Gunturi <sg...@hortonworks.com>
Committed: Sat Sep 26 13:16:22 2015 -0700

----------------------------------------------------------------------
 ambari-web/app/controllers/installer.js | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/94baa2f0/ambari-web/app/controllers/installer.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/installer.js b/ambari-web/app/controllers/installer.js
index f01a8f8..194d862 100644
--- a/ambari-web/app/controllers/installer.js
+++ b/ambari-web/app/controllers/installer.js
@@ -237,6 +237,7 @@ App.InstallerController = App.WizardController.extend({
     App.Section.find().clear();
     App.SubSection.find().clear();
     App.Tab.find().clear();
+    this.set('stackConfigsLoaded', false);
     if (stacks && stacks.get('length')) {
       App.set('currentStackVersion', App.Stack.find().findProperty('isSelected').get('id'));
       dfd.resolve(true);


[04/50] [abbrv] ambari git commit: AMBARI-12951. Prompt user to save checkpoint before shutdown if last checkpoint is too old. (xiwang via yusaku)

Posted by nc...@apache.org.
AMBARI-12951. Prompt user to save checkpoint before shutdown if last checkpoint is too old. (xiwang via yusaku)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a8301564
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a8301564
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a8301564

Branch: refs/heads/branch-dev-patch-upgrade
Commit: a8301564a937ec1486b19d2be344e5c83ae9e28c
Parents: eb2e615
Author: Yusaku Sako <yu...@hortonworks.com>
Authored: Thu Sep 24 12:12:56 2015 -0700
Committer: Yusaku Sako <yu...@hortonworks.com>
Committed: Thu Sep 24 12:12:56 2015 -0700

----------------------------------------------------------------------
 ambari-web/app/controllers/main/service/item.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a8301564/ambari-web/app/controllers/main/service/item.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/item.js b/ambari-web/app/controllers/main/service/item.js
index 89fbb69..ae1a327 100644
--- a/ambari-web/app/controllers/main/service/item.js
+++ b/ambari-web/app/controllers/main/service/item.js
@@ -260,7 +260,7 @@ App.MainServiceItemController = Em.Controller.extend(App.SupportClientConfigsDow
     if (!lastCheckpointTime) {
       this.set("isNNCheckpointTooOld", null);
     } else {
-      var time_criteria = 12; // time in hours to define how many hours ago is too old
+      var time_criteria = App.nnCheckpointAgeAlertThreshold; // time in hours to define how many hours ago is too old
       var time_ago = (Math.round(App.dateTime() / 1000) - (time_criteria * 3600)) *1000;
       if (lastCheckpointTime <= time_ago) {
         // too old, set the effected hostName
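
The staleness cut-off above, in isolation: a checkpoint counts as too old when its timestamp falls at or before now minus the threshold. Illustrative plain JS:

    function checkpointCutoffMillis(nowMillis, thresholdHours) {
      return (Math.round(nowMillis / 1000) - thresholdHours * 3600) * 1000;
    }

    var lastCheckpointTime = Date.now() - 13 * 3600 * 1000; // 13 hours ago
    console.log(lastCheckpointTime <= checkpointCutoffMillis(Date.now(), 12)); // true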


[10/50] [abbrv] ambari git commit: AMBARI-13236. Selecting flume agent doesn't show graphs and metrics (srimanth)

Posted by nc...@apache.org.
AMBARI-13236. Selecting flume agent doesn't show graphs and metrics (srimanth)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/cda68535
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/cda68535
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/cda68535

Branch: refs/heads/branch-dev-patch-upgrade
Commit: cda6853564bd68f38ed25543351247bea58e26b2
Parents: bc94537
Author: Srimanth Gunturi <sg...@hortonworks.com>
Authored: Thu Sep 24 17:48:51 2015 -0700
Committer: Srimanth Gunturi <sg...@hortonworks.com>
Committed: Thu Sep 24 18:29:22 2015 -0700

----------------------------------------------------------------------
 .../service/info/metrics/flume/flume_metric_graphs.js   | 12 ++++++++----
 ambari-web/app/views/main/service/services/flume.js     |  2 +-
 2 files changed, 9 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/cda68535/ambari-web/app/views/main/service/info/metrics/flume/flume_metric_graphs.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/info/metrics/flume/flume_metric_graphs.js b/ambari-web/app/views/main/service/info/metrics/flume/flume_metric_graphs.js
index d38074e..415d423 100644
--- a/ambari-web/app/views/main/service/info/metrics/flume/flume_metric_graphs.js
+++ b/ambari-web/app/views/main/service/info/metrics/flume/flume_metric_graphs.js
@@ -49,11 +49,15 @@ App.MainServiceInfoFlumeGraphsView = App.MainServiceInfoSummaryMetricGraphsView.
     var metricNames = {};
     var metricItems = [];
     if (data != null && data.metrics != null && data.metrics.flume != null && data.metrics.flume.flume != null && data.metrics.flume.flume[metricType] != null) {
-      for (var name in data.metrics.flume.flume[metricType]) {
-        for (var metricName in data.metrics.flume.flume[metricType][name]) {
-          metricNames[metricName] = name;
+      for ( var name in data.metrics.flume.flume[metricType]) {
+        if (data.metrics.flume.flume[metricType].hasOwnProperty(name)) {
+          for ( var metricName in data.metrics.flume.flume[metricType][name]) {
+            if (data.metrics.flume.flume[metricType][name].hasOwnProperty(metricName)) {
+              metricNames[metricName] = name;
+            }
+          }
+          metricItems.push(name);
         }
-        metricItems.push(name);
       }
     }
     // Now that we have collected all metric names, we create
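
The added hasOwnProperty guards matter because for-in also visits enumerable properties inherited through the prototype chain. A self-contained illustration:

    Object.prototype.injected = true;            // e.g. added by some library
    var metricType = { CHANNEL: { ChannelSize: 10 } };
    for (var name in metricType) {
      if (metricType.hasOwnProperty(name)) {
        console.log(name);                       // only CHANNEL, not injected
      }
    }
    delete Object.prototype.injected;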

http://git-wip-us.apache.org/repos/asf/ambari/blob/cda68535/ambari-web/app/views/main/service/services/flume.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/services/flume.js b/ambari-web/app/views/main/service/services/flume.js
index 819e994..013a10b 100644
--- a/ambari-web/app/views/main/service/services/flume.js
+++ b/ambari-web/app/views/main/service/services/flume.js
@@ -187,6 +187,6 @@ App.MainDashboardServiceFlumeView = App.TableView.extend(App.MainDashboardServic
       mockData.id = 'metric' + index;
       mockData.toggleIndex = '#' + mockData.id;
     });
-    this.set('parentView.parentView.collapsedSections', mockMetricData);
+    this.set('parentView.collapsedSections', mockMetricData);
   }
 });


[19/50] [abbrv] ambari git commit: AMBARI-13246. Oozie server fails to start with customized user.(vbrodetskyi)

Posted by nc...@apache.org.
AMBARI-13246. Oozie server fails to start with customized user.(vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/67b9d483
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/67b9d483
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/67b9d483

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 67b9d4839f4260b782ffd19f4f617174a1bc624f
Parents: 631ea43
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Sat Sep 26 00:06:52 2015 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Sat Sep 26 00:06:52 2015 +0300

----------------------------------------------------------------------
 .../src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/67b9d483/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index b825d31..8deff04 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -209,7 +209,7 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
 
       # Remove old properties if user was renamed
       userOldValue = getOldValue(self, services, user_properties["config"], user_properties["propertyName"])
-      if userOldValue is not None:
+      if userOldValue is not None and userOldValue != user_name:
         putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.hosts".format(userOldValue), 'delete', 'true')
         putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.groups".format(userOldValue), 'delete', 'true')
         services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.hosts".format(userOldValue)})


[12/50] [abbrv] ambari git commit: AMBARI-13195. Stack advisor error: argument of type 'NoneType' is not iterable. Second patch (akovalenko)

Posted by nc...@apache.org.
AMBARI-13195. Stack advisor error: argument of type 'NoneType' is not iterable. Second patch (akovalenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/31096c8a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/31096c8a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/31096c8a

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 31096c8ab690e96029e360710db275642c952fb7
Parents: 360e608
Author: Aleksandr Kovalenko <ak...@hortonworks.com>
Authored: Fri Sep 25 16:34:54 2015 +0300
Committer: Aleksandr Kovalenko <ak...@hortonworks.com>
Committed: Fri Sep 25 16:35:34 2015 +0300

----------------------------------------------------------------------
 ambari-web/app/mixins/common/serverValidator.js | 87 +++++++++++---------
 1 file changed, 47 insertions(+), 40 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/31096c8a/ambari-web/app/mixins/common/serverValidator.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/common/serverValidator.js b/ambari-web/app/mixins/common/serverValidator.js
index 469c7cb..46c0b13 100644
--- a/ambari-web/app/mixins/common/serverValidator.js
+++ b/ambari-web/app/mixins/common/serverValidator.js
@@ -243,10 +243,9 @@ App.ServerValidatorMixin = Em.Mixin.create({
     var recommendations = this.get('hostGroups');
     var services = this.get('services');
     var stepConfigs = this.get('stepConfigs');
-    var allConfigTypes = [];
-    var callback = function () {
-      recommendations.blueprint.configurations = blueprintUtils.buildConfigsJSON(services, stepConfigs);
 
+    return this.getBlueprintConfigurations().done(function(blueprintConfigurations){
+      recommendations.blueprint.configurations = blueprintConfigurations;
       return App.ajax.send({
         name: 'config.validations',
         sender: self,
@@ -262,61 +261,69 @@ App.ServerValidatorMixin = Em.Mixin.create({
       }).complete(function () {
         self.warnUser(deferred);
       });
-    };
+    });
+  },
+
+  /**
+   * Return JSON for blueprint configurations
+   * @returns {*}
+   */
+  getBlueprintConfigurations: function () {
+    var dfd = $.Deferred();
+    var services = this.get('services');
+    var stepConfigs = this.get('stepConfigs');
+    var allConfigTypes = [];
 
     services.forEach(function (service) {
       allConfigTypes = allConfigTypes.concat(Em.keys(service.get('configTypes')))
     });
     // check if we have configs from 'cluster-env', if not, then load them, as they are mandatory for validation request
     if (!allConfigTypes.contains('cluster-env')) {
-      services = services.concat(Em.Object.create({
-        serviceName: 'MISC',
-        configTypes: {'cluster-env': {}}
-      }));
-      App.ajax.send({
-        name: 'config.cluster_env_site',
-        sender: self,
-        success: 'getClusterEnvSiteTagSuccess',
-        error: 'validationError'
-      }).complete(function () {
+      this.getClusterEnvConfigsForValidation().done(function(clusterEnvConfigs){
+        services = services.concat(Em.Object.create({
+          serviceName: 'MISC',
+          configTypes: {'cluster-env': {}}
+        }));
         stepConfigs = stepConfigs.concat(Em.Object.create({
           serviceName: 'MISC',
-          configs: self.get('clusterEnvConfigs')
+          configs: clusterEnvConfigs
         }));
-        callback(deferred);
+        dfd.resolve(blueprintUtils.buildConfigsJSON(services, stepConfigs));
       });
     } else {
-      callback(deferred);
+      dfd.resolve(blueprintUtils.buildConfigsJSON(services, stepConfigs));
     }
+    return dfd.promise();
   },
 
-  /**
-   * success callback after getting response from server
-   * convert cluster-env configs to array to be used in validation request
-   * @param data
-   */
-  getClusterEnvSiteTagSuccess: function (data) {
-    var self = this;
-    App.router.get('configurationController').getConfigsByTags([{
-      siteName: data.items[0].type,
-      tagName: data.items[0].tag
-    }]).done(function (clusterEnvConfigs) {
-      var configsObject = clusterEnvConfigs[0].properties;
-      var configsArray = [];
-      for (var property in configsObject) {
-        if (configsObject.hasOwnProperty(property)) {
-          configsArray.push(Em.Object.create({
-            name: property,
-            value: configsObject[property],
-            filename: 'cluster-env.xml'
-          }));
+  getClusterEnvConfigsForValidation: function () {
+    var dfd = $.Deferred();
+    App.ajax.send({
+      name: 'config.cluster_env_site',
+      sender: this,
+      error: 'validationError'
+    }).done(function (data) {
+      App.router.get('configurationController').getConfigsByTags([{
+        siteName: data.items[0].type,
+        tagName: data.items[0].tag
+      }]).done(function (clusterEnvConfigs) {
+        var configsObject = clusterEnvConfigs[0].properties;
+        var configsArray = [];
+        for (var property in configsObject) {
+          if (configsObject.hasOwnProperty(property)) {
+            configsArray.push(Em.Object.create({
+              name: property,
+              value: configsObject[property],
+              filename: 'cluster-env.xml'
+            }));
+          }
         }
-      }
-      self.set('clusterEnvConfigs', configsArray);
+        dfd.resolve(configsArray);
+      });
     });
+    return dfd.promise();
   },
 
-
   /**
    * @method validationSuccess
    * success callback after getting response from server
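
The refactor replaces a stored callback with deferreds that resolve to the built configurations, so each consumer just chains on the returned promise. The shape of the pattern in miniature (a native Promise stands in for jQuery's $.Deferred, and the payload is illustrative):

    function getBlueprintConfigurations() {
      return new Promise(function (resolve) {
        // resolve with whatever blueprintUtils.buildConfigsJSON would produce
        resolve({ 'cluster-env': { properties: {} } });
      });
    }

    getBlueprintConfigurations().then(function (blueprintConfigurations) {
      console.log(Object.keys(blueprintConfigurations)); // [ 'cluster-env' ]
    });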


[16/50] [abbrv] ambari git commit: Revert "AMBARI-13229. Detect non-compliant python versions and do not attempt to start Ambari Agent (aonishuk)"

Posted by nc...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py b/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
index 84b8967..031e0ac 100644
--- a/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
+++ b/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
@@ -165,7 +165,7 @@ class TestSparkThriftServer(RMFTestCase):
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'spark-thriftserver', version), sudo=True)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'spark-thriftserver', version), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)


[47/50] [abbrv] ambari git commit: AMBARI-13269 Ambari map/reduce/AM java opts do not change if memory settings are modified (dsen)

Posted by nc...@apache.org.
AMBARI-13269 Ambari map/reduce/AM java opts do not change if memory settings are modified (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/50099dd4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/50099dd4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/50099dd4

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 50099dd40c72f07f55cbe60a0426f9e70e2102bb
Parents: d4edf46
Author: Dmytro Sen <ds...@apache.org>
Authored: Tue Sep 29 21:00:43 2015 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Tue Sep 29 21:00:43 2015 +0300

----------------------------------------------------------------------
 .../stacks/HDP/2.0.6/services/stack_advisor.py  | 11 +++++++--
 .../stacks/2.2/common/test_stack_advisor.py     | 24 +++++++++++++-------
 2 files changed, 25 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/50099dd4/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index 8deff04..c4450b6 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -105,12 +105,19 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     if"properties" not in config[configType]:
       config[configType]["properties"] = {}
     def appendProperty(key, value):
-      if {'type': configType, 'name': key} in changedConfigs:
+      # If property exists in changedConfigs, do not override, use user defined property
+      if self.__isPropertyInChangedConfigs(configType, key, changedConfigs):
         config[configType]["properties"][key] = userConfigs[configType]['properties'][key]
       else:
         config[configType]["properties"][key] = str(value)
     return appendProperty
 
+  def __isPropertyInChangedConfigs(self, configType, propertyName, changedConfigs):
+    for changedConfig in changedConfigs:
+      if changedConfig['type']==configType and changedConfig['name']==propertyName:
+        return True
+    return False
+
   def putPropertyAttribute(self, config, configType):
     if configType not in config:
       config[configType] = {}
@@ -1237,4 +1244,4 @@ def getMemorySizeRequired(components, configurations):
   return totalMemoryRequired
 
 def round_to_n(mem_size, n=128):
-  return int(round(mem_size / float(n))) * int(n)
\ No newline at end of file
+  return int(round(mem_size / float(n))) * int(n)
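
The root cause fixed above: once changed-configurations entries started carrying old_value, the exact-literal membership test {'type': ..., 'name': ...} in changedConfigs no longer matched, so user-defined values were overridden. The replacement compares only the two relevant fields; the same idea in plain JS:

    function isPropertyInChangedConfigs(configType, propertyName, changedConfigs) {
      return changedConfigs.some(function (c) {
        return c.type === configType && c.name === propertyName;
      });
    }

    var changed = [{ type: 'yarn-site',
                     name: 'yarn.scheduler.minimum-allocation-mb',
                     old_value: '512' }];
    console.log(isPropertyInChangedConfigs(
      'yarn-site', 'yarn.scheduler.minimum-allocation-mb', changed)); // true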

http://git-wip-us.apache.org/repos/asf/ambari/blob/50099dd4/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
index 054bf96..97cf4ca 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
@@ -837,15 +837,18 @@ class TestHDP22StackAdvisor(TestCase):
       "changed-configurations": [
         {
           "type": "yarn-site",
-          "name": "yarn.nodemanager.resource.memory-mb"
+          "name": "yarn.nodemanager.resource.memory-mb",
+          "old_value": "512"
         },
         {
           "type": "yarn-site",
-          "name": "yarn.scheduler.minimum-allocation-mb"
+          "name": "yarn.scheduler.minimum-allocation-mb",
+          "old_value": "512"
         },
         {
           "type": "yarn-site",
-          "name": "yarn.scheduler.maximum-allocation-mb"
+          "name": "yarn.scheduler.maximum-allocation-mb",
+          "old_value": "512"
         },
         {
           "type": "yarn-site",
@@ -926,7 +929,8 @@ class TestHDP22StackAdvisor(TestCase):
     configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.percentage-physical-cpu-limit"] = '0.5'
     services["changed-configurations"].append({
           "type": "yarn-site",
-          "name": "yarn.nodemanager.resource.percentage-physical-cpu-limit"
+          "name": "yarn.nodemanager.resource.percentage-physical-cpu-limit",
+          "old_value": "6"
         })
     expected["yarn-site"]["properties"]["yarn.nodemanager.resource.cpu-vcores"] = '5'
     expected["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-vcores"] = '1'
@@ -1600,7 +1604,8 @@ class TestHDP22StackAdvisor(TestCase):
       "changed-configurations": [
         {
           "type": "yarn-site",
-          "name": "yarn.scheduler.minimum-allocation-mb"
+          "name": "yarn.scheduler.minimum-allocation-mb",
+          "old_value": "512"
         },
         ]
 
@@ -1836,7 +1841,8 @@ class TestHDP22StackAdvisor(TestCase):
       "changed-configurations": [
         {
           "type": "yarn-site",
-          "name": "yarn.scheduler.minimum-allocation-mb"
+          "name": "yarn.scheduler.minimum-allocation-mb",
+          "old_value": "512"
         },
       ]
 
@@ -2114,7 +2120,8 @@ class TestHDP22StackAdvisor(TestCase):
     services['changed-configurations'] = [
       {
         "type": "ams-hbase-env",
-        "name": "hbase_master_heapsize"
+        "name": "hbase_master_heapsize",
+        "old_value": "1024"
       }
     ]
 
@@ -2155,7 +2162,8 @@ class TestHDP22StackAdvisor(TestCase):
     services['changed-configurations'] = [
       {
         "type": "ams-hbase-env",
-        "name": "hbase_regionserver_heapsize"
+        "name": "hbase_regionserver_heapsize",
+        "old_value": "512"
       }
     ]
     services["configurations"]['ams-hbase-site']['properties']['hbase.rootdir'] = 'hdfs://host1/amshbase'


[46/50] [abbrv] ambari git commit: AMBARI-13270. Service tabs are not visible on Customize service page of SAPHD (akovalenko)

Posted by nc...@apache.org.
AMBARI-13270. Service tabs are not visible on Customize service page of SAPHD (akovalenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d4edf461
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d4edf461
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d4edf461

Branch: refs/heads/branch-dev-patch-upgrade
Commit: d4edf4619c1c0bb309920ba86e66012a2a2e7090
Parents: a1488ce
Author: Aleksandr Kovalenko <ak...@hortonworks.com>
Authored: Tue Sep 29 18:31:38 2015 +0300
Committer: Aleksandr Kovalenko <ak...@hortonworks.com>
Committed: Tue Sep 29 18:31:38 2015 +0300

----------------------------------------------------------------------
 .../resources/SAPHD/custom_ui.less              | 21 ++++++++++++--------
 .../templates/common/configs/service_config.hbs |  2 +-
 ambari-web/app/templates/main/dashboard.hbs     |  2 +-
 .../views/main/admin/stack_upgrade/menu_view.js |  2 +-
 ambari-web/app/views/main/host/menu.js          |  2 +-
 ambari-web/app/views/main/service/info/menu.js  |  2 +-
 6 files changed, 18 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d4edf461/ambari-common/src/main/python/pluggable_stack_definition/resources/SAPHD/custom_ui.less
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/pluggable_stack_definition/resources/SAPHD/custom_ui.less b/ambari-common/src/main/python/pluggable_stack_definition/resources/SAPHD/custom_ui.less
index 055a680..c8aba39 100644
--- a/ambari-common/src/main/python/pluggable_stack_definition/resources/SAPHD/custom_ui.less
+++ b/ambari-common/src/main/python/pluggable_stack_definition/resources/SAPHD/custom_ui.less
@@ -36,16 +36,21 @@ body {
   &.text-success {
     color: @green-on-background;
   }
-}
 
-// Text color for not-active nav tab
-.nav-tabs > li > a {
-  color: @text-on-background;
-}
+  // Text color for not-active nav tab
+  &.nav-tabs > li > a {
+    color: @text-on-background;
+  }
 
-// Text color for hovered nav tab
-.nav-tabs > li:hover > a {
-  color: #000000;
+  // Text color for hovered nav tab
+  &.nav-tabs > li:hover > a {
+    color: #000000;
+  }
+
+  // Text color for active nav tab
+  &.nav-tabs > li.active > a {
+    color: #000000;
+  }
 }
 
 // Text and caret color for quick links dropdown (not active)

http://git-wip-us.apache.org/repos/asf/ambari/blob/d4edf461/ambari-web/app/templates/common/configs/service_config.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/common/configs/service_config.hbs b/ambari-web/app/templates/common/configs/service_config.hbs
index 72c7c35..29b05fe 100644
--- a/ambari-web/app/templates/common/configs/service_config.hbs
+++ b/ambari-web/app/templates/common/configs/service_config.hbs
@@ -109,7 +109,7 @@
     </div>
   {{/unless}}
   {{#if view.supportsConfigLayout}}
-    <ul class="nav nav-tabs mbm config-tabs">
+    <ul class="nav nav-tabs mbm config-tabs background-text">
       {{#each tab in view.tabs}}
         <li rel='tooltip' {{bindAttr class="tab.isActive:active tab.isHiddenByFilter:disabled" data-original-title="tab.tooltipMsg"}}>
           <a href="#" {{action "setActiveTab" tab target="view"}} {{bindAttr data-target="tab.headingClass"}} data-toggle="tab">

http://git-wip-us.apache.org/repos/asf/ambari/blob/d4edf461/ambari-web/app/templates/main/dashboard.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/dashboard.hbs b/ambari-web/app/templates/main/dashboard.hbs
index 707c5df..f020162 100644
--- a/ambari-web/app/templates/main/dashboard.hbs
+++ b/ambari-web/app/templates/main/dashboard.hbs
@@ -22,7 +22,7 @@
     {{view App.AllServicesActionView}}
   </div>
     <div class="summary-width span10" id="dashboard-widgets-container">
-      <ul class="nav nav-tabs">
+      <ul class="nav nav-tabs background-text">
         {{#each category in view.categories}}
           {{#view view.NavItemView itemBinding="category.name" }}
             <a href="#" {{action "goToDashboardView" category.url}} >{{category.label}}</a>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d4edf461/ambari-web/app/views/main/admin/stack_upgrade/menu_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/stack_upgrade/menu_view.js b/ambari-web/app/views/main/admin/stack_upgrade/menu_view.js
index 1006013..1e84f1c 100644
--- a/ambari-web/app/views/main/admin/stack_upgrade/menu_view.js
+++ b/ambari-web/app/views/main/admin/stack_upgrade/menu_view.js
@@ -20,7 +20,7 @@ var App = require('app');
 
 App.MainAdminStackMenuView = Em.CollectionView.extend({
   tagName: 'ul',
-  classNames: ["nav", "nav-tabs"],
+  classNames: ["nav", "nav-tabs", "background-text"],
   defaultRoute: 'services',
 
   content: function () {

http://git-wip-us.apache.org/repos/asf/ambari/blob/d4edf461/ambari-web/app/views/main/host/menu.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/host/menu.js b/ambari-web/app/views/main/host/menu.js
index 92c123c..15608d1 100644
--- a/ambari-web/app/views/main/host/menu.js
+++ b/ambari-web/app/views/main/host/menu.js
@@ -20,7 +20,7 @@ var App = require('app');
 
 App.MainHostMenuView = Em.CollectionView.extend({
   tagName: 'ul',
-  classNames: ["nav", "nav-tabs"],
+  classNames: ["nav", "nav-tabs", "background-text"],
   host: null,
 
   content: function () {

http://git-wip-us.apache.org/repos/asf/ambari/blob/d4edf461/ambari-web/app/views/main/service/info/menu.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/info/menu.js b/ambari-web/app/views/main/service/info/menu.js
index c2577ca..3de23fd 100644
--- a/ambari-web/app/views/main/service/info/menu.js
+++ b/ambari-web/app/views/main/service/info/menu.js
@@ -20,7 +20,7 @@ var App = require('app');
 
 App.MainServiceInfoMenuView = Em.CollectionView.extend({
   tagName: 'ul',
-  classNames: ["nav", "nav-tabs"],
+  classNames: ["nav", "nav-tabs", "background-text"],
   content:function(){
     var menuItems = [
       { label: Em.I18n.t('services.service.info.menu.summary'), id: 'summary-service-tab',routing:'summary', active:"active"}


[34/50] [abbrv] ambari git commit: AMBARI-13251. RU - HDFS_Client restart and hdp-select causes dfs_data_dir_mount.hist to be lost, move file to static location (alejandro)

Posted by nc...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.2/configs/ranger-usersync-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/ranger-usersync-upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/ranger-usersync-upgrade.json
index fa4d614..977749c 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/ranger-usersync-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/ranger-usersync-upgrade.json
@@ -612,7 +612,6 @@
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"", 
             "hdfs_user": "hdfs", 
             "namenode_opt_newsize": "256m", 
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
             "hadoop_root_logger": "INFO,RFA", 
             "hadoop_heapsize": "1024", 
             "namenode_opt_maxpermsize": "256m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.2/configs/spark-job-history-server.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/spark-job-history-server.json b/ambari-server/src/test/python/stacks/2.2/configs/spark-job-history-server.json
index 185d4d5..ea617fa 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/spark-job-history-server.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/spark-job-history-server.json
@@ -88,7 +88,6 @@
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"", 
             "hdfs_user": "hdfs", 
             "namenode_opt_newsize": "256m", 
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
             "hadoop_root_logger": "INFO,RFA", 
             "hadoop_heapsize": "1024", 
             "namenode_opt_maxpermsize": "256m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.3/configs/hbase_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/hbase_default.json b/ambari-server/src/test/python/stacks/2.3/configs/hbase_default.json
index f5a4487..d8f05fd 100644
--- a/ambari-server/src/test/python/stacks/2.3/configs/hbase_default.json
+++ b/ambari-server/src/test/python/stacks/2.3/configs/hbase_default.json
@@ -205,7 +205,6 @@
             "namenode_opt_maxpermsize": "256m", 
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:M
 axPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DR
 FAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environme
 nt.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of
  hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-cl
 ient/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"", 
             "namenode_heapsize": "1024m", 
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
             "namenode_opt_newsize": "256m", 
             "nfsgateway_heapsize": "1024", 
             "dtnode_heapsize": "1024m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.3/configs/hbase_secure.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/hbase_secure.json b/ambari-server/src/test/python/stacks/2.3/configs/hbase_secure.json
index d31c621..24ab446 100644
--- a/ambari-server/src/test/python/stacks/2.3/configs/hbase_secure.json
+++ b/ambari-server/src/test/python/stacks/2.3/configs/hbase_secure.json
@@ -482,7 +482,6 @@
             "namenode_opt_maxpermsize": "256m", 
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:M
 axPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DR
 FAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environme
 nt.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of
  hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-cl
 ient/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"", 
             "namenode_heapsize": "1024m", 
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
             "namenode_opt_newsize": "256m", 
             "nfsgateway_heapsize": "1024", 
             "dtnode_heapsize": "1024m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.3/configs/spark_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/spark_default.json b/ambari-server/src/test/python/stacks/2.3/configs/spark_default.json
index 730a81b..9f3fb90 100644
--- a/ambari-server/src/test/python/stacks/2.3/configs/spark_default.json
+++ b/ambari-server/src/test/python/stacks/2.3/configs/spark_default.json
@@ -175,7 +175,6 @@
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"", 
             "hdfs_user": "hdfs", 
             "namenode_opt_newsize": "256m", 
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
             "hadoop_root_logger": "INFO,RFA", 
             "hadoop_heapsize": "1024", 
             "namenode_opt_maxpermsize": "256m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/resources/custom_actions/ru_execute_tasks_namenode_prepare.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/custom_actions/ru_execute_tasks_namenode_prepare.json b/ambari-server/src/test/resources/custom_actions/ru_execute_tasks_namenode_prepare.json
index d42d01c..60cab52 100644
--- a/ambari-server/src/test/resources/custom_actions/ru_execute_tasks_namenode_prepare.json
+++ b/ambari-server/src/test/resources/custom_actions/ru_execute_tasks_namenode_prepare.json
@@ -175,7 +175,6 @@
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"", 
             "hdfs_user": "hdfs", 
             "namenode_opt_newsize": "256m", 
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
             "hadoop_root_logger": "INFO,RFA", 
             "hadoop_heapsize": "1024", 
             "namenode_opt_maxpermsize": "256m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-web/app/assets/data/configurations/config_versions.json
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/data/configurations/config_versions.json b/ambari-web/app/assets/data/configurations/config_versions.json
index 603d33e..c9af200 100644
--- a/ambari-web/app/assets/data/configurations/config_versions.json
+++ b/ambari-web/app/assets/data/configurations/config_versions.json
@@ -326,7 +326,6 @@
           "version" : 1,
           "properties" : {
             "content" : "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options append
 ed to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateSta
 mps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The followin
 g applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HA
 DOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share
 /java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP
 _OPTS\"",
-            "dfs.datanode.data.dir.mount.file" : "/etc/hadoop/conf/dfs_data_dir_mount.hist",
             "dtnode_heapsize" : "1024m",
             "hadoop_heapsize" : "1024",
             "hadoop_pid_dir_prefix" : "/var/run/hadoop",
@@ -544,7 +543,6 @@
           "version" : 1,
           "properties" : {
             "content" : "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options append
 ed to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateSta
 mps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The followin
 g applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HA
 DOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share
 /java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP
 _OPTS\"",
-            "dfs.datanode.data.dir.mount.file" : "/etc/hadoop/conf/dfs_data_dir_mount.hist",
             "dtnode_heapsize" : "1024m",
             "hadoop_heapsize" : "1024",
             "hadoop_pid_dir_prefix" : "/var/run/hadoop",
@@ -826,7 +824,6 @@
           "version" : 1,
           "properties" : {
             "content" : "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options append
 ed to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateSta
 mps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The followin
 g applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HA
 DOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share
 /java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP
 _OPTS\"",
-            "dfs.datanode.data.dir.mount.file" : "/etc/hadoop/conf/dfs_data_dir_mount.hist",
             "dtnode_heapsize" : "1024m",
             "hadoop_heapsize" : "1024",
             "hadoop_pid_dir_prefix" : "/var/run/hadoop",

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-web/app/assets/data/configurations/service_versions.json
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/data/configurations/service_versions.json b/ambari-web/app/assets/data/configurations/service_versions.json
index c5eb46c..9851a83 100644
--- a/ambari-web/app/assets/data/configurations/service_versions.json
+++ b/ambari-web/app/assets/data/configurations/service_versions.json
@@ -182,7 +182,6 @@
           "version" : 2,
           "properties" : {
             "content" : "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by defau
 lt.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix
 }}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}
 /$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HA
 DOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n
 # The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d \"/usr/lib/tez\" ]; then\n  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64",
-            "dfs.datanode.data.dir.mount.file" : "/etc/hadoop/conf/dfs_data_dir_mount.hist",
             "dtnode_heapsize" : "1024m",
             "hadoop_heapsize" : "1024",
             "hadoop_pid_dir_prefix" : "/var/run/hadoop",
@@ -225,7 +224,6 @@
           "version" : 1,
           "properties" : {
             "content" : "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by defau
 lt.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix
 }}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}
 /$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HA
 DOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n
 # The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d \"/usr/lib/tez\" ]; then\n  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64",
-            "dfs.datanode.data.dir.mount.file" : "/etc/hadoop/conf/dfs_data_dir_mount.hist",
             "dtnode_heapsize" : "1024m",
             "hadoop_heapsize" : "1024",
             "hadoop_pid_dir_prefix" : "/var/run/hadoop",
@@ -397,7 +395,6 @@
           "version" : 2,
           "properties" : {
             "content" : "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by defau
 lt.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix
 }}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}
 /$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HA
 DOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n
 # The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d \"/usr/lib/tez\" ]; then\n  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64",
-            "dfs.datanode.data.dir.mount.file" : "/etc/hadoop/conf/dfs_data_dir_mount.hist",
             "dtnode_heapsize" : "1024m",
             "hadoop_heapsize" : "1024",
             "hadoop_pid_dir_prefix" : "/var/run/hadoop",

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-web/app/assets/data/stacks/HDP-2.2/configurations.json
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/data/stacks/HDP-2.2/configurations.json b/ambari-web/app/assets/data/stacks/HDP-2.2/configurations.json
index 0399656..6a80523 100644
--- a/ambari-web/app/assets/data/stacks/HDP-2.2/configurations.json
+++ b/ambari-web/app/assets/data/stacks/HDP-2.2/configurations.json
@@ -4297,20 +4297,6 @@
           }
         },
         {
-          "href" : "http://c6401:8080/api/v1/stacks/HDP/versions/2.2/services/HDFS/configurations/dfs.datanode.data.dir.mount.file",
-          "StackConfigurations" : {
-            "final" : "false",
-            "property_description" : "File path that contains the last known mount point for each data dir. This file is used to avoid creating a DFS data dir on the root drive (and filling it up) if a path was previously mounted on a drive.",
-            "property_name" : "dfs.datanode.data.dir.mount.file",
-            "property_type" : [ ],
-            "property_value" : "/etc/hadoop/conf/dfs_data_dir_mount.hist",
-            "service_name" : "HDFS",
-            "stack_name" : "HDP",
-            "stack_version" : "2.2",
-            "type" : "hadoop-env.xml"
-          }
-        },
-        {
           "href" : "http://c6401:8080/api/v1/stacks/HDP/versions/2.2/services/HDFS/configurations/dfs.datanode.data.dir.perm",
           "StackConfigurations" : {
             "final" : "false",

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-web/app/data/BIGTOP/site_properties.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/BIGTOP/site_properties.js b/ambari-web/app/data/BIGTOP/site_properties.js
index 7b0e128..a52e6a6 100644
--- a/ambari-web/app/data/BIGTOP/site_properties.js
+++ b/ambari-web/app/data/BIGTOP/site_properties.js
@@ -73,18 +73,6 @@ module.exports =
       "index": 3
     },
     {
-      "name": "dfs.datanode.data.dir.mount.file",
-      "displayName": "File that stores mount point for each data dir",
-      "description": "File path that contains the last known mount point for each data dir. This file is used to avoid creating a DFS data dir on the root drive (and filling it up) if a path was previously mounted on a drive.",
-      "recommendedValue": "/etc/hadoop/conf/dfs_data_dir_mount.hist",
-      "displayType": "directory",
-      "isVisible": true,
-      "category": "DATANODE",
-      "serviceName": "HDFS",
-      "filename": "hadoop-env.xml",
-      "index": 4
-    },
-    {
       "name": "dfs.datanode.data.dir",
       "displayName": "DataNode directories",
       "displayType": "directories",

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-web/app/data/HDP2/site_properties.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/HDP2/site_properties.js b/ambari-web/app/data/HDP2/site_properties.js
index c72c570..2f99772 100644
--- a/ambari-web/app/data/HDP2/site_properties.js
+++ b/ambari-web/app/data/HDP2/site_properties.js
@@ -61,14 +61,6 @@ var hdp2properties = [
     "index": 3
   },
   {
-    "name": "dfs.datanode.data.dir.mount.file",
-    "displayType": "directory",
-    "category": "DATANODE",
-    "serviceName": "HDFS",
-    "filename": "hadoop-env.xml",
-    "index": 4
-  },
-  {
     "name": "dfs.datanode.data.dir",
     "displayType": "directories",
     "category": "DATANODE",

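The hunks above finish the same cleanup on the web side: with the history-file path now fixed inside the agent, dfs.datanode.data.dir.mount.file disappears from the web test fixtures (config_versions.json, service_versions.json, the HDP-2.2 stack configurations) and from the BIGTOP and HDP2 site-property metadata, so the UI no longer renders or validates the property.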

[49/50] [abbrv] ambari git commit: AMBARI-13227. Debian 7. Host check does not show warning about THP enabled on hosts. (aonishuk)

Posted by nc...@apache.org.
AMBARI-13227. Debian 7. Host check does not show warning about THP enabled on hosts. (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b3d36fb5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b3d36fb5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b3d36fb5

Branch: refs/heads/branch-dev-patch-upgrade
Commit: b3d36fb5c7dcd498bb48c629c810a74161c139f3
Parents: e203fae
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Tue Sep 29 17:11:21 2015 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Wed Sep 30 15:59:37 2015 +0300

----------------------------------------------------------------------
 .../src/main/python/ambari_agent/HostInfo.py    | 14 +++++++++----
 .../test/python/ambari_agent/TestHostInfo.py    | 22 ++++++++++++++++++++
 .../custom_actions/scripts/check_host.py        | 13 ++++++++----
 .../test/python/custom_actions/TestCheckHost.py |  1 +
 4 files changed, 42 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b3d36fb5/ambari-agent/src/main/python/ambari_agent/HostInfo.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/HostInfo.py b/ambari-agent/src/main/python/ambari_agent/HostInfo.py
index 3d7125a..49f8417 100644
--- a/ambari-agent/src/main/python/ambari_agent/HostInfo.py
+++ b/ambari-agent/src/main/python/ambari_agent/HostInfo.py
@@ -157,7 +157,8 @@ class HostInfoLinux(HostInfo):
   DEFAULT_SERVICE_NAME = "ntpd"
   SERVICE_STATUS_CMD = "%s %s status" % (SERVICE_CMD, DEFAULT_SERVICE_NAME)
 
-  THP_FILE = "/sys/kernel/mm/redhat_transparent_hugepage/enabled"
+  THP_FILE_REDHAT = "/sys/kernel/mm/redhat_transparent_hugepage/enabled"
+  THP_FILE_UBUNTU = "/sys/kernel/mm/transparent_hugepage/enabled"
 
   def __init__(self, config=None):
     super(HostInfoLinux, self).__init__(config)
@@ -216,10 +217,15 @@ class HostInfoLinux(HostInfo):
     pass
 
   def getTransparentHugePage(self):
-    # This file exist only on redhat 6
     thp_regex = "\[(.+)\]"
-    if os.path.isfile(self.THP_FILE):
-      with open(self.THP_FILE) as f:
+    file_name = None
+    if OSCheck.is_ubuntu_family():
+      file_name = self.THP_FILE_UBUNTU
+    elif OSCheck.is_redhat_family():
+      file_name = self.THP_FILE_REDHAT
+
+    if file_name:
+      with open(file_name) as f:
         file_content = f.read()
         return re.search(thp_regex, file_content).groups()[0]
     else:

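Both THP files report the kernel's active policy by bracketing it (for example "always madvise [never]"), which is why the same "\[(.+)\]" regex serves the Red Hat and Ubuntu locations alike. A minimal standalone sketch of the parsing step, using a hypothetical parse_thp_state helper that is not part of the patch:

import re

THP_REGEX = r"\[(.+)\]"

def parse_thp_state(file_content):
    # /sys/kernel/mm/transparent_hugepage/enabled typically reads
    # "always madvise [never]"; the bracketed word is the active policy.
    match = re.search(THP_REGEX, file_content)
    return match.groups()[0] if match else ""

assert parse_thp_state("always madvise [never]") == "never"
assert parse_thp_state("[always] madvise never") == "always"
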
http://git-wip-us.apache.org/repos/asf/ambari/blob/b3d36fb5/ambari-agent/src/test/python/ambari_agent/TestHostInfo.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestHostInfo.py b/ambari-agent/src/test/python/ambari_agent/TestHostInfo.py
index 57e4224..a982f5f 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestHostInfo.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestHostInfo.py
@@ -537,5 +537,27 @@ class TestHostInfo(TestCase):
     os_path_isfile_mock.return_value = False
     self.assertEqual("", hostInfo.getTransparentHugePage())
 
+  @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = ('debian','7','Final')))
+  @patch("os.path.isfile")
+  @patch('__builtin__.open')
+  def test_transparent_huge_page_debian(self, open_mock, os_path_isfile_mock):
+    context_manager_mock = MagicMock()
+    open_mock.return_value = context_manager_mock
+    file_mock = MagicMock()
+    file_mock.read.return_value = "[never] always"
+    enter_mock = MagicMock()
+    enter_mock.return_value = file_mock
+    exit_mock  = MagicMock()
+    setattr( context_manager_mock, '__enter__', enter_mock )
+    setattr( context_manager_mock, '__exit__', exit_mock )
+
+    hostInfo = HostInfoLinux()
+
+    os_path_isfile_mock.return_value = True
+    self.assertEqual("never", hostInfo.getTransparentHugePage())
+
+    os_path_isfile_mock.return_value = False
+    self.assertEqual("", hostInfo.getTransparentHugePage())
+
 if __name__ == "__main__":
   unittest.main()

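The test assembles the context-manager protocol by hand with MagicMock; the mock library's mock_open helper (available in the mock package since 1.0) can express the same stub in one line. A sketch of an equivalent happy-path test, assuming the module's existing OSCheck and HostInfoLinux imports:

from mock import patch, mock_open, MagicMock

@patch.object(OSCheck, "os_distribution", new=MagicMock(return_value=('debian', '7', 'Final')))
def test_transparent_huge_page_debian_compact(self):
    # mock_open wires __enter__/__exit__ and read() in a single call
    with patch('__builtin__.open', mock_open(read_data="[never] always")):
        self.assertEqual("never", HostInfoLinux().getTransparentHugePage())
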
http://git-wip-us.apache.org/repos/asf/ambari/blob/b3d36fb5/ambari-server/src/main/resources/custom_actions/scripts/check_host.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/custom_actions/scripts/check_host.py b/ambari-server/src/main/resources/custom_actions/scripts/check_host.py
index a1c93b1..9aa3afb 100644
--- a/ambari-server/src/main/resources/custom_actions/scripts/check_host.py
+++ b/ambari-server/src/main/resources/custom_actions/scripts/check_host.py
@@ -69,7 +69,8 @@ JARS_PATH_IN_ARCHIVE_SQLA = "/sqla-client-jdbc/java"
 LIBS_PATH_IN_ARCHIVE_SQLA = "/sqla-client-jdbc/native/lib64"
 JDBC_DRIVER_SQLA_JAR_PATH_IN_ARCHIVE = "/sqla-client-jdbc/java/" + JDBC_DRIVER_SQLA_JAR
 
-THP_FILE = "/sys/kernel/mm/redhat_transparent_hugepage/enabled"
+THP_FILE_REDHAT = "/sys/kernel/mm/redhat_transparent_hugepage/enabled"
+THP_FILE_UBUNTU = "/sys/kernel/mm/transparent_hugepage/enabled"
 
 class CheckHost(Script):
   # Packages that are used to find repos (then repos are used to find other packages)
@@ -165,10 +166,14 @@ class CheckHost(Script):
     # Here we are checking transparent huge page if CHECK_TRANSPARENT_HUGE_PAGE is in check_execute_list
     if CHECK_TRANSPARENT_HUGE_PAGE in check_execute_list:
       try :
-        # This file exist only on redhat 6
         thp_regex = "\[(.+)\]"
-        if os.path.isfile(THP_FILE):
-          with open(THP_FILE) as f:
+        file_name = None
+        if OSCheck.is_ubuntu_family():
+          file_name = THP_FILE_UBUNTU
+        elif OSCheck.is_redhat_family():
+          file_name = THP_FILE_REDHAT
+        if os.path.isfile(file_name):
+          with open(file_name) as f:
             file_content = f.read()
             structured_output[CHECK_TRANSPARENT_HUGE_PAGE] = {"exit_code" : 0, "message": str(re.search(thp_regex,
                                                                                             file_content).groups()[0])}

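One behavioral difference from the HostInfoLinux change above: this branch has no guard for a host that is neither Ubuntu- nor Red Hat-family, so file_name stays None and os.path.isfile(None) raises TypeError on Python 2, leaving the surrounding try block to report an error rather than skipping the check cleanly. A sketch of a more defensive variant (an illustration, not what the patch commits), assuming the OSCheck helper that check_host.py already imports:

import os
import re

THP_FILE_REDHAT = "/sys/kernel/mm/redhat_transparent_hugepage/enabled"
THP_FILE_UBUNTU = "/sys/kernel/mm/transparent_hugepage/enabled"

def read_thp_state():
    thp_regex = r"\[(.+)\]"
    file_name = None
    if OSCheck.is_ubuntu_family():
        file_name = THP_FILE_UBUNTU
    elif OSCheck.is_redhat_family():
        file_name = THP_FILE_REDHAT
    # file_name may still be None on other OS families: skip, don't raise
    if file_name and os.path.isfile(file_name):
        with open(file_name) as f:
            return re.search(thp_regex, f.read()).groups()[0]
    return ""
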
http://git-wip-us.apache.org/repos/asf/ambari/blob/b3d36fb5/ambari-server/src/test/python/custom_actions/TestCheckHost.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/custom_actions/TestCheckHost.py b/ambari-server/src/test/python/custom_actions/TestCheckHost.py
index b21254a..1e45927 100644
--- a/ambari-server/src/test/python/custom_actions/TestCheckHost.py
+++ b/ambari-server/src/test/python/custom_actions/TestCheckHost.py
@@ -344,6 +344,7 @@ class TestCheckHost(TestCase):
     pass
 
 
+  @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = ('debian','7','Final')))
   @patch.object(HostCheckReportFileHandler, "resolve_ambari_config")
   @patch("resource_management.libraries.script.Script.put_structured_out")
   @patch.object(Script, 'get_tmp_dir')

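The single added decorator pins OSCheck.os_distribution to ('debian', '7', 'Final') so that the is_ubuntu_family()/is_redhat_family() calls newly introduced in check_host.py resolve the same way regardless of the OS the test suite itself runs on.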

[37/50] [abbrv] ambari git commit: AMBARI-13262 NFSGateway reports wrong count after manual stack upgrade. (atkach)

Posted by nc...@apache.org.
AMBARI-13262 NFSGateway reports wrong count after manual stack upgrade. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4119f0e5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4119f0e5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4119f0e5

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 4119f0e5588ee3603efa6e6e57a7856e86b3efe9
Parents: 6a5a6a3
Author: Andrii Tkach <at...@hortonworks.com>
Authored: Mon Sep 28 21:33:36 2015 +0300
Committer: Andrii Tkach <at...@hortonworks.com>
Committed: Mon Sep 28 21:34:04 2015 +0300

----------------------------------------------------------------------
 ambari-web/app/models/service/hdfs.js | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4119f0e5/ambari-web/app/models/service/hdfs.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/service/hdfs.js b/ambari-web/app/models/service/hdfs.js
index dbeefe8..cacbd65 100644
--- a/ambari-web/app/models/service/hdfs.js
+++ b/ambari-web/app/models/service/hdfs.js
@@ -30,9 +30,9 @@ App.HDFSService = App.Service.extend({
   dataNodesStarted: DS.attr('number'),
   dataNodesInstalled: DS.attr('number'),
   dataNodesTotal: DS.attr('number'),
-  nfsGatewaysStarted: DS.attr('number'),
-  nfsGatewaysInstalled: DS.attr('number'),
-  nfsGatewaysTotal: DS.attr('number'),
+  nfsGatewaysStarted: DS.attr('number', {defaultValue: 0}),
+  nfsGatewaysInstalled: DS.attr('number', {defaultValue: 0}),
+  nfsGatewaysTotal: DS.attr('number', {defaultValue: 0}),
   journalNodes: DS.hasMany('App.HostComponent'),
   nameNodeStartTime: DS.attr('number'),
   jvmMemoryHeapUsed: DS.attr('number'),

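The fix leans on standard Ember Data semantics: with DS.attr('number', {defaultValue: 0}), an attribute that the service payload omits resolves to 0 instead of undefined. After a manual stack upgrade the NFSGateway counters can be absent from the response until the component is reported again, so the HDFS summary now shows a zero count instead of an undefined one.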

[36/50] [abbrv] ambari git commit: AMBARI-13251. RU - HDFS_Client restart and hdp-select causes dfs_data_dir_mount.hist to be lost, move file to static location (alejandro)

Posted by nc...@apache.org.
AMBARI-13251. RU - HDFS_Client restart and hdp-select causes dfs_data_dir_mount.hist to be lost, move file to static location (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6a5a6a39
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6a5a6a39
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6a5a6a39

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 6a5a6a3963035a9967dc82cd5a1795c23dca2bc3
Parents: 4d44ec7
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Mon Sep 28 11:13:19 2015 -0700
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Mon Sep 28 11:16:21 2015 -0700

----------------------------------------------------------------------
 .../src/main/python/ambari_agent/Controller.py  | 28 ++++++++++++++++
 .../resource_management/TestDatanodeHelper.py   |  4 +--
 .../server/upgrade/UpgradeCatalog212.java       | 18 +++++++++++
 .../HDFS/2.1.0.2.0/configuration/hadoop-env.xml |  7 ----
 .../alerts/alert_datanode_unmounted_data_dir.py | 15 +++------
 .../2.1.0.2.0/package/scripts/hdfs_datanode.py  | 12 +++++--
 .../2.1.0.2.0/package/scripts/params_linux.py   |  2 +-
 .../services/HDFS/configuration/hadoop-env.xml  |  5 ---
 .../0.8/services/HDFS/package/scripts/params.py |  2 +-
 .../services/HDFS/configuration/hadoop-env.xml  |  7 ----
 .../test_alert_datanode_unmounted_data_dir.py   | 34 ++++----------------
 .../python/stacks/2.0.6/HDFS/test_datanode.py   | 16 +++++++--
 .../stacks/2.0.6/configs/altfs_plus_hdfs.json   |  3 +-
 .../stacks/2.0.6/configs/client-upgrade.json    |  1 -
 .../python/stacks/2.0.6/configs/default.json    |  3 +-
 .../2.0.6/configs/default_hive_nn_ha.json       |  3 +-
 .../2.0.6/configs/default_hive_nn_ha_2.json     |  3 +-
 .../2.0.6/configs/default_hive_non_hdfs.json    |  3 +-
 .../2.0.6/configs/default_no_install.json       |  3 +-
 .../2.0.6/configs/default_oozie_mysql.json      |  3 +-
 .../default_update_exclude_file_only.json       |  3 +-
 .../python/stacks/2.0.6/configs/flume_22.json   |  3 +-
 .../python/stacks/2.0.6/configs/hbase-2.2.json  |  1 -
 .../stacks/2.0.6/configs/hbase-check-2.2.json   |  1 -
 .../stacks/2.0.6/configs/hbase-rs-2.2.json      |  1 -
 .../stacks/2.0.6/configs/hbase_no_phx.json      |  3 +-
 .../stacks/2.0.6/configs/hbase_with_phx.json    |  3 +-
 .../python/stacks/2.0.6/configs/nn_ru_lzo.json  |  1 -
 .../2.0.6/configs/oozie_existing_sqla.json      |  3 +-
 .../2.0.6/configs/ranger-namenode-start.json    |  1 -
 .../python/stacks/2.0.6/configs/secured.json    |  3 +-
 .../stacks/2.1/configs/client-upgrade.json      |  1 -
 .../test/python/stacks/2.2/configs/default.json |  1 -
 .../2.2/configs/default_custom_path_config.json |  1 -
 .../stacks/2.2/configs/falcon-upgrade.json      |  1 -
 .../python/stacks/2.2/configs/hive-upgrade.json |  1 -
 .../journalnode-upgrade-hdfs-secure.json        |  1 -
 .../stacks/2.2/configs/journalnode-upgrade.json |  1 -
 .../python/stacks/2.2/configs/knox_upgrade.json |  1 -
 .../stacks/2.2/configs/oozie-downgrade.json     |  1 -
 .../stacks/2.2/configs/oozie-upgrade.json       |  1 -
 .../2.2/configs/pig-service-check-secure.json   |  1 -
 .../2.2/configs/ranger-admin-default.json       |  1 -
 .../2.2/configs/ranger-admin-upgrade.json       |  1 -
 .../2.2/configs/ranger-usersync-upgrade.json    |  1 -
 .../2.2/configs/spark-job-history-server.json   |  1 -
 .../stacks/2.3/configs/hbase_default.json       |  1 -
 .../python/stacks/2.3/configs/hbase_secure.json |  1 -
 .../stacks/2.3/configs/spark_default.json       |  1 -
 .../ru_execute_tasks_namenode_prepare.json      |  1 -
 .../data/configurations/config_versions.json    |  3 --
 .../data/configurations/service_versions.json   |  3 --
 .../data/stacks/HDP-2.2/configurations.json     | 14 --------
 ambari-web/app/data/BIGTOP/site_properties.js   | 12 -------
 ambari-web/app/data/HDP2/site_properties.js     |  8 -----
 55 files changed, 97 insertions(+), 157 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-agent/src/main/python/ambari_agent/Controller.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/Controller.py b/ambari-agent/src/main/python/ambari_agent/Controller.py
index 8746172..74a8eac 100644
--- a/ambari-agent/src/main/python/ambari_agent/Controller.py
+++ b/ambari-agent/src/main/python/ambari_agent/Controller.py
@@ -28,6 +28,7 @@ import threading
 import urllib2
 import pprint
 from random import randint
+import subprocess
 
 import hostname
 import security
@@ -45,6 +46,8 @@ from ambari_agent.ClusterConfiguration import  ClusterConfiguration
 from ambari_agent.RecoveryManager import  RecoveryManager
 from ambari_agent.HeartbeatHandlers import HeartbeatStopHandlers, bind_signal_handlers
 from ambari_agent.ExitHelper import ExitHelper
+from resource_management.libraries.functions.version import compare_versions
+
 logger = logging.getLogger(__name__)
 
 AGENT_AUTO_RESTART_EXIT_CODE = 77
@@ -96,6 +99,8 @@ class Controller(threading.Thread):
 
     self.cluster_configuration = ClusterConfiguration(cluster_config_cache_dir)
 
+    self.move_data_dir_mount_file()
+
     self.alert_scheduler_handler = AlertSchedulerHandler(alerts_cache_dir, 
       stacks_cache_dir, common_services_cache_dir, host_scripts_cache_dir,
       self.cluster_configuration, config)
@@ -435,6 +440,29 @@ class Controller(threading.Thread):
     logger.debug("LiveStatus.CLIENT_COMPONENTS" + str(LiveStatus.CLIENT_COMPONENTS))
     logger.debug("LiveStatus.COMPONENTS" + str(LiveStatus.COMPONENTS))
 
+  def move_data_dir_mount_file(self):
+    """
+    In Ambari 2.1.2, we moved the dfs_data_dir_mount.hist to a static location
+    because /etc/hadoop/conf points to a symlink'ed location that would change during
+    Rolling Upgrade.
+    """
+    try:
+      if compare_versions(self.version, "2.1.2") >= 0:
+        source_file = "/etc/hadoop/conf/dfs_data_dir_mount.hist"
+        destination_file = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
+        if os.path.exists(source_file) and not os.path.exists(destination_file):
+          command = "mkdir -p %s" % os.path.dirname(destination_file)
+          logger.info("Moving Data Dir Mount History file. Executing command: %s" % command)
+          return_code = subprocess.call(command, shell=True)
+          logger.info("Return code: %d" % return_code)
+
+          command = "mv %s %s" % (source_file, destination_file)
+          logger.info("Moving Data Dir Mount History file. Executing command: %s" % command)
+          return_code = subprocess.call(command, shell=True)
+          logger.info("Return code: %d" % return_code)
+    except Exception, e:
+      logger.info("Exception in move_data_dir_mount_file(). Error: {0}".format(str(e)))
+
 def main(argv=None):
   # Allow Ctrl-C
 

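The migration shells out to mkdir -p and mv; for reference, the same run-once move can be written in plain Python with os.makedirs and shutil.move. A minimal sketch under the same paths and the same run-once condition:

import os
import shutil

SOURCE_FILE = "/etc/hadoop/conf/dfs_data_dir_mount.hist"
DESTINATION_FILE = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"

def move_data_dir_mount_file():
    # Migrate exactly once: old file present, new file absent.
    if os.path.exists(SOURCE_FILE) and not os.path.exists(DESTINATION_FILE):
        destination_dir = os.path.dirname(DESTINATION_FILE)
        if not os.path.isdir(destination_dir):
            os.makedirs(destination_dir)  # the "mkdir -p" step
        shutil.move(SOURCE_FILE, DESTINATION_FILE)
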
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-agent/src/test/python/resource_management/TestDatanodeHelper.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/resource_management/TestDatanodeHelper.py b/ambari-agent/src/test/python/resource_management/TestDatanodeHelper.py
index 70539ac..2d9996c 100644
--- a/ambari-agent/src/test/python/resource_management/TestDatanodeHelper.py
+++ b/ambari-agent/src/test/python/resource_management/TestDatanodeHelper.py
@@ -59,7 +59,7 @@ class TestDatanodeHelper(TestCase):
   grid2 = "/grid/2/data"
 
   params = StubParams()
-  params.data_dir_mount_file = "/etc/hadoop/conf/dfs_data_dir_mount.hist"
+  params.data_dir_mount_file = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
   params.dfs_data_dir = "{0},{1},{2}".format(grid0, grid1, grid2)
 
 
@@ -70,7 +70,7 @@ class TestDatanodeHelper(TestCase):
     Test that the data dirs are normalized by removing leading and trailing whitespace, and case sensitive.
     """
     params = StubParams()
-    params.data_dir_mount_file = "/etc/hadoop/conf/dfs_data_dir_mount.hist"
+    params.data_dir_mount_file = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
     params.dfs_data_dir = "/grid/0/data  ,  /grid/1/data  ,/GRID/2/Data/"
 
     # Function under test

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
index 37a87ab..342e280 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
@@ -199,6 +199,7 @@ public class UpgradeCatalog212 extends AbstractUpgradeCatalog {
     updateHbaseAndClusterConfigurations();
     updateKafkaConfigurations();
     updateStormConfigs();
+    removeDataDirMountConfig();
   }
 
   protected void updateStormConfigs() throws AmbariException {
@@ -381,4 +382,21 @@ public class UpgradeCatalog212 extends AbstractUpgradeCatalog {
     dbAccessor.addColumn(HOST_ROLE_COMMAND_TABLE,
         new DBColumnInfo(HOST_ROLE_COMMAND_SKIP_COLUMN, Integer.class, 1, 0, false));
   }
+
+  protected void removeDataDirMountConfig() throws AmbariException {
+    Set<String> properties = new HashSet<>();
+    properties.add("dfs.datanode.data.dir.mount.file");
+
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    Clusters clusters = ambariManagementController.getClusters();
+
+    if (clusters != null) {
+      Map<String, Cluster> clusterMap = clusters.getClusters();
+      if (clusterMap != null && !clusterMap.isEmpty()) {
+        for (final Cluster cluster : clusterMap.values()) {
+          removeConfigurationPropertiesFromCluster(cluster, "hadoop-env", properties);
+        }
+      }
+    }
+  }
 }

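On the server side, removeDataDirMountConfig() completes the migration for clusters that already exist: during the 2.1.2 catalog upgrade it strips the now-unused dfs.datanode.data.dir.mount.file property from each cluster's hadoop-env config, mirroring the stack-definition removal in the next hunk.
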
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
index 8cae26c..5d1d976 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
@@ -174,13 +174,6 @@
     <description>User to run HDFS as</description>
   </property>
   <property>
-    <name>dfs.datanode.data.dir.mount.file</name>
-    <value>/etc/hadoop/conf/dfs_data_dir_mount.hist</value>
-    <display-name>File that stores mount point for each data dir</display-name>
-    <description>File path that contains the last known mount point for each data dir. This file is used to avoid creating a DFS data dir on the root drive (and filling it up) if a path was previously mounted on a drive.</description>
-  </property>
-
-  <property>
     <name>hdfs_user_nofile_limit</name>
     <value>128000</value>
     <description>Max open files limit setting for HDFS user.</description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_datanode_unmounted_data_dir.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_datanode_unmounted_data_dir.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_datanode_unmounted_data_dir.py
index 2912406..f95daac 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_datanode_unmounted_data_dir.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_datanode_unmounted_data_dir.py
@@ -30,7 +30,7 @@ RESULT_STATE_CRITICAL = 'CRITICAL'
 RESULT_STATE_UNKNOWN = 'UNKNOWN'
 
 DFS_DATA_DIR = '{{hdfs-site/dfs.datanode.data.dir}}'
-DATA_DIR_MOUNT_FILE = '{{hadoop-env/dfs.datanode.data.dir.mount.file}}'
+DATA_DIR_MOUNT_FILE = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
 
 logger = logging.getLogger()
 
@@ -62,23 +62,16 @@ def execute(configurations={}, parameters={}, host_name=None):
   if DFS_DATA_DIR not in configurations:
     return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(DFS_DATA_DIR)])
 
-  if DATA_DIR_MOUNT_FILE not in configurations:
-    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(DATA_DIR_MOUNT_FILE)])
-
   dfs_data_dir = configurations[DFS_DATA_DIR]
-  data_dir_mount_file = configurations[DATA_DIR_MOUNT_FILE]
 
   if dfs_data_dir is None:
     return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script and the value is null'.format(DFS_DATA_DIR)])
 
-  if data_dir_mount_file is None:
-    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script and the value is null'.format(DATA_DIR_MOUNT_FILE)])
-
   data_dir_mount_file_exists = True
   # This follows symlinks and will return False for a broken link (even in the middle of the linked list)
-  if not os.path.exists(data_dir_mount_file):
+  if not os.path.exists(DATA_DIR_MOUNT_FILE):
     data_dir_mount_file_exists = False
-    warnings.append("File not found, {0} .".format(data_dir_mount_file))
+    warnings.append("File not found, {0} .".format(DATA_DIR_MOUNT_FILE))
 
   valid_data_dirs = set()            # data dirs that have been normalized
   data_dirs_not_exist = set()        # data dirs that do not exist
@@ -129,7 +122,7 @@ def execute(configurations={}, parameters={}, host_name=None):
     class Params:
       def __init__(self, mount_file):
         self.data_dir_mount_file = mount_file
-    params = Params(data_dir_mount_file)
+    params = Params(DATA_DIR_MOUNT_FILE)
 
     # This dictionary contains the expected values of <data_dir, mount_point>
     # Hence, we only need to analyze the data dirs that are currently on the root partition

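With the history-file location baked in as a module constant, the alert drops the {{hadoop-env/dfs.datanode.data.dir.mount.file}} token from its required configurations, so it can no longer return UNKNOWN for a missing parameter; a history file that does not exist on disk still only contributes a warning, as before.
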
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py
index 34ec8cd..e225927 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py
@@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
-
+import os
 from resource_management import *
 from resource_management.libraries.functions.dfs_datanode_helper import handle_dfs_data_dir
 from utils import service
@@ -48,11 +48,19 @@ def datanode(action=None):
               owner=params.hdfs_user,
               group=params.user_group)
 
+    if not os.path.isdir(os.path.dirname(params.data_dir_mount_file)):
+      Directory(os.path.dirname(params.data_dir_mount_file),
+                recursive=True,
+                mode=0755,
+                owner=params.hdfs_user,
+                group=params.user_group)
+
+    data_dir_to_mount_file_content = handle_dfs_data_dir(create_dirs, params)
     File(params.data_dir_mount_file,
          owner=params.hdfs_user,
          group=params.user_group,
          mode=0644,
-         content=handle_dfs_data_dir(create_dirs, params)
+         content=data_dir_to_mount_file_content
     )
 
   elif action == "start" or action == "stop":
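
The hunk above adds two things: the parent directory /var/lib/ambari-agent/data/datanode is created before the history file is written (presumably because its old home /etc/hadoop/conf already existed), and handle_dfs_data_dir() is evaluated before the File resource is declared. The same ordering in plain Python, as a hedged stand-in for the Directory/File resources (hypothetical function, not part of the commit):

    import os

    DATA_DIR_MOUNT_FILE = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"

    def write_mount_history(content):
        parent = os.path.dirname(DATA_DIR_MOUNT_FILE)
        # Create the agent-side parent directory first; it does not exist by default.
        if not os.path.isdir(parent):
            os.makedirs(parent, 0755)
        # Content is computed before the write, mirroring the reordering above.
        with open(DATA_DIR_MOUNT_FILE, 'w') as f:
            f.write(content)
        os.chmod(DATA_DIR_MOUNT_FILE, 0644)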

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index 4302976..563c234 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -236,7 +236,7 @@ fs_checkpoint_dirs = default("/configurations/hdfs-site/dfs.namenode.checkpoint.
 dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
 dfs_data_dir = ",".join([re.sub(r'^\[.+\]', '', dfs_dir.strip()) for dfs_dir in dfs_data_dir.split(",")])
 
-data_dir_mount_file = config['configurations']['hadoop-env']['dfs.datanode.data.dir.mount.file']
+data_dir_mount_file = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
 
 # HDFS High Availability properties
 dfs_ha_enabled = False

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hadoop-env.xml
index c6dd202..2737bf4 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hadoop-env.xml
@@ -83,11 +83,6 @@
     <property-type>USER</property-type>
     <description>User to run HDFS as</description>
   </property>
-  <property>
-    <name>dfs.datanode.data.dir.mount.file</name>
-    <value>/etc/hadoop/conf/dfs_data_dir_mount.hist</value>
-    <description>File path that contains the last known mount point for each data dir. This file is used to avoid creating a DFS data dir on the root drive (and filling it up) if a path was previously mounted on a drive.</description>
-  </property>
 
   <!-- hadoop-env.sh -->
   <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
index fb0a4db..e302685 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
@@ -140,7 +140,7 @@ namenode_formatted_mark_dir = format("/var/lib/hdfs/namenode/formatted/")
 fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir']
 
 dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
-data_dir_mount_file = config['configurations']['hadoop-env']['dfs.datanode.data.dir.mount.file']
+data_dir_mount_file = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
 
 dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
 dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml
index 243b4ff..8433005 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml
@@ -46,13 +46,6 @@
     </value-attributes>
   </property>
   <property>
-    <name>dfs.datanode.data.dir.mount.file</name>
-    <value>file:///c:/hadoop/conf/dfs_data_dir_mount.hist</value>
-    <display-name>File that stores mount point for each data dir</display-name>
-    <description>File path that contains the last known mount point for each data dir. This file is used to avoid creating a DFS data dir on the root drive (and filling it up) if a path was previously mounted on a drive.</description>
-  </property>
-
-  <property>
     <name>proxyuser_group</name>
     <deleted>true</deleted>
   </property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_alert_datanode_unmounted_data_dir.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_alert_datanode_unmounted_data_dir.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_alert_datanode_unmounted_data_dir.py
index 4406231..16400b1 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_alert_datanode_unmounted_data_dir.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_alert_datanode_unmounted_data_dir.py
@@ -30,7 +30,7 @@ from stacks.utils.RMFTestCase import *
 import resource_management.libraries.functions.file_system
 
 COMMON_SERVICES_ALERTS_DIR = "HDFS/2.1.0.2.0/package/alerts"
-DATA_DIR_MOUNT_HIST_FILE_PATH = "/etc/hadoop/conf/dfs_data_dir_mount.hist"
+DATA_DIR_MOUNT_HIST_FILE_PATH = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
 
 file_path = os.path.dirname(os.path.abspath(__file__))
 file_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(file_path)))))
@@ -69,23 +69,6 @@ class TestAlertDataNodeUnmountedDataDir(RMFTestCase):
       "{{hdfs-site/dfs.datanode.data.dir}}": ""
     }
     [status, messages] = alert.execute(configurations=configs)
-    self.assertEqual(status, RESULT_STATE_UNKNOWN)
-    self.assertTrue(messages is not None and len(messages) == 1)
-    self.assertTrue('is a required parameter for the script' in messages[0])
-
-    configs = {
-      "{{hadoop-env/dfs.datanode.data.dir.mount.file}}": DATA_DIR_MOUNT_HIST_FILE_PATH
-    }
-    [status, messages] = alert.execute(configurations=configs)
-    self.assertEqual(status, RESULT_STATE_UNKNOWN)
-    self.assertTrue(messages is not None and len(messages) == 1)
-    self.assertTrue('is a required parameter for the script' in messages[0])
-
-    configs = {
-      "{{hdfs-site/dfs.datanode.data.dir}}": "",
-      "{{hadoop-env/dfs.datanode.data.dir.mount.file}}": DATA_DIR_MOUNT_HIST_FILE_PATH
-    }
-    [status, messages] = alert.execute(configurations=configs)
     self.assertNotEqual(status, RESULT_STATE_UNKNOWN)
 
   @patch("resource_management.libraries.functions.file_system.get_mount_point_for_dir")
@@ -97,8 +80,7 @@ class TestAlertDataNodeUnmountedDataDir(RMFTestCase):
     does not exist.
     """
     configs = {
-      "{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data",
-      "{{hadoop-env/dfs.datanode.data.dir.mount.file}}": DATA_DIR_MOUNT_HIST_FILE_PATH
+      "{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data"
     }
 
     # Mock calls
@@ -121,8 +103,7 @@ class TestAlertDataNodeUnmountedDataDir(RMFTestCase):
     and this coincides with the expected values.
     """
     configs = {
-      "{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data,/grid/1/data,/grid/2/data",
-      "{{hadoop-env/dfs.datanode.data.dir.mount.file}}": DATA_DIR_MOUNT_HIST_FILE_PATH
+      "{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data,/grid/1/data,/grid/2/data"
     }
 
     # Mock calls
@@ -147,8 +128,7 @@ class TestAlertDataNodeUnmountedDataDir(RMFTestCase):
     Test that the status is OK when the mount points match the expected values.
     """
     configs = {
-      "{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data,/grid/1/data,/grid/2/data",
-      "{{hadoop-env/dfs.datanode.data.dir.mount.file}}": DATA_DIR_MOUNT_HIST_FILE_PATH
+      "{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data,/grid/1/data,/grid/2/data"
     }
 
     # Mock calls
@@ -174,8 +154,7 @@ class TestAlertDataNodeUnmountedDataDir(RMFTestCase):
     and at least one data dir is on a mount and at least one data dir is on the root partition.
     """
     configs = {
-      "{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data,/grid/1/data,/grid/2/data,/grid/3/data",
-      "{{hadoop-env/dfs.datanode.data.dir.mount.file}}": DATA_DIR_MOUNT_HIST_FILE_PATH
+      "{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data,/grid/1/data,/grid/2/data,/grid/3/data"
     }
 
     # Mock calls
@@ -199,8 +178,7 @@ class TestAlertDataNodeUnmountedDataDir(RMFTestCase):
     became unmounted.
     """
     configs = {
-      "{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data,/grid/1/data,/grid/2/data,/grid/3/data",
-      "{{hadoop-env/dfs.datanode.data.dir.mount.file}}": DATA_DIR_MOUNT_HIST_FILE_PATH
+      "{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data,/grid/1/data,/grid/2/data,/grid/3/data"
     }
 
     # Mock calls
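
The deleted assertions above all covered the now-removed mount-file parameter; what survives is a single gate on {{hdfs-site/dfs.datanode.data.dir}}. A self-contained sketch of that contract (hypothetical required_state() helper, not the RMFTestCase harness):

    DFS_DATA_DIR = '{{hdfs-site/dfs.datanode.data.dir}}'

    def required_state(configurations):
        # UNKNOWN only when the data-dir key is absent or null;
        # an empty string still passes the gate, as the first test now asserts.
        if DFS_DATA_DIR not in configurations or configurations[DFS_DATA_DIR] is None:
            return 'UNKNOWN'
        return 'CHECKED'

    assert required_state({}) == 'UNKNOWN'
    assert required_state({DFS_DATA_DIR: ''}) != 'UNKNOWN'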

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
index 1b4bec6..0ec1104 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
@@ -349,6 +349,12 @@ class TestDatanode(RMFTestCase):
                               mode = 0751,
                               recursive = True,
                               )
+    self.assertResourceCalled('Directory', '/var/lib/ambari-agent/data/datanode',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0755,
+                              recursive = True
+    )
     self.assertResourceCalled('Directory', '/hadoop/hdfs/data',
                               owner = 'hdfs',
                               ignore_failures = True,
@@ -358,7 +364,7 @@ class TestDatanode(RMFTestCase):
                               cd_access='a'
                               )
     content = resource_management.libraries.functions.dfs_datanode_helper.DATA_DIR_TO_MOUNT_HEADER
-    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs_data_dir_mount.hist',
+    self.assertResourceCalled('File', '/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist',
                               owner = 'hdfs',
                               group = 'hadoop',
                               mode = 0644,
@@ -421,6 +427,12 @@ class TestDatanode(RMFTestCase):
                               mode = 0751,
                               recursive = True,
                               )
+    self.assertResourceCalled('Directory', '/var/lib/ambari-agent/data/datanode',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0755,
+                              recursive = True
+    )
     self.assertResourceCalled('Directory', '/hadoop/hdfs/data',
                               owner = 'hdfs',
                               ignore_failures = True,
@@ -430,7 +442,7 @@ class TestDatanode(RMFTestCase):
                               cd_access='a'
                               )
     content = resource_management.libraries.functions.dfs_datanode_helper.DATA_DIR_TO_MOUNT_HEADER
-    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs_data_dir_mount.hist',
+    self.assertResourceCalled('File', '/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist',
                               owner = 'hdfs',
                               group = 'hadoop',
                               mode = 0644,

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json b/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
index f6c9bb4..3a5a1c6 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
@@ -451,8 +451,7 @@
             "dtnode_heapsize": "1024m", 
             "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist"
+            "hadoop_pid_dir_prefix": "/var/run/hadoop"
         },
         "hive-env": {
             "hcat_pid_dir": "/var/run/webhcat", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json b/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
index c13e5c9..8e9f81f 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
@@ -378,7 +378,6 @@
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"", 
             "hdfs_user": "hdfs", 
             "namenode_opt_newsize": "200m", 
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
             "hadoop_root_logger": "INFO,RFA", 
             "hadoop_heapsize": "1024", 
             "namenode_opt_maxpermsize": "256m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index a0da7f0..57365b6 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -507,8 +507,7 @@
             "dtnode_heapsize": "1024m", 
             "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist"
+            "hadoop_pid_dir_prefix": "/var/run/hadoop"
         },
         "hive-env": {
             "hcat_pid_dir": "/var/run/webhcat", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
index b644411..591f561 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
@@ -255,8 +255,7 @@
             "dtnode_heapsize": "1024m", 
             "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist"
+            "hadoop_pid_dir_prefix": "/var/run/hadoop"
         },
         "hive-env": {
             "hcat_pid_dir": "/var/run/webhcat", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
index 770e085..c66d479 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
@@ -257,8 +257,7 @@
             "dtnode_heapsize": "1024m", 
             "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist"
+            "hadoop_pid_dir_prefix": "/var/run/hadoop"
         },
         "hive-env": {
             "hcat_pid_dir": "/var/run/webhcat", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
index 988f38e..093cb1e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
@@ -507,8 +507,7 @@
             "dtnode_heapsize": "1024m", 
             "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist"
+            "hadoop_pid_dir_prefix": "/var/run/hadoop"
         },
         "hive-env": {
             "hcat_pid_dir": "/var/run/webhcat", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
index 4cf647b..403c48f 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
@@ -480,8 +480,7 @@
             "dtnode_heapsize": "1024m", 
             "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist"
+            "hadoop_pid_dir_prefix": "/var/run/hadoop"
         },
         "hive-env": {
             "hcat_pid_dir": "/var/run/webhcat", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json
index 286a728..fc8b118 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json
@@ -449,8 +449,7 @@
             "dtnode_heapsize": "1024m", 
             "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist"
+            "hadoop_pid_dir_prefix": "/var/run/hadoop"
         },
         "hive-env": {
             "hcat_pid_dir": "/var/run/webhcat", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
index b2fd6e8..6a09aff 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
@@ -441,8 +441,7 @@
             "dtnode_heapsize": "1024m", 
             "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist"
+            "hadoop_pid_dir_prefix": "/var/run/hadoop"
         },
         "hive-env": {
             "hcat_pid_dir": "/var/run/webhcat", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json
index 4f30caa..32b40f1 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json
@@ -441,8 +441,7 @@
             "dtnode_heapsize": "1024m", 
             "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist"
+            "hadoop_pid_dir_prefix": "/var/run/hadoop"
         },
         "hive-env": {
             "hcat_pid_dir": "/var/run/webhcat", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
index e7a516f..f7f8bba 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
@@ -360,7 +360,6 @@
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"", 
             "hdfs_user": "hdfs", 
             "namenode_opt_newsize": "200m", 
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
             "hadoop_root_logger": "INFO,RFA", 
             "hadoop_heapsize": "1024", 
             "namenode_opt_maxpermsize": "256m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
index 83120eb..3cba607 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
@@ -355,7 +355,6 @@
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"", 
             "hdfs_user": "hdfs", 
             "namenode_opt_newsize": "200m", 
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
             "hadoop_root_logger": "INFO,RFA", 
             "hadoop_heapsize": "1024", 
             "namenode_opt_maxpermsize": "256m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
index 349a1cf..5060f2c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
@@ -360,7 +360,6 @@
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"", 
             "hdfs_user": "hdfs", 
             "namenode_opt_newsize": "200m", 
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
             "hadoop_root_logger": "INFO,RFA", 
             "hadoop_heapsize": "1024", 
             "namenode_opt_maxpermsize": "256m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
index b71c4c8..6fd2933 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
@@ -207,8 +207,7 @@
             "dtnode_heapsize": "1024m", 
             "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist"
+            "hadoop_pid_dir_prefix": "/var/run/hadoop"
         },
         "hive-env": {
             "hcat_pid_dir": "/var/run/webhcat", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
index a7187f9..0a1cbaa 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
@@ -207,8 +207,7 @@
             "dtnode_heapsize": "1024m", 
             "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist"
+            "hadoop_pid_dir_prefix": "/var/run/hadoop"
         },
         "hive-env": {
             "hcat_pid_dir": "/var/run/webhcat", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json
index a026259..9c8c0e2 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json
@@ -152,7 +152,6 @@
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"", 
             "hdfs_user": "hdfs", 
             "namenode_opt_newsize": "256m", 
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
             "hadoop_root_logger": "INFO,RFA", 
             "hadoop_heapsize": "1024", 
             "namenode_opt_maxpermsize": "256m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json b/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
index 01856df..6424403 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
@@ -449,8 +449,7 @@
             "dtnode_heapsize": "1024m",
             "proxyuser_group": "users",
             "hadoop_heapsize": "1024",
-            "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist"
+            "hadoop_pid_dir_prefix": "/var/run/hadoop"
         },
         "hive-env": {
             "hcat_pid_dir": "/var/run/webhcat",

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/ranger-namenode-start.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ranger-namenode-start.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ranger-namenode-start.json
index 84c798b..2a40a46 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ranger-namenode-start.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ranger-namenode-start.json
@@ -234,7 +234,6 @@
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"",
             "hdfs_user": "hdfs",
             "namenode_opt_newsize": "256m",
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist",
             "hadoop_root_logger": "INFO,RFA",
             "hadoop_heapsize": "1024",
             "namenode_opt_maxpermsize": "256m",

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
index c9e20e2..a2c41e4 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
@@ -568,8 +568,7 @@
             "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
             "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab",
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist"
+            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab"
         },
         "hive-env": {
             "hcat_pid_dir": "/var/run/webhcat", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.1/configs/client-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/client-upgrade.json b/ambari-server/src/test/python/stacks/2.1/configs/client-upgrade.json
index ca0b1a6..11e18ff 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/client-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/client-upgrade.json
@@ -349,7 +349,6 @@
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"", 
             "hdfs_user": "hdfs", 
             "namenode_opt_newsize": "200m", 
-            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
             "hadoop_root_logger": "INFO,RFA", 
             "hadoop_heapsize": "1024", 
             "namenode_opt_maxpermsize": "256m", 


[24/50] [abbrv] ambari git commit: AMBARI-13249: Fix unit test failure in trunk (jluniya)

Posted by nc...@apache.org.
AMBARI-13249: Fix unit test failure in trunk (jluniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/bb1491f0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/bb1491f0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/bb1491f0

Branch: refs/heads/branch-dev-patch-upgrade
Commit: bb1491f073fd011b91dd58225ff8513923e2629d
Parents: 94baa2f
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Sat Sep 26 13:25:42 2015 -0700
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Sat Sep 26 13:25:42 2015 -0700

----------------------------------------------------------------------
 ambari-server/src/test/python/TestAmbariServer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/bb1491f0/ambari-server/src/test/python/TestAmbariServer.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestAmbariServer.py b/ambari-server/src/test/python/TestAmbariServer.py
index d4a1450..056dac8 100644
--- a/ambari-server/src/test/python/TestAmbariServer.py
+++ b/ambari-server/src/test/python/TestAmbariServer.py
@@ -2911,7 +2911,7 @@ class TestAmbariServer(TestCase):
   @patch("ambari_server.dbConfiguration_linux.run_os_command")
   def test_get_postgre_status(self, run_os_command_mock):
 
-    run_os_command_mock.return_value = (1, "running", None)
+    run_os_command_mock.return_value = (0, "running", None)
     pg_status, retcode, out, err = PGConfig._get_postgre_status()
     self.assertEqual("running", pg_status)
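
For context, a minimal sketch of the behaviour this mock exercises. The (retcode, stdout, stderr) tuple shape and the four-value result are taken from the test above; the parsing itself is an assumption, not the shipped PGConfig code:

# Hypothetical sketch (not the shipped implementation): a zero retcode from
# run_os_command marks a successful status probe, which is why the mock must
# return 0 for the "running" assertion to hold.
def _get_postgre_status_sketch(run_os_command):
  retcode, out, err = run_os_command("service postgresql status")
  pg_status = None
  if retcode == 0 and out:
    pg_status = "running" if "running" in out else None
  return pg_status, retcode, out, err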
 


[07/50] [abbrv] ambari git commit: AMBARI-13210: RU - Install version stuck (jluniya)

Posted by nc...@apache.org.
AMBARI-13210: RU - Install version stuck (jluniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/23bf111a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/23bf111a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/23bf111a

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 23bf111a00391dcd3cee4e92d19a136fbdc3ca3c
Parents: 93f86a4
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Thu Sep 24 15:55:36 2015 -0700
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Thu Sep 24 15:55:36 2015 -0700

----------------------------------------------------------------------
 .../libraries/functions/hdp_select.py           |  19 ++-
 .../libraries/functions/version_select_util.py  |  20 ++-
 .../DistributeRepositoriesActionListener.java   |  13 +-
 .../custom_actions/scripts/install_packages.py  | 128 +++++++++++++++----
 .../custom_actions/TestInstallPackages.py       |  36 ++++--
 5 files changed, 171 insertions(+), 45 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/23bf111a/ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py
index 3113c86..5efc07e 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py
@@ -18,6 +18,7 @@ limitations under the License.
 
 """
 
+import os
 import sys
 from resource_management.core.logger import Logger
 from resource_management.core.exceptions import Fail
@@ -27,6 +28,7 @@ from resource_management.libraries.functions.get_hdp_version import get_hdp_vers
 from resource_management.libraries.script.script import Script
 from resource_management.core.shell import call
 from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version_select_util import get_versions_from_stack_root
 
 HDP_SELECT_PREFIX = ('ambari-python-wrap', 'hdp-select')
 # hdp-select set oozie-server 2.2.0.0-1234
@@ -240,12 +242,19 @@ def _get_upgrade_stack():
   return None
 
 
-def get_hdp_versions():
+def get_hdp_versions(stack_root):
+  """
+  Gets the list of stack versions installed on the host.
+  By default a call to "hdp-select versions" is made to get the list of installed stack versions.
+  As a fallback, the list of installed versions is collected from the stack version directories in the stack install root.
+  :param stack_root: Stack install root
+  :return: Returns list of installed stack versions.
+  """
   code, out = call(HDP_SELECT_PREFIX + ('versions',))
+  versions = []
   if 0 == code:
-    versions = []
     for line in out.splitlines():
       versions.append(line.rstrip('\n'))
-    return versions
-  else:
-    return []
+  if not versions:
+    versions = get_versions_from_stack_root(stack_root)
+  return versions
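
For illustration, a minimal sketch of the new fallback path, with the hdp-select call simulated as failing; /usr/hdp is an assumed stack root:

from resource_management.libraries.functions.version_select_util import get_versions_from_stack_root

code, out = 1, ""        # simulate a failed 'hdp-select versions' call
versions = []
if 0 == code:
  for line in out.splitlines():
    versions.append(line.rstrip('\n'))
if not versions:
  # fall back to scanning the stack install root for version directories
  versions = get_versions_from_stack_root("/usr/hdp")
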

http://git-wip-us.apache.org/repos/asf/ambari/blob/23bf111a/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py b/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
index d1649df..f1a484b 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
@@ -19,6 +19,7 @@ limitations under the License.
 Ambari Agent
 
 """
+import os
 import re
 import tempfile
 
@@ -74,4 +75,21 @@ def get_component_version(stack_name, component_name):
   else:
     Logger.error("Could not find a stack for stack name: %s" % str(stack_name))
 
-  return version
\ No newline at end of file
+  return version
+
+
+def get_versions_from_stack_root(stack_root):
+  """
+  Given a stack install root (/usr/hdp), returns a list of stack versions currently installed.
+  The list of installed stack versions is determined purely based on the stack version directories
+  found in the stack install root.
+  Directory names that do not look like version numbers (e.g. "current") are ignored.
+  :param stack_root: Stack install root directory
+  :return: Returns list of installed stack versions
+  """
+  if stack_root is None or not os.path.exists(stack_root):
+    return []
+
+  installed_stack_versions = [f for f in os.listdir(stack_root) if os.path.isdir(os.path.join(stack_root, f))
+                              and re.match("([\d\.]+(-\d+)?)", f)]
+  return installed_stack_versions
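
A small usage sketch of the scan above; the directory layout is hypothetical:

# Given a stack root laid out like:
#   /usr/hdp/2.2.0.0-2041   <- matches ([\d\.]+(-\d+)?)
#   /usr/hdp/2.3.0.0-2557   <- matches
#   /usr/hdp/current        <- skipped, the name does not start with digits
# the call returns ['2.2.0.0-2041', '2.3.0.0-2557'] (order per os.listdir).
print get_versions_from_stack_root("/usr/hdp")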

http://git-wip-us.apache.org/repos/asf/ambari/blob/23bf111a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/DistributeRepositoriesActionListener.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/DistributeRepositoriesActionListener.java b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/DistributeRepositoriesActionListener.java
index 2c56861..cd82957 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/DistributeRepositoriesActionListener.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/DistributeRepositoriesActionListener.java
@@ -99,20 +99,23 @@ public class DistributeRepositoriesActionListener {
     String repositoryVersion = null;
 
     if (event.getCommandReport() == null) {
-      LOG.error("Command report is null, will set all INSTALLING versions for host {} to INSTALL_FAILED.", event.getHostname());
+      LOG.error(
+          "Command report is null, will set all INSTALLING versions for host {} to INSTALL_FAILED.",
+          event.getHostname());
+    } else if (!event.getCommandReport().getStatus().equals(HostRoleStatus.COMPLETED.toString())) {
+      LOG.warn(
+          "Distribute repositories did not complete, will set all INSTALLING versions for host {} to INSTALL_FAILED.",
+          event.getHostname());
     } else {
       // Parse structured output
       try {
+        newHostState = RepositoryVersionState.INSTALLED;
         DistributeRepositoriesStructuredOutput structuredOutput = StageUtils.getGson().fromJson(
                 event.getCommandReport().getStructuredOut(),
                 DistributeRepositoriesStructuredOutput.class);
 
         repositoryVersion = structuredOutput.getInstalledRepositoryVersion();
 
-        if (event.getCommandReport().getStatus().equals(HostRoleStatus.COMPLETED.toString())) {
-          newHostState = RepositoryVersionState.INSTALLED;
-        }
-
         // Handle the case in which the version to install did not contain the build number,
         // but the structured output does contain the build number.
         if (null != structuredOutput.getActualVersion() && !structuredOutput.getActualVersion().isEmpty() &&

http://git-wip-us.apache.org/repos/asf/ambari/blob/23bf111a/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py b/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
index a5fd9f6..bec1c39 100644
--- a/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
+++ b/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
@@ -88,9 +88,12 @@ class InstallPackages(Script):
           self.stack_root_folder = self.STACK_TO_ROOT_FOLDER[stack_name]
     if self.stack_root_folder is None:
       raise Fail("Cannot determine the stack's root directory by parsing the stack_id property, {0}".format(str(stack_id)))
+    if self.repository_version is None:
+      raise Fail("Cannot determine the repository version to install")
 
     self.repository_version = self.repository_version.strip()
 
+
     # Install/update repositories
     installed_repositories = []
     self.current_repositories = []
@@ -130,24 +133,10 @@ class InstallPackages(Script):
     if num_errors > 0:
       raise Fail("Failed to distribute repositories/install packages")
 
-    # If the repo contains a build number, optimistically assume it to be the actual_version. It will get changed
-    # to correct value if it is not
-    self.actual_version = None
-    if self.repository_version:
-      m = re.search("[\d\.]+-\d+", self.repository_version)
-      if m:
-        # Contains a build number
-        self.repo_version_with_build_number = self.repository_version
-        self.structured_output['actual_version'] = self.repo_version_with_build_number  # This is the best value known so far.
-        self.put_structured_out(self.structured_output)
-      else:
-        self.repo_version_with_build_number = None
-
     # Initial list of versions, used to compute the new version installed
-    self.old_versions = get_hdp_versions()
+    self.old_versions = get_hdp_versions(self.stack_root_folder)
 
     try:
-      # It's possible for the process to receive a SIGTERM while installing the packages
       ret_code = self.install_packages(package_list)
       if ret_code == 0:
         self.structured_output['package_installation_result'] = 'SUCCESS'
@@ -169,18 +158,31 @@ class InstallPackages(Script):
 
   def compute_actual_version(self):
     """
-    After packages are installed, determine what the new actual version is, in order to save it.
+    After packages are installed, determine what the new actual version is.
     """
+
+    # If the repo contains a build number, optimistically assume it to be the actual_version. It will get changed
+    # to the correct value if it is not.
+    self.actual_version = None
+    self.repo_version_with_build_number = None
+    if self.repository_version:
+      m = re.search("[\d\.]+-\d+", self.repository_version)
+      if m:
+        # Contains a build number
+        self.repo_version_with_build_number = self.repository_version
+        self.structured_output['actual_version'] = self.repo_version_with_build_number  # This is the best value known so far.
+        self.put_structured_out(self.structured_output)
+
     Logger.info("Attempting to determine actual version with build number.")
     Logger.info("Old versions: {0}".format(self.old_versions))
 
-    new_versions = get_hdp_versions()
+    new_versions = get_hdp_versions(self.stack_root_folder)
     Logger.info("New versions: {0}".format(new_versions))
 
     deltas = set(new_versions) - set(self.old_versions)
     Logger.info("Deltas: {0}".format(deltas))
 
-    # Get HDP version without build number
+    # Get version without build number
     normalized_repo_version = self.repository_version.split('-')[0]
 
     if 1 == len(deltas):
@@ -188,21 +190,92 @@ class InstallPackages(Script):
       self.structured_output['actual_version'] = self.actual_version
       self.put_structured_out(self.structured_output)
       write_actual_version_to_history_file(normalized_repo_version, self.actual_version)
+      Logger.info(
+        "Found actual version {0} by checking the delta between versions before and after installing packages".format(
+          self.actual_version))
     else:
-      Logger.info("Cannot determine a new actual version installed by using the delta method.")
       # If the first install attempt does a partial install and is unable to report this to the server,
-      # then a subsequent attempt will report an empty delta. For this reason, it is important to search the
-      # repo version history file to determine if we previously did write an actual_version.
-      self.actual_version = read_actual_version_from_history_file(normalized_repo_version)
+      # then a subsequent attempt will report an empty delta. For this reason, we search for the best-fit version for the repo version
+      Logger.info("Cannot determine actual version installed by checking the delta between versions "
+                  "before and after installing packages")
+      Logger.info("Will try to find the actual version by searching for the best possible match in the list of installed versions")
+      self.actual_version = self.find_best_fit_version(new_versions, self.repository_version)
       if self.actual_version is not None:
         self.actual_version = self.actual_version.strip()
         self.structured_output['actual_version'] = self.actual_version
         self.put_structured_out(self.structured_output)
-        Logger.info("Found actual version {0} by parsing file {1}".format(self.actual_version, REPO_VERSION_HISTORY_FILE))
-      elif self.repo_version_with_build_number is None:
+        Logger.info("Found actual version {0} by searching for best possible match".format(self.actual_version))
+      else:
         msg = "Could not determine actual version installed. Try reinstalling packages again."
         raise Fail(msg)
 
+  def check_partial_install(self):
+    """
+    If an installation did not complete successfully, check if installation was partially complete and
+    log the partially completed version to REPO_VERSION_HISTORY_FILE.
+    :return:
+    """
+    Logger.info("Installation of packages failed. Checking if installation was partially complete")
+    Logger.info("Old versions: {0}".format(self.old_versions))
+
+    new_versions = get_hdp_versions(self.stack_root_folder)
+    Logger.info("New versions: {0}".format(new_versions))
+
+    deltas = set(new_versions) - set(self.old_versions)
+    Logger.info("Deltas: {0}".format(deltas))
+
+    # Get version without build number
+    normalized_repo_version = self.repository_version.split('-')[0]
+
+    if 1 == len(deltas):
+      # Some packages were installed successfully. Log this version to REPO_VERSION_HISTORY_FILE
+      partial_install_version = next(iter(deltas)).strip()
+      write_actual_version_to_history_file(normalized_repo_version, partial_install_version)
+      Logger.info("Version {0} was partially installed. ".format(partial_install_version))
+
+  def find_best_fit_version(self, versions, repo_version):
+    """
+    Given a list of installed versions and a repo version, search for a version that best fits the repo version
+    If the repo version is found in the list of installed versions, return the repo version itself.
+    If the repo version is not found in the list of installed versions,
+    normalize the repo version, match by prefix, and use REPO_VERSION_HISTORY_FILE to resolve conflicts.
+
+    :param versions: List of versions installed
+    :param repo_version: Repo version to search
+    :return: Matching version, None if no match was found.
+    """
+    if versions is None or repo_version is None:
+      return None
+
+    build_num_match = re.search("[\d\.]+-\d+", repo_version)
+    if build_num_match and repo_version in versions:
+      # If repo version has build number and is found in the list of versions, return it as the matching version
+      Logger.info("Best Fit Version: Resolved from repo version with valid build number: {0}".format(repo_version))
+      return repo_version
+
+    # Get version without build number
+    normalized_repo_version = repo_version.split('-')[0]
+
+    # Find all versions that match the normalized repo version
+    match_versions = filter(lambda x: x.startswith(normalized_repo_version), versions)
+    if match_versions:
+
+      if len(match_versions) == 1:
+        # Resolved without conflicts
+        Logger.info("Best Fit Version: Resolved from normalized repo version without conflicts: {0}".format(match_versions[0]))
+        return match_versions[0]
+
+      # Resolve conflicts using REPO_VERSION_HISTORY_FILE
+      history_version = read_actual_version_from_history_file(normalized_repo_version)
+
+      # Check that the version retrieved from the history file is a valid match
+      if history_version in match_versions:
+        Logger.info("Best Fit Version: Resolved from normalized repo version using {0}: {1}".format(REPO_VERSION_HISTORY_FILE, history_version))
+        return history_version
+
+    # No matching version
+    return None
+
 
   def install_packages(self, package_list):
     """
@@ -245,7 +318,10 @@ class InstallPackages(Script):
             Package(package, action="remove")
     # Compute the actual version in order to save it in structured out
     try:
-      self.compute_actual_version()
+      if ret_code == 0:
+        self.compute_actual_version()
+      else:
+        self.check_partial_install()
     except Fail, err:
       ret_code = 1
       Logger.logger.exception("Failure while computing actual version. Error: {0}".format(str(err)))
@@ -297,7 +373,7 @@ class InstallPackages(Script):
 
   def abort_handler(self, signum, frame):
     Logger.error("Caught signal {0}, will handle it gracefully. Compute the actual version if possible before exiting.".format(signum))
-    self.compute_actual_version()
+    self.check_partial_install()
 
 
 if __name__ == "__main__":
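
Condensed, the resolution order implemented above is delta first, then best fit; a sketch with hypothetical version strings:

# 1. Delta method: exactly one new version appeared during the install.
old_versions = ["2.2.0.0-2041"]
new_versions = ["2.2.0.0-2041", "2.3.0.0-2557"]
deltas = set(new_versions) - set(old_versions)    # set(['2.3.0.0-2557'])

# 2. Best fit, used when the delta is empty or ambiguous: an exact match wins
#    if the repo version carries a build number; otherwise match on the
#    normalized prefix and let REPO_VERSION_HISTORY_FILE break ties.
repo_version = "2.3.0.0"                          # no build number
normalized = repo_version.split('-')[0]
matches = [v for v in new_versions if v.startswith(normalized)]
# one match -> use it; several -> consult the history file; none -> Fail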

http://git-wip-us.apache.org/repos/asf/ambari/blob/23bf111a/ambari-server/src/test/python/custom_actions/TestInstallPackages.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/custom_actions/TestInstallPackages.py b/ambari-server/src/test/python/custom_actions/TestInstallPackages.py
index 5b2a148..83b6bb5 100644
--- a/ambari-server/src/test/python/custom_actions/TestInstallPackages.py
+++ b/ambari-server/src/test/python/custom_actions/TestInstallPackages.py
@@ -71,7 +71,10 @@ class TestInstallPackages(RMFTestCase):
                             read_actual_version_from_history_file_mock,
                             hdp_versions_mock,
                             put_structured_out_mock, allInstalledPackages_mock, list_ambari_managed_repos_mock):
-    read_actual_version_from_history_file_mock.return_value = VERSION_STUB
+    hdp_versions_mock.side_effect = [
+      [],  # before installation attempt
+      [VERSION_STUB]
+    ]
     allInstalledPackages_mock.side_effect = TestInstallPackages._add_packages
     list_ambari_managed_repos_mock.return_value=[]
     self.executeScript("scripts/install_packages.py",
@@ -126,7 +129,10 @@ class TestInstallPackages(RMFTestCase):
                             read_actual_version_from_history_file_mock,
                             hdp_versions_mock, put_structured_out_mock, allInstalledPackages_mock, list_ambari_managed_repos_mock, is_suse_family_mock):
     is_suse_family_mock = True
-    read_actual_version_from_history_file_mock.return_value = VERSION_STUB
+    hdp_versions_mock.side_effect = [
+      [],  # before installation attempt
+      [VERSION_STUB]
+    ]
     allInstalledPackages_mock.side_effect = TestInstallPackages._add_packages
     list_ambari_managed_repos_mock.return_value=[]
     self.executeScript("scripts/install_packages.py",
@@ -183,7 +189,10 @@ class TestInstallPackages(RMFTestCase):
                                  hdp_versions_mock,
                                  allInstalledPackages_mock, put_structured_out_mock,
                                  is_redhat_family_mock, list_ambari_managed_repos_mock):
-    read_actual_version_from_history_file_mock.return_value = VERSION_STUB
+    hdp_versions_mock.side_effect = [
+      [],  # before installation attempt
+      [VERSION_STUB]
+    ]
     allInstalledPackages_mock.side_effect = TestInstallPackages._add_packages
     list_ambari_managed_repos_mock.return_value=["HDP-UTILS-2.2.0.1-885"]
     is_redhat_family_mock.return_value = True
@@ -274,7 +283,6 @@ class TestInstallPackages(RMFTestCase):
     self.assertTrue(put_structured_out_mock.called)
     self.assertEquals(put_structured_out_mock.call_args[0][0],
                       {'stack_id': 'HDP-2.2',
-                      'actual_version': VERSION_STUB,
                       'installed_repository_version': VERSION_STUB,
                       'ambari_repositories': [],
                       'package_installation_result': 'FAIL'})
@@ -313,6 +321,10 @@ class TestInstallPackages(RMFTestCase):
                                hdp_versions_mock,
                                allInstalledPackages_mock, put_structured_out_mock,
                                package_mock, is_suse_family_mock):
+    hdp_versions_mock.side_effect = [
+      [],  # before installation attempt
+      [VERSION_STUB]
+    ]
     read_actual_version_from_history_file_mock.return_value = VERSION_STUB
     allInstalledPackages_mock = MagicMock(side_effect = TestInstallPackages._add_packages)
     is_suse_family_mock.return_value = True
@@ -562,17 +574,21 @@ class TestInstallPackages(RMFTestCase):
 
     allInstalledPackages_mock.side_effect = TestInstallPackages._add_packages
     list_ambari_managed_repos_mock.return_value = []
-    self.executeScript("scripts/install_packages.py",
+    try:
+      self.executeScript("scripts/install_packages.py",
                        classname="InstallPackages",
                        command="actionexecute",
                        config_dict=command_json,
                        target=RMFTestCase.TARGET_CUSTOM_ACTIONS,
                        os_type=('Redhat', '6.4', 'Final'),
                        )
+      self.fail("Should throw exception")
+    except Fail:
+      pass  # Expected
 
     self.assertTrue(put_structured_out_mock.called)
     self.assertEquals(put_structured_out_mock.call_args[0][0],
-                      {'package_installation_result': 'SUCCESS',
+                      {'package_installation_result': 'FAIL',
                        'stack_id': u'HDP-2.2',
                        'installed_repository_version': VERSION_STUB,
                        'actual_version': VERSION_STUB,
@@ -806,17 +822,21 @@ class TestInstallPackages(RMFTestCase):
 
     allInstalledPackages_mock.side_effect = TestInstallPackages._add_packages
     list_ambari_managed_repos_mock.return_value = []
-    self.executeScript("scripts/install_packages.py",
+    try:
+      self.executeScript("scripts/install_packages.py",
                        classname="InstallPackages",
                        command="actionexecute",
                        config_dict=command_json,
                        target=RMFTestCase.TARGET_CUSTOM_ACTIONS,
                        os_type=('Redhat', '6.4', 'Final'),
                        )
+      self.fail("Should throw exception")
+    except Fail:
+      pass  # Expected
 
     self.assertTrue(put_structured_out_mock.called)
     self.assertEquals(put_structured_out_mock.call_args[0][0],
-                      {'package_installation_result': 'SUCCESS',
+                      {'package_installation_result': 'FAIL',
                        'stack_id': u'HDP-2.2',
                        'installed_repository_version': VERSION_STUB,
                        'actual_version': VERSION_STUB,


[13/50] [abbrv] ambari git commit: AMBARI-13165. Oozie service have "Database host" required config after ambari upgrade from 1.7.0 to 2.1.2 - additional patch (dlysnichenko)

Posted by nc...@apache.org.
AMBARI-13165. Oozie service have "Database host" required config after ambari upgrade from 1.7.0 to 2.1.2 - additional patch (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ad31c9e8
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ad31c9e8
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ad31c9e8

Branch: refs/heads/branch-dev-patch-upgrade
Commit: ad31c9e8deed0e15a7fdfd5b7704a095882ac982
Parents: 31096c8
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Fri Sep 25 18:16:21 2015 +0300
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Fri Sep 25 18:17:17 2015 +0300

----------------------------------------------------------------------
 .../org/apache/ambari/server/upgrade/UpgradeCatalog212.java  | 8 ++++----
 .../apache/ambari/server/upgrade/UpgradeCatalog212Test.java  | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ad31c9e8/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
index 12e3ce6..37a87ab 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
@@ -331,18 +331,18 @@ public class UpgradeCatalog212 extends AbstractUpgradeCatalog {
             Map<String, String> oozieEnvProperties = oozieEnv.getProperties();
 
             String hostname = oozieEnvProperties.get("oozie_hostname");
-            String db_type = oozieEnvProperties.get("oozie_ambari_database");
+            String db_type = oozieEnvProperties.get("oozie_database");
             String final_db_host = null;
             // fix for empty hostname after 1.7 -> 2.1.x+ upgrade
             if (hostname != null && db_type != null && hostname.equals("")) {
               switch (db_type.toUpperCase()) {
-                case "MYSQL":
+                case "EXISTING MYSQL DATABASE":
                   final_db_host = oozieEnvProperties.get("oozie_existing_mysql_host");
                   break;
-                case "POSTGRESQL":
+                case "EXISTING POSTGRESQL DATABASE":
                   final_db_host = oozieEnvProperties.get("oozie_existing_postgresql_host");
                   break;
-                case "ORACLE":
+                case "EXISTING ORACLE DATABASE":
                   final_db_host = oozieEnvProperties.get("oozie_existing_oracle_host");
                   break;
                 default:
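
Paraphrased in Python for brevity (the names below are illustrative, not the shipped Java), the corrected switch keys on the oozie_database display value instead of the removed oozie_ambari_database lookup:

# Illustrative paraphrase of the switch above.
DB_HOST_PROPERTY = {
  "EXISTING MYSQL DATABASE": "oozie_existing_mysql_host",
  "EXISTING POSTGRESQL DATABASE": "oozie_existing_postgresql_host",
  "EXISTING ORACLE DATABASE": "oozie_existing_oracle_host",
}
prop = DB_HOST_PROPERTY.get(db_type.upper())      # db_type from oozie_database
final_db_host = oozie_env_properties.get(prop) if prop else None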

http://git-wip-us.apache.org/repos/asf/ambari/blob/ad31c9e8/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog212Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog212Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog212Test.java
index f786052..d427e1a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog212Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog212Test.java
@@ -335,7 +335,7 @@ public class UpgradeCatalog212Test {
 
     final Map<String, String> propertiesExpectedOozieEnv = new HashMap<String, String>() {{
       put("oozie_hostname", "");
-      put("oozie_ambari_database", "123");
+      put("oozie_database", "123");
     }};
 
     final Injector mockInjector = Guice.createInjector(new AbstractModule() {


[14/50] [abbrv] ambari git commit: AMBARI-13241 Post RU dfs_data_dir_mount.hist is lost (dsen)

Posted by nc...@apache.org.
AMBARI-13241 Post RU dfs_data_dir_mount.hist is lost (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/80e2f203
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/80e2f203
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/80e2f203

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 80e2f2036539e5cf55924c61ab56599092c1987d
Parents: ad31c9e
Author: Dmytro Sen <ds...@apache.org>
Authored: Fri Sep 25 20:06:24 2015 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Fri Sep 25 20:06:24 2015 +0300

----------------------------------------------------------------------
 .../resource_management/TestDatanodeHelper.py   | 45 ++------------------
 .../libraries/functions/dfs_datanode_helper.py  | 42 ++++++------------
 .../2.1.0.2.0/package/scripts/hdfs_datanode.py  |  8 +++-
 .../python/stacks/2.0.6/HDFS/test_datanode.py   | 16 +++++++
 4 files changed, 40 insertions(+), 71 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/80e2f203/ambari-agent/src/test/python/resource_management/TestDatanodeHelper.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/resource_management/TestDatanodeHelper.py b/ambari-agent/src/test/python/resource_management/TestDatanodeHelper.py
index a74cc0b..70539ac 100644
--- a/ambari-agent/src/test/python/resource_management/TestDatanodeHelper.py
+++ b/ambari-agent/src/test/python/resource_management/TestDatanodeHelper.py
@@ -65,8 +65,7 @@ class TestDatanodeHelper(TestCase):
 
   @patch.object(Logger, "info")
   @patch.object(Logger, "error")
-  @patch.object(dfs_datanode_helper, "_write_data_dir_to_mount_in_file")
-  def test_normalized(self, mock_write_data_dir_to_file, log_error, log_info):
+  def test_normalized(self, log_error, log_info):
     """
     Test that the data dirs are normalized by removing leading and trailing whitespace, and that they are treated as case sensitive.
     """
@@ -88,41 +87,13 @@ class TestDatanodeHelper(TestCase):
 
     self.assertEquals(0, log_error.call_count)
 
-
-  @patch.object(Logger, "info")
-  @patch.object(Logger, "error")
-  @patch.object(dfs_datanode_helper, "_write_data_dir_to_mount_in_file")
-  @patch.object(dfs_datanode_helper, "get_mount_point_for_dir")
-  @patch.object(os.path, "isdir")
-  def test_save_mount_points(self, mock_os_isdir, mock_get_mount_point, mock_write_data_dir_to_mount_in_file, log_error, log_info):
-    """
-    Test when all mounts are on root.
-    """
-    mock_get_mount_point.side_effect = ["/", "/", "/"] * 2
-    mock_os_isdir.side_effect = [False, False, False] + [True, True, True]
-    mock_write_data_dir_to_mount_in_file.return_value = True
-
-    # Function under test
-    dfs_datanode_helper.handle_dfs_data_dir(fake_create_dir, self.params, update_cache=False)
-
-    for (name, args, kwargs) in log_info.mock_calls:
-      print args[0]
-
-    for (name, args, kwargs) in log_error.mock_calls:
-      print args[0]
-
-    self.assertEquals(0, log_error.call_count)
-    mock_write_data_dir_to_mount_in_file.assert_called_once_with(self.params, {self.grid0: "/", self.grid1: "/", self.grid2: "/"})
-
-
   @patch.object(Logger, "info")
   @patch.object(Logger, "error")
   @patch.object(dfs_datanode_helper, "get_data_dir_to_mount_from_file")
-  @patch.object(dfs_datanode_helper, "_write_data_dir_to_mount_in_file")
   @patch.object(dfs_datanode_helper, "get_mount_point_for_dir")
   @patch.object(os.path, "isdir")
   @patch.object(os.path, "exists")
-  def test_grid_becomes_unmounted(self, mock_os_exists, mock_os_isdir, mock_get_mount_point, mock_write_data_dir_to_mount_in_file, mock_get_data_dir_to_mount_from_file, log_error, log_info):
+  def test_grid_becomes_unmounted(self, mock_os_exists, mock_os_isdir, mock_get_mount_point, mock_get_data_dir_to_mount_from_file, log_error, log_info):
     """
     Test when grid2 becomes unmounted
     """
@@ -134,7 +105,6 @@ class TestDatanodeHelper(TestCase):
     # Grid2 then becomes unmounted
     mock_get_mount_point.side_effect = ["/dev0", "/dev1", "/"] * 2
     mock_os_isdir.side_effect = [False, False, False] + [True, True, True]
-    mock_write_data_dir_to_mount_in_file.return_value = True
 
     # Function under test
     dfs_datanode_helper.handle_dfs_data_dir(fake_create_dir, self.params, update_cache=False)
@@ -151,18 +121,13 @@ class TestDatanodeHelper(TestCase):
     self.assertEquals(1, log_error.call_count)
     self.assertTrue("Directory /grid/2/data does not exist and became unmounted from /dev2" in error_msg)
 
-    # Notice that grid2 is still written with its original mount point because an error occurred on it
-    mock_write_data_dir_to_mount_in_file.assert_called_once_with(self.params, {self.grid0: "/dev0", self.grid1: "/dev1", self.grid2: "/dev2"})
-
-
   @patch.object(Logger, "info")
   @patch.object(Logger, "error")
   @patch.object(dfs_datanode_helper, "get_data_dir_to_mount_from_file")
-  @patch.object(dfs_datanode_helper, "_write_data_dir_to_mount_in_file")
   @patch.object(dfs_datanode_helper, "get_mount_point_for_dir")
   @patch.object(os.path, "isdir")
   @patch.object(os.path, "exists")
-  def test_grid_becomes_remounted(self, mock_os_exists, mock_os_isdir, mock_get_mount_point, mock_write_data_dir_to_mount_in_file, mock_get_data_dir_to_mount_from_file, log_error, log_info):
+  def test_grid_becomes_remounted(self, mock_os_exists, mock_os_isdir, mock_get_mount_point, mock_get_data_dir_to_mount_from_file, log_error, log_info):
     """
     Test when grid2 becomes remounted
     """
@@ -174,7 +139,6 @@ class TestDatanodeHelper(TestCase):
     # Grid2 then becomes remounted
     mock_get_mount_point.side_effect = ["/dev0", "/dev1", "/dev2"] * 2
     mock_os_isdir.side_effect = [False, False, False] + [True, True, True]
-    mock_write_data_dir_to_mount_in_file.return_value = True
 
     # Function under test
     dfs_datanode_helper.handle_dfs_data_dir(fake_create_dir, self.params, update_cache=False)
@@ -186,6 +150,3 @@ class TestDatanodeHelper(TestCase):
       print args[0]
 
     self.assertEquals(0, log_error.call_count)
-
-    # Notice that grid2 is now written with its new mount point to prevent a regression
-    mock_write_data_dir_to_mount_in_file.assert_called_once_with(self.params, {self.grid0: "/dev0", self.grid1: "/dev1", self.grid2: "/dev2"})
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/80e2f203/ambari-common/src/main/python/resource_management/libraries/functions/dfs_datanode_helper.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/dfs_datanode_helper.py b/ambari-common/src/main/python/resource_management/libraries/functions/dfs_datanode_helper.py
index 778d869..33e7b41 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/dfs_datanode_helper.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/dfs_datanode_helper.py
@@ -25,31 +25,14 @@ import os
 from resource_management.libraries.functions.file_system import get_mount_point_for_dir, get_and_cache_mount_points
 from resource_management.core.logger import Logger
 
-
-def _write_data_dir_to_mount_in_file(params, new_data_dir_to_mount_point):
-  """
-  :param new_data_dir_to_mount_point: Dictionary to write to the data_dir_mount_file file, where
-  the key is each DFS data dir, and the value is its current mount point.
-  :return: Returns True on success, False otherwise.
-  """
-  # Overwrite the existing file, or create it if doesn't exist
-  if params.data_dir_mount_file:
-    try:
-      with open(str(params.data_dir_mount_file), "w") as f:
-        f.write("# This file keeps track of the last known mount-point for each DFS data dir.\n")
-        f.write("# It is safe to delete, since it will get regenerated the next time that the DataNode starts.\n")
-        f.write("# However, it is not advised to delete this file since Ambari may \n")
-        f.write("# re-create a DFS data dir that used to be mounted on a drive but is now mounted on the root.\n")
-        f.write("# Comments begin with a hash (#) symbol\n")
-        f.write("# data_dir,mount_point\n")
-        for kv in new_data_dir_to_mount_point.iteritems():
-          f.write(kv[0] + "," + kv[1] + "\n")
-    except Exception, e:
-      Logger.error("Encountered error while attempting to save DFS data dir mount mount values to file %s" %
-                   str(params.data_dir_mount_file))
-      return False
-  return True
-
+DATA_DIR_TO_MOUNT_HEADER = """
+# This file keeps track of the last known mount-point for each DFS data dir.
+# It is safe to delete, since it will get regenerated the next time that the DataNode starts.
+# However, it is not advised to delete this file since Ambari may
+# re-create a DFS data dir that used to be mounted on a drive but is now mounted on the root.
+# Comments begin with a hash (#) symbol
+# data_dir,mount_point
+"""
 
 def get_data_dir_to_mount_from_file(params):
   """
@@ -96,6 +79,7 @@ def handle_dfs_data_dir(func, params, update_cache=True):
                will be called as func(data_dir, params)
   :param params: parameters to pass to function pointer
   :param update_cache: Bool indicating whether to update the global cache of mount points
+  :return: Returns the data_dir_mount_file content
   """
 
   # Get the data dirs that Ambari knows about and their last known mount point
@@ -172,9 +156,6 @@ def handle_dfs_data_dir(func, params, update_cache=True):
       curr_mount_point = get_mount_point_for_dir(data_dir)
       data_dir_to_mount_point[data_dir] = curr_mount_point
 
-  # Save back to the file
-  _write_data_dir_to_mount_in_file(params, data_dir_to_mount_point)
-
   if error_messages and len(error_messages) > 0:
     header = " ERROR ".join(["*****"] * 6)
     header = "\n" + "\n".join([header, ] * 3) + "\n"
@@ -183,4 +164,9 @@ def handle_dfs_data_dir(func, params, update_cache=True):
           "root partition, either update the contents of {0}, or delete that file.".format(params.data_dir_mount_file)
     Logger.error(header + msg + header)
 
+  data_dir_to_mount = DATA_DIR_TO_MOUNT_HEADER
+  for kv in data_dir_to_mount_point.iteritems():
+    data_dir_to_mount += kv[0] + "," + kv[1] + "\n"
+
+  return data_dir_to_mount
 

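The net effect of this change: the helper no longer writes dfs_data_dir_mount.hist itself; it renders the file body and hands it back to the caller. A rough standalone sketch of that contract (header shortened, names and data illustrative, not the Ambari API):

  HEADER = "# data_dir,mount_point\n"

  def build_data_dir_mount_content(data_dir_to_mount_point):
      # Render {data_dir: mount_point} into the .hist file body,
      # leaving the actual write to the caller.
      content = HEADER
      for data_dir, mount_point in sorted(data_dir_to_mount_point.items()):
          content += data_dir + "," + mount_point + "\n"
      return content

  print(build_data_dir_mount_content({"/hadoop/hdfs/data": "/"}))
  # -> "# data_dir,mount_point\n/hadoop/hdfs/data,/\n"
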
http://git-wip-us.apache.org/repos/asf/ambari/blob/80e2f203/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py
index df847bd..34ec8cd 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py
@@ -48,7 +48,13 @@ def datanode(action=None):
               owner=params.hdfs_user,
               group=params.user_group)
 
-    handle_dfs_data_dir(create_dirs, params)
+    File(params.data_dir_mount_file,
+         owner=params.hdfs_user,
+         group=params.user_group,
+         mode=0644,
+         content=handle_dfs_data_dir(create_dirs, params)
+    )
+
   elif action == "start" or action == "stop":
     import params
     service(

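With the write moved out of the helper, the DataNode script now declares the history file as a managed File resource whose content is whatever handle_dfs_data_dir() returns. Outside of resource_management, that declaration amounts to something like this sketch (owner/group handling omitted):

  import os

  def write_managed_file(path, content, mode=0o644):
      # Simplified stand-in for the File resource: write the rendered
      # content and set permissions; the chown for owner/group is skipped.
      with open(path, "w") as f:
          f.write(content)
      os.chmod(path, mode)
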
http://git-wip-us.apache.org/repos/asf/ambari/blob/80e2f203/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
index 8e6e386..72925a0 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
@@ -23,6 +23,8 @@ from mock.mock import MagicMock, patch
 from resource_management.libraries.script import Script
 from resource_management.core import shell
 from resource_management.core.exceptions import Fail
+import resource_management.libraries.functions.dfs_datanode_helper
+
 
 class TestDatanode(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "HDFS/2.1.0.2.0/package"
@@ -355,6 +357,13 @@ class TestDatanode(RMFTestCase):
                               recursive = True,
                               cd_access='a'
                               )
+    content = resource_management.libraries.functions.dfs_datanode_helper.DATA_DIR_TO_MOUNT_HEADER
+    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs_data_dir_mount.hist',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0644,
+                              content = content
+                              )
 
   def assert_configure_secured(self, stackVersion=STACK_VERSION, snappy_enabled=True):
     conf_dir = '/etc/hadoop/conf'
@@ -420,6 +429,13 @@ class TestDatanode(RMFTestCase):
                               recursive = True,
                               cd_access='a'
                               )
+    content = resource_management.libraries.functions.dfs_datanode_helper.DATA_DIR_TO_MOUNT_HEADER
+    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs_data_dir_mount.hist',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0644,
+                              content = content
+                              )
 
 
   def test_pre_rolling_restart(self):


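The updated tests import the helper module directly, so the expected content is the module's own DATA_DIR_TO_MOUNT_HEADER constant rather than a copied literal. A minimal check in the same spirit, assuming the resource_management package is importable:

  import unittest
  import resource_management.libraries.functions.dfs_datanode_helper as helper

  class HeaderTest(unittest.TestCase):
      def test_header_lines_are_comments(self):
          # Every non-blank header line should be a '#' comment, so a
          # reader can skip them when parsing data_dir,mount_point pairs.
          for line in helper.DATA_DIR_TO_MOUNT_HEADER.strip().splitlines():
              self.assertTrue(line.startswith("#"))

  if __name__ == "__main__":
      unittest.main()
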
[11/50] [abbrv] ambari git commit: AMBARI-13237. Storm performance issues with topology.metrics.consumer.register enabled. (swagle)

Posted by nc...@apache.org.
AMBARI-13237. Storm performance issues with topology.metrics.consumer.register enabled. (swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/360e6088
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/360e6088
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/360e6088

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 360e608861229d7052b49ba44edbd9e3a3cd3854
Parents: cda6853
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Thu Sep 24 18:32:28 2015 -0700
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Thu Sep 24 18:32:28 2015 -0700

----------------------------------------------------------------------
 .../server/upgrade/UpgradeCatalog212.java       | 21 ++++++++++++++++++++
 .../0.1.0/configuration/storm-site.xml          |  5 -----
 2 files changed, 21 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/360e6088/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
index cab9d3c..12e3ce6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
@@ -19,6 +19,7 @@
 package org.apache.ambari.server.upgrade;
 
 import java.sql.SQLException;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
@@ -197,6 +198,26 @@ public class UpgradeCatalog212 extends AbstractUpgradeCatalog {
     updateOozieConfigs();
     updateHbaseAndClusterConfigurations();
     updateKafkaConfigurations();
+    updateStormConfigs();
+  }
+
+  protected void updateStormConfigs() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    Clusters clusters = ambariManagementController.getClusters();
+
+    if (clusters != null) {
+      Map<String, Cluster> clusterMap = clusters.getClusters();
+
+      if ((clusterMap != null) && !clusterMap.isEmpty()) {
+        // Iterate through the clusters and perform any configuration updates
+        for (final Cluster cluster : clusterMap.values()) {
+          Set<String> removes = new HashSet<String>();
+          removes.add("topology.metrics.consumer.register");
+          updateConfigurationPropertiesForCluster(cluster, "storm-site",
+            new HashMap<String, String>(), removes, false, false);
+        }
+      }
+    }
   }
 
   protected void updateKafkaConfigurations() throws AmbariException {

http://git-wip-us.apache.org/repos/asf/ambari/blob/360e6088/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/storm-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/storm-site.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/storm-site.xml
index 35456d7..b62d34b 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/storm-site.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/storm-site.xml
@@ -22,11 +22,6 @@
 
 <configuration supports_final="true">
   <property>
-    <name>topology.metrics.consumer.register</name>
-    <value>[{'class': 'org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsSink', 'parallelism.hint': 1}]</value>
-    <description>Topology metrics consumer register parameters.</description>
-  </property>
-  <property>
     <name>metrics.reporter.register</name>
     <value>org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter</value>
     <description>Topology metrics reporter.</description>


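In plain terms, the upgrade step walks every cluster and deletes the topology.metrics.consumer.register key from storm-site, so topologies stop registering the metrics consumer per topology (the reporter in metrics.reporter.register stays). A Python restatement of that logic over a hypothetical {cluster: {config_type: {key: value}}} map:

  def remove_storm_metrics_consumer(cluster_configs):
      # Drop the per-topology metrics consumer registration, if present.
      for configs in cluster_configs.values():
          configs.get("storm-site", {}).pop(
              "topology.metrics.consumer.register", None)
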
[27/50] [abbrv] ambari git commit: AMBARI-13247 All config controls are shown in Advanced Tab. (ababiichuk)

Posted by nc...@apache.org.
AMBARI-13247 All config controls are shown in Advanced Tab. (ababiichuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/360a4b4a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/360a4b4a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/360a4b4a

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 360a4b4a2f6a325e0d51792124d394d180e4788d
Parents: 3e7308a
Author: aBabiichuk <ab...@cybervisiontech.com>
Authored: Mon Sep 28 09:55:05 2015 +0300
Committer: aBabiichuk <ab...@cybervisiontech.com>
Committed: Mon Sep 28 09:55:05 2015 +0300

----------------------------------------------------------------------
 ambari-web/app/messages.js                      |  2 -
 .../configs/objects/service_config_property.js  | 60 ++----------
 .../app/templates/common/configs/controls.hbs   | 61 ++++++++++++
 .../common/configs/service_config_category.hbs  | 51 +---------
 .../controls_slave_component_groups_menu.hbs    | 25 -----
 ambari-web/app/views.js                         |  1 +
 .../app/views/common/configs/controls_view.js   | 57 +++++++++++
 ambari-web/app/views/common/controls_view.js    | 99 --------------------
 .../objects/service_config_property_test.js     | 35 -------
 9 files changed, 126 insertions(+), 265 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/360a4b4a/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index d30af64..616eeca 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -481,8 +481,6 @@ Em.I18n.translations = {
   'installer.controls.serviceConfigMultipleHosts.other':'1 other',
   'installer.controls.serviceConfigMultipleHosts.others':'{0} others',
   'installer.controls.serviceConfigMasterHosts.header':'{0} Hosts',
-  'installer.controls.addSlaveComponentGroupButton.title':'Add a {0} Group',
-  'installer.controls.addSlaveComponentGroupButton.content':'If you need different settings on certain {0}s, you can add a {1} group.<br>All {2}s within the same group will have the same set of settings.  You can create multiple groups.',
   'installer.controls.slaveComponentChangeGroupName.error':'group with this name already exist',
 
   'installer.step0.header':'Get Started',

http://git-wip-us.apache.org/repos/asf/ambari/blob/360a4b4a/ambari-web/app/models/configs/objects/service_config_property.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/configs/objects/service_config_property.js b/ambari-web/app/models/configs/objects/service_config_property.js
index 0462809..632cc1e 100644
--- a/ambari-web/app/models/configs/objects/service_config_property.js
+++ b/ambari-web/app/models/configs/objects/service_config_property.js
@@ -137,8 +137,6 @@ App.ServiceConfigProperty = Em.Object.extend({
   }.property('errorMessage', 'warnMessage', 'overrideErrorTrigger'),
 
   overrideErrorTrigger: 0, //Trigger for overridable property error
-  isRestartRequired: false,
-  restartRequiredMessage: 'Restart required',
   index: null, //sequence number in category
   editDone: false, //Text field: on focusOut: true, on focusIn: false
   isNotSaved: false, // user property was added but not saved
@@ -177,12 +175,11 @@ App.ServiceConfigProperty = Em.Object.extend({
     var editable = this.get('isEditable');
     var overrides = this.get('overrides');
     var dt = this.get('displayType');
-    return overrideable && (editable || !overrides || !overrides.length) && ("componentHost" != dt);
+    return overrideable && (editable || !overrides || !overrides.length) && (!["componentHost", "password"].contains(dt));
   }.property('isEditable', 'displayType', 'isOverridable', 'overrides.length'),
 
   isOverridden: function() {
-    var overrides = this.get('overrides');
-    return (overrides != null && overrides.get('length')>0) || !this.get('isOriginalSCP');
+    return (this.get('overrides') != null && this.get('overrides.length') > 0) || !this.get('isOriginalSCP');
   }.property('overrides', 'overrides.length', 'isOriginalSCP'),
 
   isOverrideChanged: function () {
@@ -192,14 +189,9 @@ App.ServiceConfigProperty = Em.Object.extend({
   }.property('isOverridden', 'overrides.@each.isNotDefaultValue', 'overrideValues.length'),
 
   isRemovable: function() {
-    var isOriginalSCP = this.get('isOriginalSCP');
-    var isUserProperty = this.get('isUserProperty');
-    var isRequiredByAgent = this.get('isRequiredByAgent');
-    var isEditable = this.get('isEditable');
-    var hasOverrides = this.get('overrides.length') > 0;
-    // Removable when this is a user property, or it is not an original property and it is editable
-    return isEditable && !hasOverrides && isRequiredByAgent && (isUserProperty || !isOriginalSCP);
-  }.property('isUserProperty', 'isOriginalSCP', 'overrides.length'),
+    return this.get('isEditable') && this.get('isRequiredByAgent') && !(this.get('overrides.length') > 0)
+       && (this.get('isUserProperty') || !this.get('isOriginalSCP'));
+  }.property('isUserProperty', 'isOriginalSCP', 'overrides.length', 'isRequiredByAgent'),
 
   init: function () {
     if (this.get('value') == '') {
@@ -209,7 +201,7 @@ App.ServiceConfigProperty = Em.Object.extend({
         this.set('value', this.get('recommendedValue'));
       }
     }
-    if(this.get("displayType") === "password"){
+    if(this.get("displayType") === "password") {
       this.set('retypedPassword', this.get('value'));
       this.set('recommendedValue', '');
     }
@@ -242,46 +234,6 @@ App.ServiceConfigProperty = Em.Object.extend({
     return ["componentHost", "componentHosts", "radio button"].contains(this.get('displayType'));
   }.property('displayType'),
 
-  /**
-   * Used in <code>templates/common/configs/service_config_category.hbs</code>
-   * @type {boolean}
-   */
-  undoAvailable: function () {
-    return !this.get('cantBeUndone') && this.get('isNotDefaultValue');
-  }.property('cantBeUndone', 'isNotDefaultValue'),
-
-  /**
-   * Used in <code>templates/common/configs/service_config_category.hbs</code>
-   * @type {boolean}
-   */
-  removeAvailable: function () {
-    return this.get('isRemovable') && !this.get('isComparison');
-  }.property('isComparison', 'isRemovable'),
-
-  /**
-   * Used in <code>templates/common/configs/service_config_category.hbs</code>
-   * @type {boolean}
-   */
-  switchGroupAvailable: function () {
-    return !this.get('isEditable') && this.get('group');
-  }.property('isEditable', 'group'),
-
-  /**
-   * Used in <code>templates/common/configs/service_config_category.hbs</code>
-   * @type {boolean}
-   */
-  setRecommendedAvailable: function () {
-    return this.get('isEditable') && this.get('recommendedValueExists');
-  }.property('isEditable', 'recommendedValueExists'),
-
-  /**
-   * Used in <code>templates/common/configs/service_config_category.hbs</code>
-   * @type {boolean}
-   */
-  overrideAvailable: function () {
-    return !this.get('isComparison') && this.get('isPropertyOverridable') && (this.get('displayType') !== 'password');
-  }.property('isPropertyOverridable', 'isComparison'),
-
   isValid: function () {
     return this.get('errorMessage') === '';
   }.property('errorMessage'),

http://git-wip-us.apache.org/repos/asf/ambari/blob/360a4b4a/ambari-web/app/templates/common/configs/controls.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/common/configs/controls.hbs b/ambari-web/app/templates/common/configs/controls.hbs
new file mode 100644
index 0000000..b6439cb
--- /dev/null
+++ b/ambari-web/app/templates/common/configs/controls.hbs
@@ -0,0 +1,61 @@
+{{!
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+}}
+
+{{#if view.showSwitchToGroup}}
+  <a rel='SwitchGroupTooltip' {{bindAttr data-original-title="this.group.switchGroupTextShort" class="switchGroupAvailable:show:hide :action"}}
+    {{action selectConfigGroup group target="controller"}}>
+    {{group.switchGroupTextShort}}
+  </a>
+{{else}}
+  {{#if view.showIsFinal}}
+    <a href="#" data-toggle="tooltip"
+      {{bindAttr class=":btn-small :btn-final view.serviceConfigProperty.isFinal:active view.serviceConfigProperty.hideFinalIcon:hidden" disabled="view.serviceConfigProperty.isNotEditable"}}
+      {{action "toggleFinalFlag" this target="view.parentView"}}
+      {{translateAttr data-original-title="services.service.config.final"}}>
+      <i class="icon-lock"></i>
+    </a>
+  {{/if}}
+  {{#if view.showOverride}}
+    <a href="#" data-toggle="tooltip" class="btn-small"
+      {{action "createOverrideProperty" this target="view.parentView"}}
+      {{translateAttr data-original-title="common.override"}}>
+      <i class="icon-plus-sign"></i>
+    </a>
+  {{/if}}
+  {{#if view.showUndo}}
+    <a href="#" data-toggle="tooltip" class="btn-small"
+      {{action "doRestoreDefaultValue" this target="view.parentView"}}
+      {{translateAttr data-original-title="common.undo"}}>
+      <i class="icon-undo"></i>
+    </a>
+  {{/if}}
+  {{#if view.showRemove}}
+    <a href="#" data-toggle="tooltip" class="btn-small"
+      {{action "removeProperty" this target="view.parentView"}}
+      {{translateAttr data-original-title="common.remove"}}>
+      <i class="icon-minus-sign"></i>
+    </a>
+  {{/if}}
+  {{#if view.showSetRecommended}}
+    <a href="#" data-toggle="tooltip" class="btn-small"
+      {{action "setRecommendedValue" this target="view.parentView"}}
+      {{translateAttr data-original-title="services.service.config.setRecommendedValue"}}>
+      <i class="icon-repeat"></i>
+    </a>
+  {{/if}}
+{{/if}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/360a4b4a/ambari-web/app/templates/common/configs/service_config_category.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/common/configs/service_config_category.hbs b/ambari-web/app/templates/common/configs/service_config_category.hbs
index 47d3a2e..8cb65ae 100644
--- a/ambari-web/app/templates/common/configs/service_config_category.hbs
+++ b/ambari-web/app/templates/common/configs/service_config_category.hbs
@@ -40,12 +40,6 @@
                     <a href="javascript:void(null);"><i class="icon-lock" rel="tooltip" data-toggle="tooltip"
                                                         title="security knob"></i></a>
                   {{/if}}
-                  {{#if view.supportsHostOverrides}}
-                    {{#if isRestartRequired}}
-                      <i class="icon-refresh restart-required-property"
-                         rel="tooltip" {{bindAttr title="restartRequiredMessage"}}></i>
-                    {{/if}}
-                  {{/if}}
                 </label>
               </span>
             {{/if}}
@@ -65,50 +59,7 @@
                       &nbsp;{{t services.service.config.configHistory.configGroup}}</span>
                   {{/if}}
                 {{/if}}
-                {{#if supportsFinal}}
-                  <a href="#" data-toggle="tooltip"
-                    {{bindAttr class=":btn-small :btn-final isFinal:active hideFinalIcon:hidden" disabled="isNotEditable"}}
-                    {{action "toggleFinalFlag" this target="view"}}
-                    {{translateAttr data-original-title="services.service.config.final"}}>
-                    <i class="icon-lock"></i>
-                  </a>
-                {{/if}}
-                {{#if view.canEdit}}
-                  {{#if view.supportsHostOverrides}}
-                    {{#isAccessible ADMIN}}
-                      <a href="#" data-toggle="tooltip"
-                        {{action "createOverrideProperty" this target="view"}}
-                        {{translateAttr data-original-title="common.override"}}
-                        {{bindAttr class="overrideAvailable::hide :btn-small"}}>
-                        <i class="icon-plus-sign"></i>
-                      </a>
-                    {{/isAccessible}}
-                  {{/if}}
-                  <a href="#" data-toggle="tooltip"
-                    {{action "doRestoreDefaultValue" this target="view"}}
-                    {{translateAttr data-original-title="common.undo"}}
-                    {{bindAttr class="undoAvailable::hide :btn-small"}}>
-                    <i class="icon-undo"></i>
-                  </a>
-                  {{#isAccessible ADMIN}}
-                    <a href="#" data-toggle="tooltip"
-                      {{action "removeProperty" this target="view"}}
-                      {{translateAttr data-original-title="common.remove"}}
-                      {{bindAttr class="removeAvailable::hide :btn-small"}}>
-                      <i class="icon-minus-sign"></i>
-                    </a>
-                  {{/isAccessible}}
-                  <a rel='SwitchGroupTooltip' {{bindAttr data-original-title="this.group.switchGroupTextShort" class="switchGroupAvailable:show:hide :action"}}
-                    {{action selectConfigGroup group target="controller"}}>
-                    {{group.switchGroupTextShort}}
-                  </a>
-                  <a href="#" data-toggle="tooltip"
-                    {{action "setRecommendedValue" this target="view"}}
-                    {{translateAttr data-original-title="services.service.config.setRecommendedValue"}}
-                    {{bindAttr class="setRecommendedAvailable::hide :btn-small"}}>
-                    <i class="icon-repeat"></i>
-                  </a>
-                {{/if}}
+                {{view App.ControlsView serviceConfigPropertyBinding="this"}}
                 <span class="help-inline">{{errorMessage}}</span>
                 <span class="help-inline">{{warnMessage}}</span>
               </div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/360a4b4a/ambari-web/app/templates/wizard/controls_slave_component_groups_menu.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/wizard/controls_slave_component_groups_menu.hbs b/ambari-web/app/templates/wizard/controls_slave_component_groups_menu.hbs
deleted file mode 100644
index f8eb0d8..0000000
--- a/ambari-web/app/templates/wizard/controls_slave_component_groups_menu.hbs
+++ /dev/null
@@ -1,25 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<a {{action showSlaveComponentGroup view.content target="controller"}} href="#">
-  {{view.content.name}}
-  {{#if view.errorCount}}
-    <span class="badge badge-important">{{view.errorCount}}</span>
-  {{/if}}
-</a>
-<i {{action removeSlaveComponentGroup view.content target="controller"}} class="icon-remove"></i>

http://git-wip-us.apache.org/repos/asf/ambari/blob/360a4b4a/ambari-web/app/views.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views.js b/ambari-web/app/views.js
index 9cc5f0b..3e12998 100644
--- a/ambari-web/app/views.js
+++ b/ambari-web/app/views.js
@@ -75,6 +75,7 @@ require('views/common/filter_combo_cleanable');
 require('views/common/table_view');
 require('views/common/progress_bar_view');
 require('views/common/controls_view');
+require('views/common/configs/controls_view');
 require('views/common/widget/graph_widget_view');
 require('views/common/widget/template_widget_view');
 require('views/common/widget/gauge_widget_view');

http://git-wip-us.apache.org/repos/asf/ambari/blob/360a4b4a/ambari-web/app/views/common/configs/controls_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/configs/controls_view.js b/ambari-web/app/views/common/configs/controls_view.js
new file mode 100644
index 0000000..21965a6
--- /dev/null
+++ b/ambari-web/app/views/common/configs/controls_view.js
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+var App = require('app');
+
+App.ControlsView = Ember.View.extend({
+
+	classNames: ['display-inline-block'],
+
+	templateName: require('templates/common/configs/controls'),
+
+	serviceConfigProperty: null,
+
+	showActions: function() {
+		return App.isAccessible('ADMIN') && this.get('serviceConfigProperty.isEditable') && this.get('serviceConfigProperty.isRequiredByAgent') && !this.get('serviceConfigProperty.isComparison');
+	}.property('serviceConfigProperty.isEditable', 'serviceConfigProperty.isRequiredByAgent', 'serviceConfigProperty.isComparison'),
+
+	showSwitchToGroup: function() {
+		return !this.get('serviceConfigProperty.isEditable') && this.get('serviceConfigProperty.group');
+	}.property('showActions', 'serviceConfigProperty.group'),
+
+	showIsFinal: function() {
+		return this.get('serviceConfigProperty.supportsFinal');
+	}.property('serviceConfigProperty.supportsFinal'),
+
+	showRemove: function() {
+		return this.get('showActions') && this.get('serviceConfigProperty.isRemovable');
+	}.property('showActions', 'serviceConfigProperty.isRemovable'),
+
+	showOverride: function() {
+		return this.get('showActions') && this.get('serviceConfigProperty.isPropertyOverridable');
+	}.property('showActions', 'serviceConfigProperty.isPropertyOverridable'),
+
+	showUndo: function() {
+		return this.get('showActions') && !this.get('serviceConfigProperty.cantBeUndone') && this.get('serviceConfigProperty.isNotDefaultValue');
+	}.property('showActions', 'serviceConfigProperty.cantBeUndone', 'serviceConfigProperty.isNotDefaultValue'),
+
+	showSetRecommended: function() {
+		return this.get('showActions') && this.get('serviceConfigProperty.recommendedValueExists');
+	}.property('showActions', 'serviceConfigProperty.recommendedValueExists')
+
+});
+

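The visibility rules that used to live on each property now sit on this view, and showActions gates all of the edit buttons. The same boolean logic as above, restated in Python for clarity:

  def show_actions(is_admin, is_editable, is_required_by_agent, is_comparison):
      # Edit controls render only for an admin editing an agent-managed,
      # non-comparison property; showOverride/showUndo/etc. key off this gate.
      return (is_admin and is_editable
              and is_required_by_agent and not is_comparison)
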
http://git-wip-us.apache.org/repos/asf/ambari/blob/360a4b4a/ambari-web/app/views/common/controls_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/controls_view.js b/ambari-web/app/views/common/controls_view.js
index 8d1365a..17eb6bd 100644
--- a/ambari-web/app/views/common/controls_view.js
+++ b/ambari-web/app/views/common/controls_view.js
@@ -859,56 +859,6 @@ App.ServiceConfigMultipleHostsDisplay = Ember.Mixin.create(App.ServiceConfigHost
 
 });
 
-
-/**
- * Show tabs list for slave hosts
- * @type {*}
- */
-App.SlaveComponentGroupsMenu = Em.CollectionView.extend(App.ServiceConfigCalculateId, {
-
-  content: function () {
-    return this.get('controller.componentGroups');
-  }.property('controller.componentGroups'),
-
-  tagName: 'ul',
-  classNames: ["nav", "nav-tabs"],
-
-  itemViewClass: Em.View.extend({
-    classNameBindings: ["active"],
-
-    active: function () {
-      return this.get('content.active');
-    }.property('content.active'),
-
-    errorCount: function () {
-      return this.get('content.properties').filterProperty('isValid', false).filterProperty('isVisible', true).get('length');
-    }.property('content.properties.@each.isValid', 'content.properties.@each.isVisible'),
-
-    templateName: require('templates/wizard/controls_slave_component_groups_menu')
-  })
-
-});
-
-/**
- * <code>Add group</code> button
- * @type {*}
- */
-App.AddSlaveComponentGroupButton = Ember.View.extend(App.ServiceConfigCalculateId, {
-
-  tagName: 'span',
-  slaveComponentName: null,
-
-  didInsertElement: function () {
-    App.popover(this.$(), {
-      title: Em.I18n.t('installer.controls.addSlaveComponentGroupButton.title').format(this.get('slaveComponentName')),
-      content: Em.I18n.t('installer.controls.addSlaveComponentGroupButton.content').format(this.get('slaveComponentName'), this.get('slaveComponentName'), this.get('slaveComponentName')),
-      placement: 'right',
-      trigger: 'hover'
-    });
-  }
-
-});
-
 /**
  * Multiple Slave Hosts component
  * @type {*}
@@ -940,26 +890,6 @@ App.ServiceConfigComponentHostsView = Ember.View.extend(App.ServiceConfigMultipl
 
 });
 
-/**
- * properties for present active slave group
- * @type {*}
- */
-App.SlaveGroupPropertiesView = Ember.View.extend(App.ServiceConfigCalculateId, {
-
-  viewName: 'serviceConfigComponentHostsView',
-
-  group: function () {
-    return this.get('controller.activeGroup');
-  }.property('controller.activeGroup'),
-
-  groupConfigs: function () {
-    return this.get('group.properties');
-  }.property('group.properties.@each').cacheable(),
-
-  errorCount: function () {
-    return this.get('group.properties').filterProperty('isValid', false).filterProperty('isVisible', true).get('length');
-  }.property('configs.@each.isValid', 'configs.@each.isVisible')
-});
 
 /**
  * DropDown component for <code>select hosts for groups</code> popup
@@ -991,35 +921,6 @@ App.SlaveComponentDropDownGroupView = Ember.View.extend(App.ServiceConfigCalcula
 });
 
 /**
- * Show info about current group
- * @type {*}
- */
-App.SlaveComponentChangeGroupNameView = Ember.View.extend(App.ServiceConfigCalculateId, {
-
-  contentBinding: 'controller.activeGroup',
-  classNames: ['control-group'],
-  classNameBindings: 'error',
-  error: false,
-  setError: function () {
-    this.set('error', false);
-  }.observes('controller.activeGroup'),
-  errorMessage: function () {
-    return this.get('error') ? Em.I18n.t('installer.controls.slaveComponentChangeGroupName.error') : '';
-  }.property('error'),
-
-  /**
-   * Onclick handler for saving updated group name
-   * @param event
-   */
-  changeGroupName: function (event) {
-    var inputVal = $('#' + this.get('elementId') + ' input[type="text"]').val();
-    if (inputVal !== this.get('content.name')) {
-      var result = this.get('controller').changeSlaveGroupName(this.get('content'), inputVal);
-      this.set('error', result);
-    }
-  }
-});
-/**
  * View for testing connection to database.
  **/
 App.CheckDBConnectionView = Ember.View.extend({

http://git-wip-us.apache.org/repos/asf/ambari/blob/360a4b4a/ambari-web/test/models/configs/objects/service_config_property_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/models/configs/objects/service_config_property_test.js b/ambari-web/test/models/configs/objects/service_config_property_test.js
index 691d86e..da9e7af 100644
--- a/ambari-web/test/models/configs/objects/service_config_property_test.js
+++ b/ambari-web/test/models/configs/objects/service_config_property_test.js
@@ -474,41 +474,6 @@ describe('App.ServiceConfigProperty', function () {
     });
   });
 
-  describe('#undoAvailable', function () {
-
-    Em.A([
-      {
-        cantBeUndone: true,
-        isNotDefaultValue: true,
-        e: false
-      },
-      {
-        cantBeUndone: false,
-        isNotDefaultValue: true,
-        e: true
-      },
-      {
-        cantBeUndone: true,
-        isNotDefaultValue: false,
-        e: false
-      },
-      {
-        cantBeUndone: false,
-        isNotDefaultValue: false,
-        e: false
-      }
-    ]).forEach(function (test) {
-      it('', function () {
-        serviceConfigProperty.reopen({
-          cantBeUndone: test.cantBeUndone,
-          isNotDefaultValue: test.isNotDefaultValue
-        });
-        expect(serviceConfigProperty.get('undoAvailable')).to.equal(test.e);
-      });
-    });
-
-  });
-
   describe('#overrideIsFinalValues', function () {
     it('should be defined as empty array', function () {
       expect(serviceConfigProperty.get('overrideIsFinalValues')).to.eql([]);


[17/50] [abbrv] ambari git commit: Revert "AMBARI-13229. Detect non-compliant python versions and do not attempt to start Ambari Agent (aonishuk)"

Posted by nc...@apache.org.
Revert "AMBARI-13229. Detect non-compliant python versions and do not attempt to start Ambari Agent (aonishuk)"

This reverts commit 688666137427ccae7ee94df4b160979f87d1f31a.

Conflicts:
	ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e3b0c362
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e3b0c362
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e3b0c362

Branch: refs/heads/branch-dev-patch-upgrade
Commit: e3b0c362fcf5542ce826ba4c798a8be71e2f3bb0
Parents: 9a7ceb5
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Fri Sep 25 15:47:21 2015 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Fri Sep 25 15:47:21 2015 -0400

----------------------------------------------------------------------
 .../libraries/functions/get_hdp_version.py              |  6 ++----
 .../libraries/functions/hdp_select.py                   |  5 ++---
 .../HDFS/2.1.0.2.0/package/scripts/namenode.py          |  2 +-
 .../HDFS/2.1.0.2.0/package/scripts/service_check.py     |  2 +-
 .../RANGER/0.4.0/package/scripts/setup_ranger_xml.py    |  6 +++---
 .../RANGER_KMS/0.5.0.2.3/package/scripts/kms.py         |  6 +++---
 .../main/resources/custom_actions/scripts/ru_set_all.py |  2 +-
 .../src/test/python/custom_actions/test_ru_set_all.py   |  4 ++--
 .../src/test/python/stacks/2.0.6/FLUME/test_flume.py    |  2 +-
 .../test/python/stacks/2.0.6/HBASE/test_hbase_client.py | 12 ++++++------
 .../test/python/stacks/2.0.6/HBASE/test_hbase_master.py |  4 ++--
 .../stacks/2.0.6/HBASE/test_hbase_regionserver.py       |  4 ++--
 .../stacks/2.0.6/HBASE/test_phoenix_queryserver.py      |  2 +-
 .../src/test/python/stacks/2.0.6/HDFS/test_datanode.py  |  4 ++--
 .../test/python/stacks/2.0.6/HDFS/test_hdfs_client.py   |  6 +++---
 .../test/python/stacks/2.0.6/HDFS/test_journalnode.py   |  4 ++--
 .../src/test/python/stacks/2.0.6/HDFS/test_namenode.py  |  4 ++--
 .../test/python/stacks/2.0.6/HDFS/test_nfsgateway.py    |  2 +-
 .../test/python/stacks/2.0.6/HIVE/test_hive_client.py   |  4 ++--
 .../test/python/stacks/2.0.6/HIVE/test_hive_server.py   | 10 +++++-----
 .../python/stacks/2.0.6/HIVE/test_webhcat_server.py     |  4 ++--
 .../test/python/stacks/2.0.6/OOZIE/test_oozie_client.py |  4 ++--
 .../test/python/stacks/2.0.6/OOZIE/test_oozie_server.py |  8 ++++----
 .../src/test/python/stacks/2.0.6/PIG/test_pig_client.py |  4 ++--
 .../src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py    |  2 +-
 .../test/python/stacks/2.0.6/YARN/test_historyserver.py |  2 +-
 .../python/stacks/2.0.6/YARN/test_mapreduce2_client.py  |  4 ++--
 .../test/python/stacks/2.0.6/YARN/test_nodemanager.py   |  2 +-
 .../python/stacks/2.0.6/YARN/test_resourcemanager.py    |  2 +-
 .../test/python/stacks/2.0.6/YARN/test_yarn_client.py   |  4 ++--
 .../stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py     |  4 ++--
 .../stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py     |  4 ++--
 .../test/python/stacks/2.1/FALCON/test_falcon_client.py |  4 ++--
 .../test/python/stacks/2.1/FALCON/test_falcon_server.py |  6 +++---
 .../test/python/stacks/2.1/HIVE/test_hive_metastore.py  |  8 ++++----
 .../python/stacks/2.1/STORM/test_storm_drpc_server.py   |  4 ++--
 .../test/python/stacks/2.1/STORM/test_storm_nimbus.py   |  8 ++++----
 .../python/stacks/2.1/STORM/test_storm_nimbus_prod.py   |  8 ++++----
 .../stacks/2.1/STORM/test_storm_rest_api_service.py     |  4 ++--
 .../python/stacks/2.1/STORM/test_storm_supervisor.py    |  8 ++++----
 .../stacks/2.1/STORM/test_storm_supervisor_prod.py      |  8 ++++----
 .../python/stacks/2.1/STORM/test_storm_ui_server.py     |  4 ++--
 .../src/test/python/stacks/2.1/TEZ/test_tez_client.py   |  6 +++---
 .../python/stacks/2.1/YARN/test_apptimelineserver.py    |  2 +-
 .../python/stacks/2.2/ACCUMULO/test_accumulo_client.py  |  4 ++--
 .../test/python/stacks/2.2/KAFKA/test_kafka_broker.py   |  4 ++--
 .../test/python/stacks/2.2/KNOX/test_knox_gateway.py    |  8 ++++----
 .../test/python/stacks/2.2/RANGER/test_ranger_admin.py  |  2 +-
 .../python/stacks/2.2/RANGER/test_ranger_usersync.py    |  4 ++--
 .../test/python/stacks/2.2/SLIDER/test_slider_client.py |  8 ++++----
 .../python/stacks/2.2/SPARK/test_job_history_server.py  |  2 +-
 .../test/python/stacks/2.2/SPARK/test_spark_client.py   |  2 +-
 .../test/python/stacks/2.3/MAHOUT/test_mahout_client.py |  4 ++--
 .../python/stacks/2.3/SPARK/test_spark_thrift_server.py |  2 +-
 54 files changed, 121 insertions(+), 124 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-common/src/main/python/resource_management/libraries/functions/get_hdp_version.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/get_hdp_version.py b/ambari-common/src/main/python/resource_management/libraries/functions/get_hdp_version.py
index a56d33a..e8fdbb6 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/get_hdp_version.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/get_hdp_version.py
@@ -31,8 +31,6 @@ from resource_management.core.logger import Logger
 from resource_management.core.exceptions import Fail
 from resource_management.core import shell
 
-HDP_SELECT_BINARY = "/usr/bin/hdp-select"
-
 @OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
 def get_hdp_version(package_name):
   """
@@ -65,12 +63,12 @@ def get_hdp_version(package_name):
   @param package_name, name of the package, from which, function will try to get hdp version
   """
   
-  if not os.path.exists(HDP_SELECT_BINARY):
+  if not os.path.exists("/usr/bin/hdp-select"):
     Logger.info('Skipping get_hdp_version since hdp-select is not yet available')
     return None # lazy fail
   
   try:
-    command = 'ambari-python-wrap {HDP_SELECT_BINARY} status {package_name}'.format(HDP_SELECT_BINARY=HDP_SELECT_BINARY, package_name=package_name)
+    command = 'hdp-select status ' + package_name
     return_code, hdp_output = shell.call(command, timeout=20)
   except Exception, e:
     Logger.error(str(e))

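After the revert, hdp-select is invoked directly rather than through the ambari-python-wrap shim. The helper reduces to: shell out, then parse the version from output shaped like "hadoop-client - 2.2.1.0-2067". A rough sketch with subprocess (timeout and error handling elided):

  import os
  import subprocess

  def get_hdp_version_sketch(package_name):
      if not os.path.exists("/usr/bin/hdp-select"):
          return None  # lazy fail, as in the original
      out = subprocess.check_output(["hdp-select", "status", package_name])
      # "hadoop-client - 2.2.1.0-2067" -> "2.2.1.0-2067"
      return out.decode("utf-8").strip().rsplit(" - ", 1)[-1]
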
http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py
index 5efc07e..f4f0efc 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py
@@ -30,9 +30,8 @@ from resource_management.core.shell import call
 from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.version_select_util import get_versions_from_stack_root
 
-HDP_SELECT_PREFIX = ('ambari-python-wrap', 'hdp-select')
 # hdp-select set oozie-server 2.2.0.0-1234
-TEMPLATE = HDP_SELECT_PREFIX + ('set',)
+TEMPLATE = ('hdp-select', 'set')
 
 # a mapping of Ambari server role to hdp-select component name for all
 # non-clients
@@ -250,7 +249,7 @@ def get_hdp_versions(stack_root):
   :param stack_root: Stack install root
   :return: Returns list of installed stack versions.
   """
-  code, out = call(HDP_SELECT_PREFIX + ('versions',))
+  code, out = call("hdp-select versions")
   versions = []
   if 0 == code:
     for line in out.splitlines():

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
index 1dfb280..a3c02a6 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
@@ -228,7 +228,7 @@ class NameNodeDefault(NameNode):
     basedir = os.path.join(env.config.basedir, 'scripts')
     if(threshold == 'DEBUG'): #FIXME TODO remove this on PROD
       basedir = os.path.join(env.config.basedir, 'scripts', 'balancer-emulator')
-      command = ['ambari-python-wrap','hdfs-command.py']
+      command = ['python','hdfs-command.py']
 
     _print("Executing command %s\n" % command)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/service_check.py
index b4f44ae..6ec3996 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/service_check.py
@@ -86,7 +86,7 @@ class HdfsServiceCheckDefault(HdfsServiceCheck):
         checkWebUIFileName = "checkWebUI.py"
         checkWebUIFilePath = format("{tmp_dir}/{checkWebUIFileName}")
         comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
-        checkWebUICmd = format("ambari-python-wrap {checkWebUIFilePath} -m {comma_sep_jn_hosts} -p {journalnode_port} -s {https_only}")
+        checkWebUICmd = format("python {checkWebUIFilePath} -m {comma_sep_jn_hosts} -p {journalnode_port} -s {https_only}")
         File(checkWebUIFilePath,
              content=StaticFile(checkWebUIFileName),
              mode=0775)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py
index 36cbe87..77e487f 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py
@@ -192,7 +192,7 @@ def setup_ranger_db(rolling_upgrade=False):
   # User wants us to setup the DB user and DB?
   if params.create_db_dbuser:
     Logger.info('Setting up Ranger DB and DB User')
-    dba_setup = format('ambari-python-wrap {ranger_home}/dba_script.py -q')
+    dba_setup = format('python {ranger_home}/dba_script.py -q')
     Execute(dba_setup, 
             environment=env_dict,
             logoutput=True,
@@ -201,7 +201,7 @@ def setup_ranger_db(rolling_upgrade=False):
   else:
     Logger.info('Separate DBA property not set. Assuming Ranger DB and DB User exists!')
 
-  db_setup = format('ambari-python-wrap {ranger_home}/db_setup.py')
+  db_setup = format('python {ranger_home}/db_setup.py')
   Execute(db_setup, 
           environment=env_dict,
           logoutput=True,
@@ -220,7 +220,7 @@ def setup_java_patch(rolling_upgrade=False):
   if params.db_flavor.lower() == 'sqla':
     env_dict = {'RANGER_ADMIN_HOME':ranger_home, 'JAVA_HOME':params.java_home, 'LD_LIBRARY_PATH':params.ld_lib_path}
 
-  setup_java_patch = format('ambari-python-wrap {ranger_home}/db_setup.py -javapatch')
+  setup_java_patch = format('python {ranger_home}/db_setup.py -javapatch')
   Execute(setup_java_patch, 
           environment=env_dict,
           logoutput=True,

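These Execute calls all follow one pattern: run a Ranger setup script as the service user with an explicit environment (JAVA_HOME, RANGER_ADMIN_HOME, and LD_LIBRARY_PATH for SQL Anywhere). Roughly, ignoring the user switch and logging:

  import os
  import subprocess

  def run_ranger_script(ranger_home, script, java_home, extra_env=None):
      # Conceptual analog of the Execute resource above; paths illustrative.
      env = dict(os.environ, RANGER_ADMIN_HOME=ranger_home, JAVA_HOME=java_home)
      env.update(extra_env or {})
      subprocess.check_call(["python", os.path.join(ranger_home, script)],
                            env=env)
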
http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py
index e14c209..fafe1ec 100755
--- a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py
@@ -98,8 +98,8 @@ def setup_kms_db():
     if params.db_flavor.lower() == 'sqla':
       env_dict = {'RANGER_KMS_HOME':params.kms_home, 'JAVA_HOME': params.java_home, 'LD_LIBRARY_PATH':params.ld_library_path}
 
-    dba_setup = format('ambari-python-wrap {kms_home}/dba_script.py -q')
-    db_setup = format('ambari-python-wrap {kms_home}/db_setup.py')
+    dba_setup = format('python {kms_home}/dba_script.py -q')
+    db_setup = format('python {kms_home}/db_setup.py')
 
     Execute(dba_setup, environment=env_dict, logoutput=True, user=params.kms_user)
     Execute(db_setup, environment=env_dict, logoutput=True, user=params.kms_user)
@@ -109,7 +109,7 @@ def setup_java_patch():
 
   if params.has_ranger_admin:
 
-    setup_java_patch = format('ambari-python-wrap {kms_home}/db_setup.py -javapatch')
+    setup_java_patch = format('python {kms_home}/db_setup.py -javapatch')
 
     env_dict = {'RANGER_KMS_HOME':params.kms_home, 'JAVA_HOME': params.java_home}
     if params.db_flavor.lower() == 'sqla':

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/main/resources/custom_actions/scripts/ru_set_all.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/custom_actions/scripts/ru_set_all.py b/ambari-server/src/main/resources/custom_actions/scripts/ru_set_all.py
index ee167f6..6e5ddd2 100644
--- a/ambari-server/src/main/resources/custom_actions/scripts/ru_set_all.py
+++ b/ambari-server/src/main/resources/custom_actions/scripts/ru_set_all.py
@@ -59,7 +59,7 @@ class UpgradeSetAll(Script):
     real_ver = format_hdp_stack_version(version)
     if stack_name == "HDP":
       if compare_versions(real_ver, min_ver) >= 0:
-        cmd = ('ambari-python-wrap', 'hdp-select', 'set', 'all', version)
+        cmd = ('hdp-select', 'set', 'all', version)
         code, out = shell.call(cmd, sudo=True)
 
       if compare_versions(real_ver, format_hdp_stack_version("2.3")) >= 0:

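The guard above only runs `hdp-select set all <version>` when the stack version clears the minimum; compare_versions works on the dotted prefix and ignores the build number. The same gate, sketched without the Ambari helpers:

  def version_tuple(v):
      # "2.2.1.0-2260" -> (2, 2, 1, 0); the "-2260" build number is ignored.
      return tuple(int(p) for p in v.split("-")[0].split("."))

  def should_set_all(version, minimum="2.2"):
      return version_tuple(version) >= version_tuple(minimum)

  assert should_set_all("2.2.1.0-2260")
  assert not should_set_all("2.1.7.0-100")
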
http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/custom_actions/test_ru_set_all.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/custom_actions/test_ru_set_all.py b/ambari-server/src/test/python/custom_actions/test_ru_set_all.py
index afb2314..b102b4e 100644
--- a/ambari-server/src/test/python/custom_actions/test_ru_set_all.py
+++ b/ambari-server/src/test/python/custom_actions/test_ru_set_all.py
@@ -93,7 +93,7 @@ class TestRUSetAll(RMFTestCase):
     ru_execute = UpgradeSetAll()
     ru_execute.actionexecute(None)
 
-    call_mock.assert_called_with(('ambari-python-wrap', 'hdp-select', 'set', 'all', u'2.2.1.0-2260'), sudo=True)
+    call_mock.assert_called_with(('hdp-select', 'set', 'all', u'2.2.1.0-2260'), sudo=True)
 
   @patch("resource_management.core.shell.call")
   @patch.object(Script, 'get_config')
@@ -129,7 +129,7 @@ class TestRUSetAll(RMFTestCase):
     ru_execute.actionexecute(None)
 
     self.assertTrue(link_mock.called)
-    call_mock.assert_called_with(('ambari-python-wrap', 'hdp-select', 'set', 'all', '2.3.0.0-1234'), sudo=True)
+    call_mock.assert_called_with(('hdp-select', 'set', 'all', '2.3.0.0-1234'), sudo=True)
 
 
   @patch("os.path.islink")

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py b/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py
index 222090e..be2b87c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py
@@ -536,7 +536,7 @@ class TestFlumeHandler(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'flume-server', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'flume-server', '2.2.1.0-2067'), sudo=True)
 
 
 def build_flume(content):

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
index 07d05bc..ff25933 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
@@ -202,9 +202,9 @@ class TestHBaseClient(RMFTestCase):
                    target = RMFTestCase.TARGET_COMMON_SERVICES,
                    mocks_dict = mocks_dict)
 
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'hbase-client', '2.2.1.0-2067'), sudo=True)
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'phoenix-client', '2.2.1.0-2067'), sudo=True)
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'hbase-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'phoenix-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
     self.assertEquals(1, mocks_dict['call'].call_count)
 
 
@@ -228,9 +228,9 @@ class TestHBaseClient(RMFTestCase):
                        call_mocks = [(0, None), (0, None), (0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'hbase-client', version), sudo=True)
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'phoenix-client', version), sudo=True)
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-client', version), sudo=True)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hbase-client', version), sudo=True)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'phoenix-client', version), sudo=True)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-client', version), sudo=True)
 
     self.assertEquals(3, mocks_dict['call'].call_count)
     self.assertEquals(6, mocks_dict['checked_call'].call_count)

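The mocks_dict assertions above count invocations of the patched shell functions. Stripped of the RMFTestCase plumbing, they reduce to standard mock checks along these lines:

  from mock import MagicMock  # the tests patch resource_management.core.shell

  call = MagicMock(return_value=(0, None))
  call(("hdp-select", "set", "hbase-client", "2.2.1.0-2067"), sudo=True)

  call.assert_called_with(
      ("hdp-select", "set", "hbase-client", "2.2.1.0-2067"), sudo=True)
  assert call.call_count == 1
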
http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index a7bcf23..f19da4c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -733,7 +733,7 @@ class TestHBaseMaster(RMFTestCase):
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        mocks_dict = mocks_dict)
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'hbase-master', version), sudo=True,)
+                              ('hdp-select', 'set', 'hbase-master', version), sudo=True,)
     self.assertFalse(call_mock.called)
     self.assertNoMoreResources()
 
@@ -757,7 +757,7 @@ class TestHBaseMaster(RMFTestCase):
                        call_mocks = [(0, None), (0, None), (0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'hbase-master', version), sudo=True)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hbase-master', version), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(3, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
index 6c34dcd..2cb05c0 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
@@ -491,7 +491,7 @@ class TestHbaseRegionServer(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'hbase-regionserver', version), sudo=True,)
+                              ('hdp-select', 'set', 'hbase-regionserver', version), sudo=True,)
     self.assertNoMoreResources()
 
 
@@ -533,7 +533,7 @@ class TestHbaseRegionServer(RMFTestCase):
                        call_mocks = [(0, None), (0, None), (0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'hbase-regionserver', version), sudo=True)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hbase-regionserver', version), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(3, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
index bffd6f4..0cfc2e3 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
@@ -413,6 +413,6 @@ class TestPhoenixQueryServer(RMFTestCase):
         mode = 0755,
         cd_access = 'a',
     )
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'phoenix-server', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'phoenix-server', '2.3.0.0-1234'), sudo=True)
 
     self.assertNoMoreResources()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
index 72925a0..d5a42f0 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
@@ -451,7 +451,7 @@ class TestDatanode(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-hdfs-datanode', version), sudo=True,)
+                              ('hdp-select', 'set', 'hadoop-hdfs-datanode', version), sudo=True,)
     self.assertNoMoreResources()
 
 
@@ -472,7 +472,7 @@ class TestDatanode(RMFTestCase):
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-hdfs-datanode', version), sudo=True,)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-hdfs-datanode', version), sudo=True,)
 
     self.assertNoMoreResources()
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
index 7543f7d..4948d01 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
@@ -82,7 +82,7 @@ class Test(RMFTestCase):
                    hdp_stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
 
     # for now, it's enough that hdp-select is confirmed
 
@@ -204,7 +204,7 @@ class Test(RMFTestCase):
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-client', version), sudo=True,)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-client', version), sudo=True,)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)
@@ -228,5 +228,5 @@ class Test(RMFTestCase):
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-client', version), sudo=True,)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-client', version), sudo=True,)
     self.assertNoMoreResources()

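(Note on the mocks_dict assertions seen above and below: RMFTestCase feeds each (returncode, output) tuple from call_mocks to one resource_management.core.shell.call during the scripted command, and mocks_dict hands back the patched callables so the test can count invocations afterwards. A hedged illustration, with class and command names taken from the hdfs_client hunks above and the script path assumed from RMFTestCase conventions:

    # sketch: drive the script once, then verify how many times the
    # mocked shell.call / shell.checked_call were actually hit
    mocks_dict = {}
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hdfs_client.py",
                       classname = "HdfsClient",
                       command = "pre_rolling_restart",
                       config_dict = json_content,
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES,
                       call_mocks = [(0, None), (0, None)],
                       mocks_dict = mocks_dict)
    self.assertEquals(1, mocks_dict['call'].call_count)
)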
http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
index 61f88ea..becc82b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
@@ -472,7 +472,7 @@ class TestJournalnode(RMFTestCase):
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-hdfs-journalnode', version), sudo=True,)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-hdfs-journalnode', version), sudo=True,)
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
@@ -493,7 +493,7 @@ class TestJournalnode(RMFTestCase):
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-hdfs-journalnode', version), sudo=True,)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-hdfs-journalnode', version), sudo=True,)
     self.assertNoMoreResources()
 
     self.assertEquals(

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index 116fa94..b9211a5 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -1300,7 +1300,7 @@ class TestNamenode(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-hdfs-namenode', version), sudo=True)
+                              ('hdp-select', 'set', 'hadoop-hdfs-namenode', version), sudo=True)
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
@@ -1320,7 +1320,7 @@ class TestNamenode(RMFTestCase):
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-hdfs-namenode', version), sudo=True)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-hdfs-namenode', version), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py
index 65f294f..89b4762 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py
@@ -401,5 +401,5 @@ class TestNFSGateway(RMFTestCase):
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None), (0, None), (0, None), (0, None)])
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-hdfs-nfs3', version), sudo=True,)
+                              ('hdp-select', 'set', 'hadoop-hdfs-nfs3', version), sudo=True,)
     self.assertNoMoreResources()

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py
index 6173081..79bcc73 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py
@@ -188,7 +188,7 @@ class TestHiveClient(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-client', version), sudo=True,)
+                              ('hdp-select', 'set', 'hadoop-client', version), sudo=True,)
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
@@ -210,7 +210,7 @@ class TestHiveClient(RMFTestCase):
                        mocks_dict = mocks_dict)
 
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-client', version), sudo=True,)
+                              ('hdp-select', 'set', 'hadoop-client', version), sudo=True,)
     self.assertNoMoreResources()
 
     self.assertEquals(2, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
index f149217..e4ca82e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
@@ -733,7 +733,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
      call_mocks = call_side_effects
     )
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'hive-server2', '2.2.1.0-2065'), sudo=True,)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hive-server2', '2.2.1.0-2065'), sudo=True,)
     self.assertResourceCalledByIndex(31, 'Execute', 'hive --config /usr/hdp/current/hive-server2/conf/conf.server --service hiveserver2 --deregister 1.2.1.2.3.0.0-2434',
       path=['/bin:/usr/hdp/current/hive-server2/bin:/usr/hdp/current/hadoop-client/bin'],
       tries=1, user='hive')
@@ -757,7 +757,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
      call_mocks = call_side_effects
     )
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'hive-server2', '2.2.1.0-2065'), sudo=True,)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hive-server2', '2.2.1.0-2065'), sudo=True,)
     self.assertResourceCalledByIndex(33, 'Execute', 'hive --config /etc/hive/conf.server --service hiveserver2 --deregister 1.2.1.2.3.0.0-2434',
       path=['/bin:/usr/hdp/current/hive-server2/bin:/usr/hdp/current/hadoop-client/bin'],
       tries=1, user='hive')
@@ -773,7 +773,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
     except:
       pass
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'hive-server2', '2.2.1.0-2065'), sudo=True,)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hive-server2', '2.2.1.0-2065'), sudo=True,)
     self.assertNoMoreResources()
 
   @patch("resource_management.libraries.functions.security_commons.build_expectations")
@@ -905,7 +905,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'hive-server2', version), sudo=True,)
+                              ('hdp-select', 'set', 'hive-server2', version), sudo=True,)
 
     copy_to_hdfs_mock.assert_any_call("mapreduce", "hadoop", "hdfs", host_sys_prepped=False)
     copy_to_hdfs_mock.assert_any_call("tez", "hadoop", "hdfs", host_sys_prepped=False)
@@ -945,7 +945,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
 
     self.assertResourceCalled('Execute',
 
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'hive-server2', version), sudo=True,)
+                              ('hdp-select', 'set', 'hive-server2', version), sudo=True,)
     copy_to_hdfs_mock.assert_any_call("mapreduce", "hadoop", "hdfs", host_sys_prepped=False)
     copy_to_hdfs_mock.assert_any_call("tez", "hadoop", "hdfs", host_sys_prepped=False)
     self.assertEquals(2, copy_to_hdfs_mock.call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
index 7af944e..aca7664 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
@@ -356,7 +356,7 @@ class TestWebHCatServer(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'hive-webhcat', version), sudo=True,)
+                              ('hdp-select', 'set', 'hive-webhcat', version), sudo=True,)
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
@@ -385,7 +385,7 @@ class TestWebHCatServer(RMFTestCase):
     self.assertTrue("/usr/hdp/current/hive-webhcat/etc/webhcat" == sys.modules["params"].webhcat_conf_dir)
 
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'hive-webhcat', version), sudo=True,)
+                              ('hdp-select', 'set', 'hive-webhcat', version), sudo=True,)
     self.assertNoMoreResources()
 
     self.assertEquals(2, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
index caade81..e8196d6 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
@@ -211,7 +211,7 @@ class TestOozieClient(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'oozie-client', version), sudo=True)
+                              ('hdp-select', 'set', 'oozie-client', version), sudo=True)
     self.assertNoMoreResources()
 
   
@@ -234,7 +234,7 @@ class TestOozieClient(RMFTestCase):
                        mocks_dict = mocks_dict)
 
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'oozie-client', version), sudo=True)
+                              ('hdp-select', 'set', 'oozie-client', version), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
index 5e25035..9cf426f 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
@@ -1179,7 +1179,7 @@ class TestOozieServer(RMFTestCase):
       ('tar', '-zcvhf', '/tmp/oozie-upgrade-backup/oozie-conf-backup.tar', '/usr/hdp/current/oozie-server/conf/'),
       sudo = True)
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'oozie-server', u'2.2.1.0-2135'),
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'oozie-server', u'2.2.1.0-2135'),
       sudo = True )
 
     self.assertResourceCalled('Execute',
@@ -1247,7 +1247,7 @@ class TestOozieServer(RMFTestCase):
       ('tar', '-zcvhf', '/tmp/oozie-upgrade-backup/oozie-conf-backup.tar', '/usr/hdp/current/oozie-server/conf/'),
       sudo = True)
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'oozie-server', '2.3.0.0-1234'), sudo = True)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'oozie-server', '2.3.0.0-1234'), sudo = True)
 
     self.assertResourceCalled('Execute',
       ('tar', '-xvf', '/tmp/oozie-upgrade-backup/oozie-conf-backup.tar', '-C', '/usr/hdp/current/oozie-server/conf//'),
@@ -1305,7 +1305,7 @@ class TestOozieServer(RMFTestCase):
       ('tar', '-zcvhf', '/tmp/oozie-upgrade-backup/oozie-conf-backup.tar', '/usr/hdp/current/oozie-server/conf/'),
       sudo = True)
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'oozie-server', u'2.2.0.0-0000'), sudo = True)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'oozie-server', u'2.2.0.0-0000'), sudo = True)
 
     self.assertResourceCalled('Execute',
       ('tar', '-xvf', '/tmp/oozie-upgrade-backup/oozie-conf-backup.tar', '-C', '/usr/hdp/current/oozie-server/conf//'),
@@ -1505,7 +1505,7 @@ class TestOozieServer(RMFTestCase):
       ('tar', '-zcvhf', '/tmp/oozie-upgrade-backup/oozie-conf-backup.tar', '/usr/hdp/current/oozie-server/conf/'),
       sudo = True)
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'oozie-server', '2.3.0.0-1234'), sudo = True)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'oozie-server', '2.3.0.0-1234'), sudo = True)
 
     self.assertResourceCalled('Execute',
       ('tar', '-xvf', '/tmp/oozie-upgrade-backup/oozie-conf-backup.tar', '-C', '/usr/hdp/current/oozie-server/conf//'),

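(Note on the oozie-server hunks above: the expectations preserve a three-step upgrade ordering — the conf directory is archived with tar before the version switch, hdp-select repoints the package, and the archive is restored into the new conf location. A sketch of that sequence, with the paths copied verbatim from the diff and Execute as in the earlier note:

    # sketch: backup -> switch -> restore, each as a sudo'd Execute tuple
    from resource_management.core.resources.system import Execute

    Execute(('tar', '-zcvhf', '/tmp/oozie-upgrade-backup/oozie-conf-backup.tar',
             '/usr/hdp/current/oozie-server/conf/'), sudo = True)
    Execute(('hdp-select', 'set', 'oozie-server', '2.3.0.0-1234'), sudo = True)
    Execute(('tar', '-xvf', '/tmp/oozie-upgrade-backup/oozie-conf-backup.tar',
             '-C', '/usr/hdp/current/oozie-server/conf//'), sudo = True)
)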
http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py b/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py
index b74dc5b..2f42520 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py
@@ -146,7 +146,7 @@ class TestPigClient(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-client', version), sudo=True)
+                              ('hdp-select', 'set', 'hadoop-client', version), sudo=True)
     self.assertNoMoreResources()
 
   def test_pre_rolling_restart_23(self):
@@ -167,7 +167,7 @@ class TestPigClient(RMFTestCase):
                        mocks_dict = mocks_dict)
 
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-client', version), sudo=True)
+                              ('hdp-select', 'set', 'hadoop-client', version), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(2, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py b/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py
index 3dc2c4b..2c0b8c8 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py
@@ -129,7 +129,7 @@ class TestSqoop(RMFTestCase):
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'sqoop-client', version), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'sqoop-client', version), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
index a4e9cb8..b6d5f42 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
@@ -779,7 +779,7 @@ class TestHistoryServer(RMFTestCase):
                        call_mocks = [(0, None), (0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-mapreduce-historyserver', version), sudo=True)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-mapreduce-historyserver', version), sudo=True)
     copy_to_hdfs_mock.assert_called_with("tez", "hadoop", "hdfs", host_sys_prepped=False)
 
     self.assertResourceCalled('HdfsResource', None,

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py
index 474dee4..0e52264 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py
@@ -384,7 +384,7 @@ class TestMapReduce2Client(RMFTestCase):
     )
 
     # for now, it's enough that hdp-select is confirmed
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
 
 
   def test_pre_rolling_restart_23(self):
@@ -404,7 +404,7 @@ class TestMapReduce2Client(RMFTestCase):
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-client', version), sudo=True)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-client', version), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
index f34ca30..2692420 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
@@ -713,7 +713,7 @@ class TestNodeManager(RMFTestCase):
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-yarn-nodemanager', version), sudo=True)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-yarn-nodemanager', version), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
index 09f87e6..fb7d847 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
@@ -640,7 +640,7 @@ class TestResourceManager(RMFTestCase):
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-yarn-resourcemanager', version), sudo=True)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-yarn-resourcemanager', version), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py
index effbc22..21fbb9d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py
@@ -551,7 +551,7 @@ class TestYarnClient(RMFTestCase):
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
 
     # for now, it's enough that hdp-select is confirmed
 
@@ -573,7 +573,7 @@ class TestYarnClient(RMFTestCase):
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-client', version), sudo=True)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-client', version), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py
index bfd476e..79fd74b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py
@@ -167,7 +167,7 @@ class TestZookeeperClient(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'zookeeper-client', version), sudo=True)
+                              ('hdp-select', 'set', 'zookeeper-client', version), sudo=True)
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
@@ -191,7 +191,7 @@ class TestZookeeperClient(RMFTestCase):
                        mocks_dict = mocks_dict)
 
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'zookeeper-client', version), sudo=True)
+                              ('hdp-select', 'set', 'zookeeper-client', version), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
index be24a11..afc4bc7 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
@@ -353,7 +353,7 @@ class TestZookeeperServer(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'zookeeper-server', version), sudo=True)
+                              ('hdp-select', 'set', 'zookeeper-server', version), sudo=True)
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
@@ -377,7 +377,7 @@ class TestZookeeperServer(RMFTestCase):
                        mocks_dict = mocks_dict)
 
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'zookeeper-server', version), sudo=True)
+                              ('hdp-select', 'set', 'zookeeper-server', version), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py
index 9e56e6e..9b08a90 100644
--- a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py
+++ b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py
@@ -118,7 +118,7 @@ class TestFalconClient(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'falcon-client', version), sudo=True,)
+                              ('hdp-select', 'set', 'falcon-client', version), sudo=True,)
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
@@ -140,7 +140,7 @@ class TestFalconClient(RMFTestCase):
                        mocks_dict = mocks_dict)
 
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'falcon-client', version), sudo=True,)
+                              ('hdp-select', 'set', 'falcon-client', version), sudo=True,)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
index da9282c..c9166a5 100644
--- a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
+++ b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
@@ -220,7 +220,7 @@ class TestFalconServer(RMFTestCase):
      u'/hadoop/falcon'),
         sudo = True,
     )
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'falcon-server', u'2.2.1.0-2135'),
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'falcon-server', u'2.2.1.0-2135'),
         sudo = True,
     )
     self.assertResourceCalled('Execute', ('tar',
@@ -504,7 +504,7 @@ class TestFalconServer(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'falcon-server', version), sudo=True,)
+                              ('hdp-select', 'set', 'falcon-server', version), sudo=True,)
     self.printResources()
 
   @patch('os.path.isfile', new=MagicMock(return_value=True))
@@ -531,7 +531,7 @@ class TestFalconServer(RMFTestCase):
                        mocks_dict = mocks_dict)
 
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'falcon-server', version), sudo=True,)
+                              ('hdp-select', 'set', 'falcon-server', version), sudo=True,)
 
     self.assertResourceCalled('Execute', ('tar',
      '-xvf',

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py b/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
index c5566a8..9d42b9f 100644
--- a/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
+++ b/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
@@ -496,7 +496,7 @@ class TestHiveMetastore(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'hive-metastore', version), sudo=True,)
+                              ('hdp-select', 'set', 'hive-metastore', version), sudo=True,)
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
@@ -518,7 +518,7 @@ class TestHiveMetastore(RMFTestCase):
                        mocks_dict = mocks_dict)
 
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'hive-metastore', version), sudo=True,)
+                              ('hdp-select', 'set', 'hive-metastore', version), sudo=True,)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)
@@ -581,7 +581,7 @@ class TestHiveMetastore(RMFTestCase):
      logoutput = True, environment = {'HIVE_CONF_DIR': '/usr/hdp/current/hive-server2/conf/conf.server'},
       tries = 1, user = 'hive')
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'hive-metastore', version), sudo=True,)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hive-metastore', version), sudo=True,)
 
     self.assertNoMoreResources()
 
@@ -671,6 +671,6 @@ class TestHiveMetastore(RMFTestCase):
                               logoutput = True, environment = {'HIVE_CONF_DIR': '/usr/hdp/current/hive-server2/conf/conf.server'},
                               tries = 1, user = 'hive')
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'hive-metastore', version), sudo=True,)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hive-metastore', version), sudo=True,)
 
     self.assertNoMoreResources()

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py
index 75f01c9..283c865 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py
@@ -150,7 +150,7 @@ class TestStormDrpcServer(TestStormBase):
                      hdp_stack_version = self.STACK_VERSION,
                      target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
 
   def test_pre_rolling_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.1/configs/default.json"
@@ -169,7 +169,7 @@ class TestStormDrpcServer(TestStormBase):
                      call_mocks = [(0, None), (0, None)],
                      mocks_dict = mocks_dict)
 
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
index 1ef9dc5..bbcc15a 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
@@ -149,8 +149,8 @@ class TestStormNimbus(TestStormBase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'storm-nimbus', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-nimbus', '2.2.1.0-2067'), sudo=True)
 
   def test_pre_rolling_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.1/configs/default.json"
@@ -169,8 +169,8 @@ class TestStormNimbus(TestStormBase):
                      call_mocks = [(0, None), (0, None)],
                      mocks_dict = mocks_dict)
 
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'storm-nimbus', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-nimbus', '2.3.0.0-1234'), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)

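(Note on the Storm hunks above and below: the expectations change in pairs because one daemon maps to more than one hdp-select package (storm-client plus the daemon-specific package). A minimal sketch, assuming a component-to-packages mapping like the one these tests exercise; the constant name is hypothetical:

    # sketch: issue one hdp-select call per package backing the component
    from resource_management.core.resources.system import Execute

    STORM_NIMBUS_PACKAGES = ('storm-client', 'storm-nimbus')  # illustrative

    def select_all(packages, version):
        for package in packages:
            Execute(('hdp-select', 'set', package, version), sudo=True)

    select_all(STORM_NIMBUS_PACKAGES, '2.3.0.0-1234')
)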
http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus_prod.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus_prod.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus_prod.py
index 125705f..03f33f6 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus_prod.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus_prod.py
@@ -114,8 +114,8 @@ class TestStormNimbus(TestStormBase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'storm-nimbus', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-nimbus', '2.2.1.0-2067'), sudo=True)
 
   def test_pre_rolling_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.1/configs/default.json"
@@ -134,8 +134,8 @@ class TestStormNimbus(TestStormBase):
                      call_mocks = [(0, None), (0, None)],
                      mocks_dict = mocks_dict)
 
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'storm-nimbus', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-nimbus', '2.3.0.0-1234'), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_rest_api_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_rest_api_service.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_rest_api_service.py
index 90c3205..b26913e 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_rest_api_service.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_rest_api_service.py
@@ -149,7 +149,7 @@ class TestStormRestApi(TestStormBase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
 
   def test_pre_rolling_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.1/configs/default.json"
@@ -168,7 +168,7 @@ class TestStormRestApi(TestStormBase):
                      call_mocks = [(0, None), (0, None)],
                      mocks_dict = mocks_dict)
 
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor.py
index 8173ba7..84fb64c 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor.py
@@ -194,8 +194,8 @@ class TestStormSupervisor(TestStormBase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'storm-supervisor', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-supervisor', '2.2.1.0-2067'), sudo=True)
 
   def test_pre_rolling_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.1/configs/default.json"
@@ -214,8 +214,8 @@ class TestStormSupervisor(TestStormBase):
                      call_mocks = [(0, None), (0, None)],
                      mocks_dict = mocks_dict)
 
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'storm-supervisor', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-supervisor', '2.3.0.0-1234'), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor_prod.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor_prod.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor_prod.py
index b9d5381..649b716 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor_prod.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor_prod.py
@@ -163,8 +163,8 @@ class TestStormSupervisor(TestStormBase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'storm-supervisor', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-supervisor', '2.2.1.0-2067'), sudo=True)
 
   def test_pre_rolling_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.1/configs/default.json"
@@ -183,8 +183,8 @@ class TestStormSupervisor(TestStormBase):
                      call_mocks = [(0, None), (0, None)],
                      mocks_dict = mocks_dict)
 
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'storm-supervisor', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-supervisor', '2.3.0.0-1234'), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py
index 2b50320..185c66d 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py
@@ -147,7 +147,7 @@ class TestStormUiServer(TestStormBase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
 
   def test_pre_rolling_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.1/configs/default.json"
@@ -166,7 +166,7 @@ class TestStormUiServer(TestStormBase):
                      call_mocks = [(0, None), (0, None)],
                      mocks_dict = mocks_dict)
 
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py b/ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py
index 6083d70..89037eb 100644
--- a/ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py
+++ b/ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py
@@ -73,7 +73,7 @@ class TestTezClient(RMFTestCase):
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     get_hdp_version_mock.return_value = "2.2.1.0-2067"
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
 
     # for now, it's enough that hdp-select is confirmed
 
@@ -87,7 +87,7 @@ class TestTezClient(RMFTestCase):
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     get_hdp_version_mock.return_value = "2.2.1.0-2067"
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
 
     # for now, it's enough that hdp-select is confirmed
 
@@ -108,7 +108,7 @@ class TestTezClient(RMFTestCase):
                        call_mocks = [(0, None), (0, None), (0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-client', version), sudo=True)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-client', version), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(2, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py b/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
index 4d264b1..cb7b06b 100644
--- a/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
+++ b/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
@@ -392,7 +392,7 @@ class TestAppTimelineServer(RMFTestCase):
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-yarn-timelineserver', version), sudo=True)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'hadoop-yarn-timelineserver', version), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.2/ACCUMULO/test_accumulo_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/ACCUMULO/test_accumulo_client.py b/ambari-server/src/test/python/stacks/2.2/ACCUMULO/test_accumulo_client.py
index 5e6edb4..1b518d4 100644
--- a/ambari-server/src/test/python/stacks/2.2/ACCUMULO/test_accumulo_client.py
+++ b/ambari-server/src/test/python/stacks/2.2/ACCUMULO/test_accumulo_client.py
@@ -44,7 +44,7 @@ class TestAccumuloClient(RMFTestCase):
       hdp_stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'accumulo-client', version), sudo=True,)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'accumulo-client', version), sudo=True,)
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
@@ -67,7 +67,7 @@ class TestAccumuloClient(RMFTestCase):
       call_mocks = [(0, None), (0, None)],
       mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'accumulo-client', version), sudo=True,)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'accumulo-client', version), sudo=True,)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.2/KAFKA/test_kafka_broker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/KAFKA/test_kafka_broker.py b/ambari-server/src/test/python/stacks/2.2/KAFKA/test_kafka_broker.py
index 6af4d5f..ffb5914 100644
--- a/ambari-server/src/test/python/stacks/2.2/KAFKA/test_kafka_broker.py
+++ b/ambari-server/src/test/python/stacks/2.2/KAFKA/test_kafka_broker.py
@@ -113,7 +113,7 @@ class TestKafkaBroker(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'kafka-broker', version), sudo=True,)
+                              ('hdp-select', 'set', 'kafka-broker', version), sudo=True,)
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
@@ -135,7 +135,7 @@ class TestKafkaBroker(RMFTestCase):
                        mocks_dict = mocks_dict)
 
     self.assertResourceCalled('Execute',
-                              ('ambari-python-wrap', 'hdp-select', 'set', 'kafka-broker', version), sudo=True,)
+                              ('hdp-select', 'set', 'kafka-broker', version), sudo=True,)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py b/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py
index 5da3ac4..83bba1f 100644
--- a/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py
+++ b/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py
@@ -248,7 +248,7 @@ class TestKnoxGateway(RMFTestCase):
      '/var/lib/knox/data'),
         sudo = True,
     )
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'knox-server', '2.2.1.0-3242'),
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'knox-server', '2.2.1.0-3242'),
         sudo = True,
     )
     self.assertNoMoreResources()
@@ -292,7 +292,7 @@ class TestKnoxGateway(RMFTestCase):
      '/var/lib/knox/data'),
         sudo = True,
     )
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'knox-server', version),
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'knox-server', version),
         sudo = True,
     )
     self.assertResourceCalled('Execute', ('cp',
@@ -364,7 +364,7 @@ class TestKnoxGateway(RMFTestCase):
      '/var/lib/knox/data'),
         sudo = True,
     )
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'knox-server', version),
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'knox-server', version),
         sudo = True,
     )
     self.assertResourceCalled('Execute', ('cp',
@@ -438,7 +438,7 @@ class TestKnoxGateway(RMFTestCase):
     )
 
     '''
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'knox-server', version),
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'knox-server', version),
         sudo = True,
     )
     self.assertResourceCalled('Execute', ('cp',

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_admin.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_admin.py b/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_admin.py
index 707718c..912f187 100644
--- a/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_admin.py
+++ b/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_admin.py
@@ -211,7 +211,7 @@ class TestRangerAdmin(RMFTestCase):
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'ranger-admin', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'ranger-admin', '2.3.0.0-1234'), sudo=True)
 
     self.assertEquals(1, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_usersync.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_usersync.py b/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_usersync.py
index 6accd29..d10a9fc 100644
--- a/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_usersync.py
+++ b/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_usersync.py
@@ -125,7 +125,7 @@ class TestRangerUsersync(RMFTestCase):
                               environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_67'},
                               sudo = True
     )
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'ranger-usersync', '2.2.2.0-2399'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'ranger-usersync', '2.2.2.0-2399'), sudo=True)
 
   @patch("setup_ranger.setup_usersync")
   def test_upgrade_23(self, setup_usersync_mock):
@@ -148,7 +148,7 @@ class TestRangerUsersync(RMFTestCase):
     self.assertResourceCalled("Execute", ("/usr/bin/ranger-usersync-stop",),
                               environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_67'},
                               sudo = True)
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'ranger-usersync', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'ranger-usersync', '2.3.0.0-1234'), sudo=True)
 
     self.assertEquals(2, mocks_dict['call'].call_count)
     self.assertEquals(1, mocks_dict['checked_call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.2/SLIDER/test_slider_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/SLIDER/test_slider_client.py b/ambari-server/src/test/python/stacks/2.2/SLIDER/test_slider_client.py
index 931a87c..3af55f6 100644
--- a/ambari-server/src/test/python/stacks/2.2/SLIDER/test_slider_client.py
+++ b/ambari-server/src/test/python/stacks/2.2/SLIDER/test_slider_client.py
@@ -115,8 +115,8 @@ class TestSliderClient(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'slider-client', '2.2.1.0-2067'), sudo=True)
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'slider-client', '2.2.1.0-2067'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
     self.assertNoMoreResources()
 
 
@@ -136,8 +136,8 @@ class TestSliderClient(RMFTestCase):
                        call_mocks = [(0, None), (0, None), (0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'slider-client', '2.3.0.0-1234'), sudo=True)
-    self.assertResourceCalled("Execute", ('ambari-python-wrap', 'hdp-select', 'set', 'hadoop-client', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'slider-client', '2.3.0.0-1234'), sudo=True)
+    self.assertResourceCalled("Execute", ('hdp-select', 'set', 'hadoop-client', '2.3.0.0-1234'), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(2, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py b/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
index d73a898..4b87531 100644
--- a/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
+++ b/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
@@ -303,7 +303,7 @@ class TestJobHistoryServer(RMFTestCase):
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'spark-historyserver', version), sudo=True)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'spark-historyserver', version), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py b/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py
index e6c8ea0..081db57 100644
--- a/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py
+++ b/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py
@@ -140,7 +140,7 @@ class TestSparkClient(RMFTestCase):
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'spark-client', version), sudo=True)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'spark-client', version), sudo=True)
     self.assertNoMoreResources()
 
     self.assertEquals(1, mocks_dict['call'].call_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e3b0c362/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_client.py b/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_client.py
index e14a339..a44c5af 100644
--- a/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_client.py
+++ b/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_client.py
@@ -76,7 +76,7 @@ class TestMahoutClient(RMFTestCase):
       hdp_stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'mahout-client', '2.2.1.0-3242'), sudo=True)
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'mahout-client', '2.2.1.0-3242'), sudo=True)
     self.assertNoMoreResources()
 
   def test_pre_rolling_restart_23(self):
@@ -103,7 +103,7 @@ class TestMahoutClient(RMFTestCase):
       call_mocks = itertools.cycle([(0, None)]),
       mocks_dict = mocks_dict)
 
-    self.assertResourceCalled('Execute', ('ambari-python-wrap', 'hdp-select', 'set', 'mahout-client', '2.3.0.0-1234'),
+    self.assertResourceCalled('Execute', ('hdp-select', 'set', 'mahout-client', '2.3.0.0-1234'),
         sudo = True,
     )
     self.assertNoMoreResources()
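
For context, the behavior these assertions pin down is a direct hdp-select
invocation: with the ambari-python-wrap shim removed, the select command no
longer depends on whichever python the wrapper resolves to. A minimal sketch
of the call pattern, assuming the resource_management Execute resource (the
helper name below is illustrative, not the actual Ambari function):

    from resource_management.core.resources.system import Execute

    def select_component(component, version):
        # Passing the command as a tuple skips shell parsing, so each
        # argument reaches hdp-select verbatim; sudo=True mirrors the
        # keyword asserted by the tests above.
        Execute(('hdp-select', 'set', component, version), sudo=True)

    # e.g. select_component('kafka-broker', '2.3.0.0-1234')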


[45/50] [abbrv] ambari git commit: AMBARI-13256 configs.sh cannot add name-value pair to an empty config (dsen)

Posted by nc...@apache.org.
AMBARI-13256 configs.sh cannot add name-value pair to an empty config (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a1488cec
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a1488cec
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a1488cec

Branch: refs/heads/branch-dev-patch-upgrade
Commit: a1488cec7378b29bf04be08b901d6a210cfa9269
Parents: 7db1f10
Author: Dmytro Sen <ds...@apache.org>
Authored: Tue Sep 29 16:21:03 2015 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Tue Sep 29 16:21:03 2015 +0300

----------------------------------------------------------------------
 .../src/main/resources/scripts/configs.sh       | 21 ++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a1488cec/ambari-server/src/main/resources/scripts/configs.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/scripts/configs.sh b/ambari-server/src/main/resources/scripts/configs.sh
index a32ff42..ba02d2d 100755
--- a/ambari-server/src/main/resources/scripts/configs.sh
+++ b/ambari-server/src/main/resources/scripts/configs.sh
@@ -130,14 +130,19 @@ doConfigUpdate () {
       if [ "`echo $line | grep -E "},?$"`" ]; then
         ## Properties ended
         ## Add property
-        # Remove the last ,
         propLen=${#newProperties}
         lastChar=${newProperties:$propLen-1:1}
-        if [ "$lastChar" == "," ]; then
-          newProperties=${newProperties:0:$propLen-1}
-        fi
-        if [ "$MODE" == "set" ]; then
-          newProperties="$newProperties, \"$CONFIGKEY\" : \"$CONFIGVALUE\" "
+        if [ "$MODE" == "delete" ]; then
+          # Remove the last ,
+          if [ "$lastChar" == "," ]; then
+            newProperties=${newProperties:0:$propLen-1}
+          fi
+        elif [ "$MODE" == "set" ]; then
+          # Add comma if required
+          if [ "$lastChar" != ","  -a "$lastChar" != "{" ]; then
+            newProperties="$newProperties,"
+          fi
+          newProperties="$newProperties \"$CONFIGKEY\" : \"$CONFIGVALUE\""
         fi
         newProperties=$newProperties$line
         propertiesStarted=0
@@ -154,6 +159,10 @@ doConfigUpdate () {
     elif [ "`echo $line | grep -E "},?$"`" ]; then
         currentLevel=$((currentLevel-1))
         if [ "$currentLevel" == 1 ]; then
+          # if no properties in current config
+          if [ "$MODE" == "set" -a -z "$newProperties" ]; then
+            newProperties="\"properties\" : { \"$CONFIGKEY\" : \"$CONFIGVALUE\"}"
+          fi
           newTag=`date "+%s%N"`
           newTag="version${newTag}"
           finalJson="{ \"Clusters\": { \"desired_config\": {\"type\": \"$SITE\", \"tag\":\"$newTag\", $newProperties}}}"
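
The shell fix is easier to follow next to an equivalent sketch in Python
(illustrative only, not part of the patch): in "set" mode a separator comma
is needed only when properties already exist, and a whole "properties" block
must be synthesized when the config has none; in "delete" mode the trailing
comma is trimmed instead. Working on parsed JSON collapses the "set" cases
into a single assignment:

    import json

    def set_config_property(desired_config, key, value):
        # desired_config mirrors the Clusters/desired_config payload that
        # configs.sh rebuilds line by line; setdefault covers the previously
        # broken case of a config with no "properties" object at all.
        properties = desired_config.setdefault('properties', {})
        properties[key] = value
        return json.dumps({'Clusters': {'desired_config': desired_config}})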


[39/50] [abbrv] ambari git commit: AMBARI-13233. Error message for ambari agent install failure when the ping port is taken by old agent process should state port and old process (Di Li via alejandro)

Posted by nc...@apache.org.
AMBARI-13233. Error message for ambari agent install failure when the ping port is taken by old agent process should state port and old process (Di Li via alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2b340164
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2b340164
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2b340164

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 2b3401640473c0b0988f52dff5683820107d3ecc
Parents: 460d191
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Mon Sep 28 15:04:25 2015 -0700
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Mon Sep 28 15:04:25 2015 -0700

----------------------------------------------------------------------
 ambari-agent/src/main/python/ambari_agent/PingPortListener.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2b340164/ambari-agent/src/main/python/ambari_agent/PingPortListener.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/PingPortListener.py b/ambari-agent/src/main/python/ambari_agent/PingPortListener.py
index 46be26b..313c060 100644
--- a/ambari-agent/src/main/python/ambari_agent/PingPortListener.py
+++ b/ambari-agent/src/main/python/ambari_agent/PingPortListener.py
@@ -39,7 +39,7 @@ class PingPortListener(threading.Thread):
     self.host = '0.0.0.0'
     self.port = int(self.config.get('agent','ping_port'))
     if not self.port == None and not self.port == 0:
-      (stdoutdata, stderrdata) = self.run_os_command_in_shell(FUSER_CMD.format(str(self.port), "{print $2}"))
+      (stdoutdata, stderrdata) = self.run_os_command_in_shell(FUSER_CMD.format(str(self.port), "{print $1}"))
       if stdoutdata.strip() and stdoutdata.strip().isdigit():
         (stdoutdata, stderrdata) = self.run_os_command_in_shell(PSPF_CMD.format(stdoutdata.strip()))
         raise Exception(PORT_IN_USE_MESSAGE.format(str(self.port), stdoutdata))
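
The one-character awk change matters because fuser writes the "PORT/tcp:"
label to stderr and only the owning PIDs to stdout, so once stderr is
discarded the first field is the PID of the stale agent. A rough
reconstruction of the check (the command strings are assumptions here;
FUSER_CMD, PSPF_CMD and PORT_IN_USE_MESSAGE are defined elsewhere in the
agent source):

    import subprocess

    def describe_port_owner(port):
        # First awk field of fuser's stdout is the old process's PID.
        out = subprocess.check_output(
            "fuser -n tcp {0} 2>/dev/null | awk '{{print $1}}'".format(port),
            shell=True, universal_newlines=True)
        pid = out.strip()
        if pid and pid.isdigit():
            # ps -fp yields the full command line for the error message,
            # naming both the port and the process that holds it.
            return subprocess.check_output("ps -fp {0}".format(pid),
                                           shell=True, universal_newlines=True)
        return None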