Posted to commits@ambari.apache.org by jl...@apache.org on 2016/03/11 06:47:09 UTC

[01/21] ambari git commit: AMBARI-15362 Combo Search: use component models instead of host component model to populate dropdown list (Joe Wang via rzang) [Forced Update!]

Repository: ambari
Updated Branches:
  refs/heads/AMBARI-13364 7d862f588 -> 171379abd (forced update)


AMBARI-15362 Combo Search: use component models instead of host component model to populate dropdown list (Joe Wang via rzang)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/335ddb1f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/335ddb1f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/335ddb1f

Branch: refs/heads/AMBARI-13364
Commit: 335ddb1fda405782399517d49077738fa533b47d
Parents: e5d261f
Author: Richard Zang <rz...@apache.org>
Authored: Wed Mar 9 15:24:48 2016 -0800
Committer: Richard Zang <rz...@apache.org>
Committed: Wed Mar 9 15:24:48 2016 -0800

----------------------------------------------------------------------
 .../controllers/main/host/combo_search_box.js   |  4 --
 .../templates/main/host/component_filter.hbs    | 73 --------------------
 .../app/templates/main/host/version_filter.hbs  | 30 --------
 .../app/views/main/host/combo_search_box.js     | 12 ++--
 4 files changed, 6 insertions(+), 113 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/335ddb1f/ambari-web/app/controllers/main/host/combo_search_box.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/host/combo_search_box.js b/ambari-web/app/controllers/main/host/combo_search_box.js
index 6f64170..2fa4479 100644
--- a/ambari-web/app/controllers/main/host/combo_search_box.js
+++ b/ambari-web/app/controllers/main/host/combo_search_box.js
@@ -56,10 +56,6 @@ App.MainHostComboSearchBoxController = Em.Controller.extend({
     return App.HostComponent.find().filterProperty('componentName', facet).length > 0;
   },
 
-  isClientComponent: function(name) {
-    return name.indexOf('CLIENT') >= 0;
-  },
-
   generateQueryParam: function(param) {
     var expressions = param.key;
     var pHash = this.createComboParamHash(param);

http://git-wip-us.apache.org/repos/asf/ambari/blob/335ddb1f/ambari-web/app/templates/main/host/component_filter.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/host/component_filter.hbs b/ambari-web/app/templates/main/host/component_filter.hbs
deleted file mode 100644
index d212533..0000000
--- a/ambari-web/app/templates/main/host/component_filter.hbs
+++ /dev/null
@@ -1,73 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<button class="btn single-btn-group filter-btn" id="host-components-filter-button" {{action "clickFilterButton" target="view"}}>{{t common.filter}} <i class="icon-filter"></i></button>
-    <ul class="dropdown-menu filter-components">
-      <li id="title-bar">{{t hosts.filters.filterComponents}}
-        <a class="close" {{action "closeFilter" target="view"}}>X</a>
-      </li>
-      <li id="list-area">
-        <ul>
-          <li>
-            <label class="checkbox">
-            {{view Ember.Checkbox checkedBinding="view.masterComponentsChecked"}} {{t host.host.componentFilter.master}}:
-            </label>
-            <ul>
-            {{#each component in view.masterComponents}}
-              <li>
-                <label class="checkbox">
-                {{view Ember.Checkbox checkedBinding="component.checkedForHostFilter" }} {{unbound component.displayName}}
-                </label>
-              </li>
-            {{/each}}
-            </ul>
-          </li>
-          <li>
-            <label class="checkbox">
-            {{view Ember.Checkbox checkedBinding="view.slaveComponentsChecked"}} {{t host.host.componentFilter.slave}}:
-            </label>
-            <ul>
-            {{#each component in view.slaveComponents}}
-              <li>
-                <label class="checkbox">
-                {{view Ember.Checkbox checkedBinding="component.checkedForHostFilter" }} {{unbound component.displayName}}
-                </label>
-              </li>
-            {{/each}}
-            </ul>
-          </li>
-          <li>
-            <label class="checkbox">
-            {{view Ember.Checkbox checkedBinding="view.clientComponentsChecked"}} {{t host.host.componentFilter.client}}:
-            </label>
-            <ul>
-            {{#each component in view.clientComponents}}
-              <li>
-              <label class="checkbox">
-              {{view Ember.Checkbox checkedBinding="component.checkedForHostFilter" }} {{unbound component.displayName}}
-              </label>
-              </li>
-            {{/each}}
-            </ul>
-          </li>
-        </ul>
-      </li>
-      <li id="button-bar">
-        <button class="btn btn-inverse" {{action "applyFilter" target="view"}}>{{t common.apply}}</button>
-      </li>
-    </ul>

http://git-wip-us.apache.org/repos/asf/ambari/blob/335ddb1f/ambari-web/app/templates/main/host/version_filter.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/host/version_filter.hbs b/ambari-web/app/templates/main/host/version_filter.hbs
deleted file mode 100644
index f16e8b5..0000000
--- a/ambari-web/app/templates/main/host/version_filter.hbs
+++ /dev/null
@@ -1,30 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-
-<button class="btn single-btn-group filter-btn" id="host-version-filter-button" {{action "clickFilterButton" target="view"}}>
-  {{t common.filter}}
-  <i class="icon-filter"></i>
-</button>
-<ul class="dropdown-menu filter-components">
-  <li>{{view view.versionSelectView}}</li>
-  <li>{{view view.statusSelectView}}</li>
-  <li id="button-bar">
-    <button class="btn btn-inverse" {{action "applyFilter" target="view"}}>{{t common.apply}}</button>
-  </li>
-</ul>

http://git-wip-us.apache.org/repos/asf/ambari/blob/335ddb1f/ambari-web/app/views/main/host/combo_search_box.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/host/combo_search_box.js b/ambari-web/app/views/main/host/combo_search_box.js
index 3ccacf2..c1d00f4 100644
--- a/ambari-web/app/views/main/host/combo_search_box.js
+++ b/ambari-web/app/views/main/host/combo_search_box.js
@@ -33,14 +33,14 @@ App.MainHostComboSearchBoxView = Em.View.extend({
   },
 
   getHostComponentList: function() {
-    var controller = App.router.get('mainHostComboSearchBoxController');
     var hostComponentList = [];
-    App.HostComponent.find().toArray().forEach(function(component) {
+    App.MasterComponent.find().rejectProperty('totalCount', 0).toArray()
+        .concat(App.SlaveComponent.find().rejectProperty('totalCount', 0).toArray())
+        .forEach(function(component) {
       var displayName = component.get('displayName');
-      var name = component.get('componentName');
-      if (displayName != null && !controller.isClientComponent(name)) {
+      if (displayName) {
         hostComponentList.push({label: displayName, category: 'Component'});
-        App.router.get('mainHostController.labelValueMap')[displayName] = name;
+        App.router.get('mainHostController.labelValueMap')[displayName] = component.get('componentName');
       }
     });
     return hostComponentList;
@@ -130,7 +130,7 @@ App.MainHostComboSearchBoxView = Em.View.extend({
             case 'rack':
               callback(App.Host.find().toArray().mapProperty('rack').uniq().reject(function (item) {
                 return visualSearch.searchQuery.values(facet).indexOf(item) >= 0;
-              }), {preserveMatches: true});
+              }));
               break;
             case 'version':
               callback(App.HostStackVersion.find().toArray()


[12/21] ambari git commit: AMBARI-15228. Ambari overwrites permissions on HDFS directories (aonishuk)

Posted by jl...@apache.org.
AMBARI-15228. Ambari overwrites permissions on HDFS directories (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/66267961
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/66267961
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/66267961

Branch: refs/heads/AMBARI-13364
Commit: 6626796191833c77130302653d3afad5f4bf8aec
Parents: f9d317b
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Thu Mar 10 13:07:58 2016 +0200
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Thu Mar 10 13:07:58 2016 +0200

----------------------------------------------------------------------
 .../functions/get_not_managed_resources.py      |  50 +++++++++++++++++++
 .../libraries/providers/hdfs_resource.py        |  17 ++++++-
 .../libraries/resources/hdfs_resource.py        |   9 +++-
 .../ambari/server/agent/ExecutionCommand.java   |   1 +
 .../AmbariCustomCommandExecutionHelper.java     |  14 +++++-
 .../AmbariManagementControllerImpl.java         |   5 ++
 .../internal/ClientConfigResourceProvider.java  |   5 ++
 .../ambari/server/state/PropertyInfo.java       |   3 +-
 .../1.6.1.2.2.0/package/scripts/params.py       |   2 +
 .../0.1.0/package/scripts/params.py             |   4 +-
 .../0.5.0.2.1/configuration/falcon-env.xml      |  14 +++++-
 .../FALCON/0.5.0.2.1/package/scripts/falcon.py  |   4 +-
 .../0.5.0.2.1/package/scripts/params_linux.py   |   4 +-
 .../HAWQ/2.0.0/package/scripts/params.py        |   4 +-
 .../0.96.0.2.0/package/scripts/params_linux.py  |   2 +
 .../HDFS/2.1.0.2.0/configuration/hadoop-env.xml |  12 +++++
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  |   2 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |   4 ++
 .../2.1.0.2.0/package/scripts/service_check.py  |   4 +-
 .../HIVE/0.12.0.2.0/configuration/hive-site.xml |   1 +
 .../0.12.0.2.0/package/scripts/params_linux.py  |   2 +
 .../KAFKA/0.8.1.2.2/package/scripts/params.py   |   7 +--
 .../0.5.0.2.2/package/scripts/params_linux.py   |   4 +-
 .../MAHOUT/1.0.0.2.3/package/scripts/params.py  |   2 +
 .../4.0.0.2.0/package/scripts/params_linux.py   |   2 +
 .../0.12.0.2.0/package/scripts/params_linux.py  |   2 +
 .../PXF/3.0.0/package/scripts/params.py         |   4 +-
 .../0.60.0.2.2/package/scripts/params_linux.py  |   4 +-
 .../SPARK/1.2.0.2.2/package/scripts/params.py   |   2 +
 .../0.9.1.2.1/package/scripts/params_linux.py   |   6 ++-
 .../0.4.0.2.1/package/scripts/params_linux.py   |   2 +
 .../configuration-mapred/mapred-site.xml        |   1 +
 .../YARN/2.1.0.2.0/configuration/yarn-site.xml  |   1 +
 .../2.1.0.2.0/package/scripts/params_linux.py   |   2 +
 .../HDP/2.0.6/configuration/cluster-env.xml     |  10 ++++
 .../before-START/files/fast-hdfs-resource.jar   | Bin 19285461 -> 19285670 bytes
 .../2.0.6/hooks/before-START/scripts/params.py  |   4 ++
 .../scripts/shared_initialization.py            |   2 +-
 .../services/HIVE/configuration/hive-site.xml   |   1 +
 .../services/HIVE/configuration/hive-site.xml   |   1 +
 .../services/ECS/package/scripts/ecs_client.py  |   2 +-
 .../services/ECS/package/scripts/params.py      |   4 ++
 .../services/HIVE/configuration/hive-site.xml   |   1 +
 .../AmbariManagementControllerImplTest.java     |  23 ++++++++-
 .../AMBARI_METRICS/test_metrics_collector.py    |   4 ++
 .../stacks/2.0.6/HBASE/test_hbase_master.py     |  10 ++++
 .../python/stacks/2.0.6/HDFS/test_namenode.py   |  28 +++++++++++
 .../stacks/2.0.6/HDFS/test_service_check.py     |   5 ++
 .../stacks/2.0.6/HIVE/test_hive_server.py       |  15 ++++++
 .../2.0.6/HIVE/test_hive_service_check.py       |   7 +++
 .../stacks/2.0.6/OOZIE/test_oozie_server.py     |  19 ++++++-
 .../stacks/2.0.6/OOZIE/test_service_check.py    |   6 +++
 .../stacks/2.0.6/PIG/test_pig_service_check.py  |   9 +++-
 .../stacks/2.0.6/YARN/test_historyserver.py     |  17 +++++++
 .../2.0.6/YARN/test_mapreduce2_service_check.py |   7 +++
 .../stacks/2.0.6/configs/altfs_plus_hdfs.json   |   3 ++
 .../stacks/2.0.6/configs/client-upgrade.json    |   2 +
 .../2.0.6/configs/default.hbasedecom.json       |   2 +
 .../python/stacks/2.0.6/configs/default.json    |   4 ++
 .../2.0.6/configs/default.non_gmetad_host.json  |   4 +-
 .../2.0.6/configs/default_ams_embedded.json     |   2 +
 .../stacks/2.0.6/configs/default_client.json    |   2 +
 .../2.0.6/configs/default_hive_nn_ha.json       |   2 +
 .../2.0.6/configs/default_hive_nn_ha_2.json     |   2 +
 .../2.0.6/configs/default_hive_non_hdfs.json    |   2 +
 .../2.0.6/configs/default_no_install.json       |   2 +
 .../2.0.6/configs/default_oozie_mysql.json      |   2 +
 .../default_update_exclude_file_only.json       |   2 +
 .../python/stacks/2.0.6/configs/flume_22.json   |   4 +-
 .../python/stacks/2.0.6/configs/flume_only.json |   3 +-
 .../stacks/2.0.6/configs/flume_target.json      |   3 +-
 .../2.0.6/configs/ha_bootstrap_active_node.json |   3 ++
 .../configs/ha_bootstrap_standby_node.json      |   3 ++
 ...ha_bootstrap_standby_node_initial_start.json |   3 ++
 .../python/stacks/2.0.6/configs/ha_default.json |   3 ++
 .../python/stacks/2.0.6/configs/ha_secured.json |   3 ++
 .../python/stacks/2.0.6/configs/hbase-2.2.json  |   2 +
 .../stacks/2.0.6/configs/hbase-check-2.2.json   |   2 +
 .../stacks/2.0.6/configs/hbase-preupgrade.json  |   2 +
 .../2.0.6/configs/hbase-rs-2.2-phoenix.json     |   2 +
 .../stacks/2.0.6/configs/hbase-rs-2.2.json      |   2 +
 .../stacks/2.0.6/configs/hbase_no_phx.json      |   2 +
 .../stacks/2.0.6/configs/hbase_with_phx.json    |   2 +
 .../test/python/stacks/2.0.6/configs/nn_eu.json |   5 +-
 .../stacks/2.0.6/configs/nn_eu_standby.json     |   5 +-
 .../python/stacks/2.0.6/configs/nn_ru_lzo.json  |   3 ++
 .../2.0.6/configs/oozie_existing_sqla.json      |   2 +
 .../2.0.6/configs/ranger-namenode-start.json    |   4 +-
 .../2.0.6/configs/rebalancehdfs_default.json    |   4 +-
 .../2.0.6/configs/rebalancehdfs_secured.json    |   2 +
 .../python/stacks/2.0.6/configs/secured.json    |   3 ++
 .../stacks/2.0.6/configs/secured_client.json    |   2 +
 .../2.0.6/configs/secured_no_jce_name.json      |   3 +-
 .../2.0.6/configs/zk-service_check_2.2.json     |   4 +-
 .../stacks/2.1/FALCON/test_falcon_server.py     |   7 +++
 .../python/stacks/2.1/TEZ/test_service_check.py |   9 ++++
 .../stacks/2.1/YARN/test_apptimelineserver.py   |   2 +
 .../stacks/2.1/configs/client-upgrade.json      |   4 +-
 .../stacks/2.1/configs/default-storm-start.json |   2 +
 .../test/python/stacks/2.1/configs/default.json |   3 ++
 .../2.1/configs/hive-metastore-upgrade.json     |   4 +-
 .../stacks/2.1/configs/secured-storm-start.json |   6 ++-
 .../test/python/stacks/2.1/configs/secured.json |   2 +
 .../stacks/2.2/PIG/test_pig_service_check.py    |   7 +++
 .../stacks/2.2/SPARK/test_job_history_server.py |   7 +++
 .../test/python/stacks/2.2/configs/default.json |   2 +
 .../2.2/configs/default_custom_path_config.json |   2 +
 .../stacks/2.2/configs/falcon-upgrade.json      |   7 ++-
 .../python/stacks/2.2/configs/hive-upgrade.json |   2 +
 .../journalnode-upgrade-hdfs-secure.json        |   2 +
 .../stacks/2.2/configs/journalnode-upgrade.json |   2 +
 .../python/stacks/2.2/configs/knox_upgrade.json |   6 ++-
 .../stacks/2.2/configs/oozie-downgrade.json     |   4 +-
 .../stacks/2.2/configs/oozie-upgrade.json       |   4 +-
 .../2.2/configs/pig-service-check-secure.json   |   6 ++-
 .../2.2/configs/ranger-admin-default.json       |   2 +
 .../2.2/configs/ranger-admin-secured.json       |   2 +
 .../2.2/configs/ranger-admin-upgrade.json       |   6 ++-
 .../2.2/configs/ranger-usersync-upgrade.json    |   6 ++-
 .../test/python/stacks/2.2/configs/secured.json |   2 +
 .../2.2/configs/spark-job-history-server.json   |   6 ++-
 .../python/stacks/2.3/HAWQ/test_hawqmaster.py   |   5 ++
 .../2.3/MAHOUT/test_mahout_service_check.py     |   5 ++
 .../2.3/SPARK/test_spark_thrift_server.py       |   3 ++
 .../test/python/stacks/2.3/YARN/test_ats_1_5.py |   6 +++
 .../test/python/stacks/2.3/configs/ats_1_5.json |   4 +-
 .../stacks/2.3/configs/default.hbasedecom.json  |   2 +
 .../test/python/stacks/2.3/configs/default.json |   2 +
 .../python/stacks/2.3/configs/hawq_default.json |   2 +
 .../stacks/2.3/configs/hbase_default.json       |   2 +
 .../python/stacks/2.3/configs/hbase_secure.json |   2 +
 .../python/stacks/2.3/configs/pxf_default.json  |   2 +
 .../stacks/2.3/configs/spark_default.json       |   2 +
 .../stacks/2.3/configs/storm_default.json       |   4 +-
 .../2.3/configs/storm_default_secure.json       |   4 +-
 .../ambari/fast_hdfs_resource/Resource.java     |  15 ++++--
 .../ambari/fast_hdfs_resource/Runner.java       |   6 +++
 137 files changed, 600 insertions(+), 64 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-common/src/main/python/resource_management/libraries/functions/get_not_managed_resources.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/get_not_managed_resources.py b/ambari-common/src/main/python/resource_management/libraries/functions/get_not_managed_resources.py
new file mode 100644
index 0000000..5f8bc67
--- /dev/null
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/get_not_managed_resources.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+__all__ = ["get_not_managed_resources"]
+
+import json
+from resource_management.libraries.script import Script
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.default import default
+
+def get_not_managed_resources():
+  """
+  Returns a list of not managed hdfs paths.
+  The result contains all paths from hostLevelParams/not_managed_hdfs_path_list
+  except config values from cluster-env/managed_hdfs_resource_property_names
+  """
+  config = Script.get_config()
+  not_managed_hdfs_path_list = json.loads(config['hostLevelParams']['not_managed_hdfs_path_list'])[:]
+  managed_hdfs_resource_property_names = config['configurations']['cluster-env']['managed_hdfs_resource_property_names']
+  managed_hdfs_resource_property_list = filter(None, [property.strip() for property in managed_hdfs_resource_property_names.split(',')])
+
+  for property_name in managed_hdfs_resource_property_list:
+    property_value = default('/configurations/' + property_name, None)
+
+    if property_value == None:
+      Logger.warning(("Property {0} from cluster-env/managed_hdfs_resource_property_names not found in configurations. "
+                     "Management of this DFS resource will not be forced.").format(property_name))
+    else:
+      while property_value in not_managed_hdfs_path_list:
+        not_managed_hdfs_path_list.remove(property_value)
+
+  return not_managed_hdfs_path_list
\ No newline at end of file
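
For illustration, the filtering rule that get_not_managed_resources() implements can be exercised standalone. Below is a minimal sketch in plain Python with hypothetical sample values; the real function reads them through Script.get_config() and default(), and the "site/property" format of the entries in managed_hdfs_resource_property_names is an assumption inferred from the default('/configurations/' + property_name) lookup. Every path from hostLevelParams/not_managed_hdfs_path_list stays unmanaged unless one of the listed properties resolves to that same path:

import json

# Hypothetical command config, standing in for Script.get_config().
config = {
  'hostLevelParams': {
    'not_managed_hdfs_path_list': json.dumps(['/tmp', '/apps/hive/warehouse'])
  },
  'configurations': {
    'cluster-env': {
      'managed_hdfs_resource_property_names': 'hive-site/hive.metastore.warehouse.dir'
    },
    'hive-site': {'hive.metastore.warehouse.dir': '/apps/hive/warehouse'}
  }
}

def get_not_managed_resources_sketch(config):
  # Start from every path Ambari was told not to manage.
  not_managed = json.loads(config['hostLevelParams']['not_managed_hdfs_path_list'])[:]
  names = config['configurations']['cluster-env']['managed_hdfs_resource_property_names']
  for name in [n.strip() for n in names.split(',') if n.strip()]:
    site, _, prop = name.partition('/')
    value = config['configurations'].get(site, {}).get(prop)
    # A missing property simply never matches; the real function logs a warning instead.
    while value in not_managed:
      not_managed.remove(value)
  return not_managed

print(get_not_managed_resources_sketch(config))  # ['/tmp']

With these sample values, /apps/hive/warehouse is claimed back by hive.metastore.warehouse.dir and only /tmp remains unmanaged.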

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py b/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
index 9c5c5f8..a0396e8 100644
--- a/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
+++ b/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
@@ -52,6 +52,7 @@ RESOURCE_TO_JSON_FIELDS = {
   'recursive_chown': 'recursiveChown',
   'recursive_chmod': 'recursiveChmod',
   'change_permissions_for_parents': 'changePermissionforParents',
+  'manage_if_exists': 'manageIfExists',
   'dfs_type': 'dfs_type'
 }
 
@@ -77,6 +78,8 @@ class HdfsResourceJar:
         resource[json_field_name] = action_name
       elif field_name == 'mode' and main_resource.resource.mode:
         resource[json_field_name] = oct(main_resource.resource.mode)[1:]
+      elif field_name == 'manage_if_exists':
+        resource[json_field_name] = main_resource.manage_if_exists
       elif getattr(main_resource.resource, field_name):
         resource[json_field_name] = getattr(main_resource.resource, field_name)
 
@@ -245,7 +248,12 @@ class HdfsResourceWebHDFS:
     self.mode_set = False
     self.main_resource = main_resource
     self._assert_valid()
-        
+    
+    if self.main_resource.manage_if_exists == False and self.target_status:
+      Logger.info("Skipping the operation for not managed DFS directory " + str(self.main_resource.resource.target) +
+                  " since immutable_paths contains it.")
+      return            
+
     if action_name == "create":
       self._create_resource()
       self._set_mode(self.target_status)
@@ -439,7 +447,12 @@ class HdfsResourceProvider(Provider):
   def action_delayed(self, action_name):
     self.assert_parameter_is_set('type')
     
-    if HdfsResourceProvider.parse_path(self.resource.target) in self.ignored_resources_list:
+    parsed_path = HdfsResourceProvider.parse_path(self.resource.target)
+
+    parsed_not_managed_paths = [HdfsResourceProvider.parse_path(path) for path in self.resource.immutable_paths]
+    self.manage_if_exists = not parsed_path in parsed_not_managed_paths
+
+    if parsed_path in self.ignored_resources_list:
       Logger.info("Skipping '{0}' because it is in ignore file {1}.".format(self.resource, self.resource.hdfs_resource_ignore_file))
       return
     

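Taken by itself, the new skip decision in action_delayed() reduces to a membership test over normalized paths. A minimal sketch follows, assuming parse_path() normalizes the scheme and trailing slashes; the real HdfsResourceProvider.parse_path is not shown in this diff, so the normalizer below is a hypothetical stand-in:

import posixpath

def parse_path(path):
  # Hypothetical stand-in for HdfsResourceProvider.parse_path: drop an
  # hdfs://authority prefix and trailing slashes so equivalent spellings
  # of the same directory compare equal.
  if path.startswith('hdfs://'):
    path = '/' + path[len('hdfs://'):].split('/', 1)[-1]
  return posixpath.normpath(path)

def manage_if_exists(target, immutable_paths):
  # Mirrors action_delayed(): a target listed in immutable_paths gets
  # manage_if_exists == False; everything else stays fully managed.
  return parse_path(target) not in [parse_path(p) for p in immutable_paths]

print(manage_if_exists('hdfs://nn:8020/tmp/', ['/tmp']))  # False
print(manage_if_exists('/user/oozie', ['/tmp']))          # True

When manage_if_exists is False and the target already exists (target_status is set), HdfsResourceWebHDFS returns early, which is how this patch keeps Ambari from overwriting permissions on directories that already exist.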
http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-common/src/main/python/resource_management/libraries/resources/hdfs_resource.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/resources/hdfs_resource.py b/ambari-common/src/main/python/resource_management/libraries/resources/hdfs_resource.py
index c5460a0..18e61fb 100644
--- a/ambari-common/src/main/python/resource_management/libraries/resources/hdfs_resource.py
+++ b/ambari-common/src/main/python/resource_management/libraries/resources/hdfs_resource.py
@@ -87,7 +87,14 @@ class HdfsResource(Resource):
   /var/lib/ambari-agent/data/.hdfs_resource_ignore
   """
   hdfs_resource_ignore_file = ResourceArgument()
-  
+
+  """
+  If the name of the HdfsResource is in immutable_paths
+  and it is already created, any actions on it will be skipped
+  (like changing permissions/recursive permissions, copying from source, deleting etc.)
+  """
+  immutable_paths = ResourceArgument(default=[])
+
   # WebHDFS needs these
   hdfs_site = ResourceArgument()
   default_fs = ResourceArgument()
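
Every per-service params.py change further down follows the same wiring: the unmanaged-path list is baked into the HdfsResource partial once, so all call sites in the service scripts inherit it. A condensed sketch of that pattern (most keyword arguments, keytab, hdfs_site and so on, are omitted, and the user value is a placeholder):

import functools

from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources

HdfsResource = functools.partial(
  HdfsResource,
  user = 'hdfs',  # placeholder; each service's params.py supplies its own arguments
  immutable_paths = get_not_managed_resources()
)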

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
index 788e9c3..402a338 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
@@ -366,6 +366,7 @@ public class ExecutionCommand extends AgentCommand {
     String REFRESH_ADITIONAL_COMPONENT_TAGS = "forceRefreshConfigTags";
     String USER_LIST = "user_list";
     String GROUP_LIST = "group_list";
+    String NOT_MANAGED_HDFS_PATH_LIST = "not_managed_hdfs_path_list";
     String VERSION = "version";
     String REFRESH_TOPOLOGY = "refresh_topology";
     String HOST_SYS_PREPPED = "host_sys_prepped";

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index a94c6b4..352be9f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -35,6 +35,7 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JCE_NAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_LOCATION;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_NAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.MYSQL_JDBC_URL;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.NOT_MANAGED_HDFS_PATH_LIST;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.ORACLE_JDBC_URL;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.REPO_INFO;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT;
@@ -373,6 +374,10 @@ public class AmbariCustomCommandExecutionHelper {
       String groupList = gson.toJson(groupSet);
       hostLevelParams.put(GROUP_LIST, groupList);
 
+      Set<String> notManagedHdfsPathSet = configHelper.getPropertyValuesWithPropertyType(stackId, PropertyType.NOT_MANAGED_HDFS_PATH, cluster);
+      String notManagedHdfsPathList = gson.toJson(notManagedHdfsPathSet);
+      hostLevelParams.put(NOT_MANAGED_HDFS_PATH_LIST, notManagedHdfsPathList);
+
       execCmd.setHostLevelParams(hostLevelParams);
 
       Map<String, String> commandParams = new TreeMap<String, String>();
@@ -1131,12 +1136,12 @@ public class AmbariCustomCommandExecutionHelper {
         hostParamsStageJson);
   }
 
-  Map<String, String> createDefaultHostParams(Cluster cluster) {
+  Map<String, String> createDefaultHostParams(Cluster cluster) throws AmbariException {
     StackId stackId = cluster.getDesiredStackVersion();
     return createDefaultHostParams(cluster, stackId);
   }
 
-  Map<String, String> createDefaultHostParams(Cluster cluster, StackId stackId) {
+  Map<String, String> createDefaultHostParams(Cluster cluster, StackId stackId) throws AmbariException{
     TreeMap<String, String> hostLevelParams = new TreeMap<String, String>();
     hostLevelParams.put(JDK_LOCATION, managementController.getJdkResourceUrl());
     hostLevelParams.put(JAVA_HOME, managementController.getJavaHome());
@@ -1153,6 +1158,11 @@ public class AmbariCustomCommandExecutionHelper {
     hostLevelParams.put(HOST_SYS_PREPPED, configs.areHostsSysPrepped());
     hostLevelParams.put(AGENT_STACK_RETRY_ON_UNAVAILABILITY, configs.isAgentStackRetryOnInstallEnabled());
     hostLevelParams.put(AGENT_STACK_RETRY_COUNT, configs.getAgentStackRetryOnInstallCount());
+
+    Set<String> notManagedHdfsPathSet = configHelper.getPropertyValuesWithPropertyType(stackId, PropertyType.NOT_MANAGED_HDFS_PATH, cluster);
+    String notManagedHdfsPathList = gson.toJson(notManagedHdfsPathSet);
+    hostLevelParams.put(NOT_MANAGED_HDFS_PATH_LIST, notManagedHdfsPathList);
+
     ClusterVersionEntity clusterVersionEntity = clusterVersionDAO.findByClusterAndStateCurrent(cluster.getClusterName());
     if (clusterVersionEntity == null) {
       List<ClusterVersionEntity> clusterVersionEntityList = clusterVersionDAO

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 0b26f61..962eb43 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -170,6 +170,7 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.COMMAND_R
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.COMMAND_TIMEOUT;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.DB_DRIVER_FILENAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.GROUP_LIST;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.NOT_MANAGED_HDFS_PATH_LIST;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOOKS_FOLDER;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.MAX_DURATION_OF_RETRIES;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.PACKAGE_LIST;
@@ -2184,6 +2185,10 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     String groupList = gson.toJson(groupSet);
     hostParams.put(GROUP_LIST, groupList);
 
+    Set<String> notManagedHdfsPathSet = configHelper.getPropertyValuesWithPropertyType(stackId, PropertyType.NOT_MANAGED_HDFS_PATH, cluster);
+    String notManagedHdfsPathList = gson.toJson(notManagedHdfsPathSet);
+    hostParams.put(NOT_MANAGED_HDFS_PATH_LIST, notManagedHdfsPathList);
+
     DatabaseType databaseType = configs.getDatabaseType();
     if (databaseType == DatabaseType.ORACLE) {
       hostParams.put(DB_DRIVER_FILENAME, configs.getOjdbcJarName());

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
index 5ba53dc..648e573 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
@@ -83,6 +83,7 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JCE_NAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_LOCATION;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_NAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.MYSQL_JDBC_URL;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.NOT_MANAGED_HDFS_PATH_LIST;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.ORACLE_JDBC_URL;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.PACKAGE_LIST;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_REPO_INFO;
@@ -339,6 +340,10 @@ public class ClientConfigResourceProvider extends AbstractControllerResourceProv
       String groupList = gson.toJson(groupSet);
       hostLevelParams.put(GROUP_LIST, groupList);
 
+      Set<String> notManagedHdfsPathSet = configHelper.getPropertyValuesWithPropertyType(stackId, PropertyType.NOT_MANAGED_HDFS_PATH, cluster);
+      String notManagedHdfsPathList = gson.toJson(notManagedHdfsPathSet);
+      hostLevelParams.put(NOT_MANAGED_HDFS_PATH_LIST, notManagedHdfsPathList);
+
       String jsonConfigurations = null;
       Map<String, Object> commandParams = new HashMap<String, Object>();
       List<Map<String, String>> xmlConfigs = new LinkedList<Map<String, String>>();

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
index 53837db..e7c9c27 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
@@ -245,6 +245,7 @@ public class PropertyInfo {
     GROUP,
     TEXT,
     ADDITIONAL_USER_PROPERTY,
-    DONT_ADD_ON_UPGRADE
+    DONT_ADD_ON_UPGRADE,
+    NOT_MANAGED_HDFS_PATH
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
index a8aebbf..a21035e 100644
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
@@ -21,6 +21,7 @@ from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import stack_select
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions import format
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions.get_bare_principal import get_bare_principal
@@ -195,5 +196,6 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
   dfs_type = dfs_type
 )

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
index acc0fcb..12c0f25 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
@@ -23,6 +23,7 @@ from functions import check_append_heap_property
 from functions import trim_heap_property
 from resource_management.core.logger import Logger
 from resource_management import *
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 import status_params
 from ambari_commons import OSCheck
 import ConfigParser
@@ -318,7 +319,8 @@ HdfsResource = functools.partial(
   hadoop_conf_dir = hadoop_conf_dir,
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
-  default_fs = default_fs
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources()
  )
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-env.xml b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-env.xml
index 9eae0d4..3fdf2e6 100644
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-env.xml
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-env.xml
@@ -101,7 +101,19 @@
       <overridable>false</overridable>
     </value-attributes>
   </property>
-  
+  <property>
+    <name>falcon_apps_hdfs_dir</name>
+    <value>/apps/falcon</value>
+    <description>Falcon Apps HDFS Dir</description>
+    <display-name>Falcon Apps HDFS Dir</display-name>
+    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
+    <value-attributes>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+  </property>
+
   <!-- falcon-env.sh -->
   <property>
     <name>content</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon.py b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon.py
index ca967b0..b131574 100644
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon.py
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon.py
@@ -126,7 +126,7 @@ def falcon(type, action = None, upgrade_type=None):
           create_parents = True)
 
       # TODO change to proper mode
-      params.HdfsResource(params.flacon_apps_dir,
+      params.HdfsResource(params.falcon_apps_dir,
         type = "directory",
         action = "create_on_execute",
         owner = params.falcon_user,
@@ -231,4 +231,4 @@ def falcon(type, action = None, upgrade_type=None):
       Service(params.falcon_win_service_name, action = "start")
 
     if action == 'stop':
-      Service(params.falcon_win_service_name, action = "stop")
\ No newline at end of file
+      Service(params.falcon_win_service_name, action = "stop")

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py
index 707c4ed..115ed4f 100644
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py
@@ -23,6 +23,7 @@ from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.script.script import Script
 import os
@@ -95,7 +96,7 @@ falcon_startup_properties = config['configurations']['falcon-startup.properties'
 smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 falcon_env_sh_template = config['configurations']['falcon-env']['content']
 
-flacon_apps_dir = '/apps/falcon'
+falcon_apps_dir = config['configurations']['falcon-env']['falcon_apps_hdfs_dir']
 #for create_hdfs_directory
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 hostname = config["hostname"]
@@ -155,6 +156,7 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
   dfs_type = dfs_type
  )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/params.py
index 50a8fda..1bdc5aa 100644
--- a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/params.py
@@ -24,6 +24,7 @@ from resource_management.libraries.functions.default import default
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.resources.xml_config import XmlConfig
 from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 
 config = Script.get_config()
 config_attrs = config['configuration_attributes']
@@ -74,7 +75,8 @@ HdfsResource = functools.partial(HdfsResource,
                                  kinit_path_local=kinit_path_local,
                                  principal_name=hdfs_principal_name,
                                  hdfs_site=hdfs_site,
-                                 default_fs=default_fs)
+                                 default_fs=default_fs,
+                                 immutable_paths = get_not_managed_resources())
 
 # File partial function
 File = functools.partial(File,

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index 63e548a..b32aedf 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -35,6 +35,7 @@ from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.functions import is_empty
 from resource_management.libraries.functions import get_unique_id_and_date
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 from resource_management.libraries.script.script import Script
 
 # server configurations
@@ -238,6 +239,7 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
   dfs_type = dfs_type
 )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
index 61eccce..98f20e7 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
@@ -185,6 +185,18 @@
     </value-attributes>
   </property>
   <property>
+    <name>hdfs_tmp_dir</name>
+    <value>/tmp</value>
+    <description>HDFS tmp Dir</description>
+    <display-name>HDFS tmp Dir</display-name>
+    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
+    <value-attributes>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+  </property>
+  <property>
     <name>hdfs_user_nofile_limit</name>
     <value>128000</value>
     <description>Max open files limit setting for HDFS user.</description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
index 2b417ac..e4c8c9c 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
@@ -259,7 +259,7 @@ def create_name_dirs(directories):
 def create_hdfs_directories(check):
   import params
 
-  params.HdfsResource("/tmp",
+  params.HdfsResource(params.hdfs_tmp_dir,
                        type="directory",
                        action="create_on_execute",
                        owner=params.hdfs_user,

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index f0bf4d2..905802f 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -33,6 +33,7 @@ from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_klist_path
 from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 
@@ -75,6 +76,8 @@ dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
 dfs_dn_ipc_address = config['configurations']['hdfs-site']['dfs.datanode.ipc.address']
 secure_dn_ports_are_in_use = False
 
+hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
+
 # hadoop default parameters
 mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
 hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
@@ -337,6 +340,7 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
   dfs_type = dfs_type
 )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/service_check.py
index b4f44ae..737ae04 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/service_check.py
@@ -34,7 +34,7 @@ class HdfsServiceCheckDefault(HdfsServiceCheck):
 
     env.set_params(params)
     unique = functions.get_unique_id_and_date()
-    dir = '/tmp'
+    dir = params.hdfs_tmp_dir
     tmp_file = format("{dir}/{unique}")
 
     safemode_command = format("dfsadmin -fs {namenode_address} -safemode get | grep OFF")
@@ -119,7 +119,7 @@ class HdfsServiceCheckWindows(HdfsServiceCheck):
     unique = functions.get_unique_id_and_date()
 
     #Hadoop uses POSIX-style paths, separator is always /
-    dir = '/tmp'
+    dir = params.hdfs_tmp_dir
     tmp_file = dir + '/' + unique
 
     #commands for execution

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
index c63c289..7cbe4e0 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
@@ -107,6 +107,7 @@ limitations under the License.
     <name>hive.metastore.warehouse.dir</name>
     <value>/apps/hive/warehouse</value>
     <description>location of default database for the warehouse</description>
+    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
   </property>
 
   <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index 63ad482..9084c87 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -35,6 +35,7 @@ from resource_management.libraries.functions.is_empty import is_empty
 from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.copy_tarball import STACK_VERSION_PATTERN
 from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions.get_port_from_url import get_port_from_url
 from resource_management.libraries import functions
@@ -458,6 +459,7 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
   dfs_type = dfs_type
  )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
index 4e73730..d3a0606 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
@@ -30,7 +30,7 @@ from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import get_kinit_path
-
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 
 # server configurations
 config = Script.get_config()
@@ -282,5 +282,6 @@ HdfsResource = functools.partial(
   hadoop_conf_dir = hadoop_conf_dir,
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
-  default_fs = default_fs
-)
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources()
+)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
index 297f77d..4f9dac6 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
@@ -32,6 +32,7 @@ from status_params import *
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 
 # server configurations
 config = Script.get_config()
@@ -351,5 +352,6 @@ HdfsResource = functools.partial(
   hadoop_conf_dir = hadoop_conf_dir,
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
-  default_fs = default_fs
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources()
 )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py b/ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py
index 2c57e96..7a0de63 100644
--- a/ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py
@@ -25,6 +25,7 @@ from resource_management.libraries.functions import format
 from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 from resource_management.libraries.script.script import Script
 
 # server configurations
@@ -91,5 +92,6 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
   dfs_type = dfs_type
 )

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
index 0decbc2..7cbcb1e 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
@@ -27,6 +27,7 @@ from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.functions import get_port_from_url
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 from resource_management.libraries.script.script import Script
 
 from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
@@ -277,6 +278,7 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
   dfs_type = dfs_type
 )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py
index ff41105..707a7a4 100644
--- a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py
@@ -26,6 +26,7 @@ from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 
 # server configurations
 config = Script.get_config()
@@ -92,6 +93,7 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
   dfs_type = dfs_type
  )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/params.py
index 1dbed45..0bae970 100644
--- a/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/params.py
@@ -23,6 +23,7 @@ from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions.namenode_ha_utils import get_active_namenode
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 
 config = Script.get_config()
 
@@ -83,5 +84,6 @@ HdfsResource = functools.partial(HdfsResource,
     kinit_path_local=kinit_path_local,
     principal_name=hdfs_principal_name,
     hdfs_site=hdfs_site,
-    default_fs=default_fs)
+    default_fs=default_fs,
+    immutable_paths = get_not_managed_resources())
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params_linux.py
index b1cec11..aa5d3a5 100644
--- a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params_linux.py
@@ -23,6 +23,7 @@ from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 
 # server configurations
 config = Script.get_config()
@@ -72,5 +73,6 @@ HdfsResource = functools.partial(
   hadoop_conf_dir = hadoop_conf_dir,
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
-  default_fs = default_fs
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources()
 )

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py
index 843d8e7..906b198 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py
@@ -31,6 +31,7 @@ from resource_management.libraries.functions.get_stack_version import get_stack_
 from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 
 from resource_management.libraries.script.script import Script
 
@@ -197,5 +198,6 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
   dfs_type = dfs_type
  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
index 89ffcad..ab935a1 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
@@ -33,6 +33,7 @@ from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 
 # server configurations
 config = Script.get_config()
@@ -303,5 +304,6 @@ HdfsResource = functools.partial(
   hadoop_conf_dir = hadoop_conf_dir,
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
-  default_fs = default_fs
-)
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources()
+)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py
index cc87973..eb80ad6 100644
--- a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py
@@ -25,6 +25,7 @@ from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 from resource_management.libraries.script.script import Script
 
 # server configurations
@@ -96,6 +97,7 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
   dfs_type = dfs_type
 )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration-mapred/mapred-site.xml
index f30b807..3e78c37 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration-mapred/mapred-site.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration-mapred/mapred-site.xml
@@ -252,6 +252,7 @@
     <description>
       Directory where history files are managed by the MR JobHistory Server.
     </description>
+    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
   </property>
 
   <property>       

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-site.xml b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-site.xml
index 59d4964..8b00139 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-site.xml
@@ -292,6 +292,7 @@
     <name>yarn.nodemanager.remote-app-log-dir</name>
     <value>/app-logs</value>
     <description>Location to aggregate logs to. </description>
+    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
   </property>
 
   <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
index e02a55d..8b2aec5 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
@@ -26,6 +26,7 @@ from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries import functions
@@ -281,6 +282,7 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
   dfs_type = dfs_type
  )
 update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index 784a88f..3fb82e9 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -111,4 +111,14 @@ gpgcheck=0</value>
         </value-attributes>
     </property>
 
+    <property>
+        <name>managed_hdfs_resource_property_names</name>
+        <value></value>
+        <description>Comma-separated list of property names whose values are HDFS resource paths.
+        Resources in this list will be managed even if they are marked as not managed in the stack.</description>
+        <value-attributes>
+            <overridable>false</overridable>
+            <empty-value-valid>true</empty-value-valid>
+        </value-attributes>
+    </property>
 </configuration>
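
For orientation, here is a plausible sketch of what get_not_managed_resources() has to do with the two inputs this patch wires together: the NOT_MANAGED_HDFS_PATH values the server ships to agents (serialized under the NOT_MANAGED_HDFS_PATH_LIST host parameter, per the Java test further down) and the new cluster-env/managed_hdfs_resource_property_names override added just above. The signature and argument names below are simplified assumptions, not the actual helper:

import json

def get_not_managed_resources(host_level_params, cluster_env, configurations):
  """Illustrative sketch: combine the stack's NOT_MANAGED_HDFS_PATH list
  with the cluster-env override property (names simplified)."""
  # Assumption: the server serializes the collected NOT_MANAGED_HDFS_PATH
  # values as a JSON list in the host-level parameters.
  not_managed = json.loads(host_level_params['not_managed_hdfs_path_list'])

  # managed_hdfs_resource_property_names holds comma-separated property names
  # (e.g. "hive-site/hive.metastore.warehouse.dir") whose values should be
  # managed again even though the stack marked them as not managed.
  names = cluster_env.get('managed_hdfs_resource_property_names', '')
  for name in filter(None, (n.strip() for n in names.split(','))):
    value = configurations.get(name)
    if value in not_managed:
      not_managed.remove(value)
  return not_managed

# Example: /tmp and the Hive warehouse arrive as not managed, but the
# operator re-enables management of the warehouse path via cluster-env.
print(get_not_managed_resources(
    {'not_managed_hdfs_path_list': '["/tmp", "/apps/hive/warehouse"]'},
    {'managed_hdfs_resource_property_names': 'hive-site/hive.metastore.warehouse.dir'},
    {'hive-site/hive.metastore.warehouse.dir': '/apps/hive/warehouse'}))
# -> ['/tmp']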

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar
index 4544f6b..da81264 100644
Binary files a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar and b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar differ

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
index a19d969..7153c54 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
@@ -28,6 +28,7 @@ from resource_management.libraries.functions.version import format_stack_version
 from ambari_commons.os_check import OSCheck
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 
 config = Script.get_config()
@@ -42,6 +43,8 @@ hadoop_conf_dir = "/etc/hadoop/conf"
 
 component_list = default("/localComponents", [])
 
+hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
+
 # hadoop default params
 mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
 
@@ -308,5 +311,6 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
   dfs_type = dfs_type
 )

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
index b121b31..ba9c8fb 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
@@ -157,7 +157,7 @@ def create_javahome_symlink():
 
 def create_dirs():
    import params
-   params.HdfsResource("/tmp",
+   params.HdfsResource(params.hdfs_tmp_dir,
                        type="directory",
                        action="create_on_execute",
                        owner=params.hdfs_user,
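
One detail worth spelling out about the call shape being changed here: HdfsResource with action "create_on_execute" only queues a request, and a later HdfsResource(None, action="execute") call flushes the whole batch -- that is the assertResourceCalled('HdfsResource', None, ...) seen in the test diffs below, and the batch is replayed through the fast-hdfs-resource.jar updated just above. A toy model of that flow (the batching class is a stand-in, not the real implementation):

class HdfsResource(object):
  """Toy model of HdfsResource request batching (illustrative only)."""
  _pending = []

  def __init__(self, path, action=None, **kwargs):
    if action == "create_on_execute":
      # Queue the request; nothing touches HDFS yet.
      HdfsResource._pending.append((path, kwargs))
    elif action == "execute":
      # Flush the batch -- the HdfsResource(None, action="execute") call.
      for p, kw in HdfsResource._pending:
        print("creating %s as %s" % (p, kw.get("owner")))
      HdfsResource._pending = []

hdfs_tmp_dir = "/tmp"  # now read from hadoop-env/hdfs_tmp_dir instead of hard-coded
HdfsResource(hdfs_tmp_dir, action="create_on_execute", owner="hdfs", type="directory")
HdfsResource(None, action="execute")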

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/stacks/HDP/2.1/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1/services/HIVE/configuration/hive-site.xml
index 56dc017..9a8f986 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/services/HIVE/configuration/hive-site.xml
@@ -89,6 +89,7 @@ limitations under the License.
     <name>hive.metastore.warehouse.dir</name>
     <value>/apps/hive/warehouse</value>
     <description>location of default database for the warehouse</description>
+    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
   </property>
 
   <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
index 25bb468..61fcbb9 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
@@ -342,6 +342,7 @@ limitations under the License.
     <name>hive.metastore.warehouse.dir</name>
     <value>/apps/hive/warehouse</value>
     <description>location of default database for the warehouse</description>
+    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
   </property>
 
   <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py
index 9c55b42..6363c59 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py
@@ -91,7 +91,7 @@ class ECSClient(Script):
   def create_dirs(self,env):
     import params
     env.set_params(params)
-    params.HdfsResource("/tmp",
+    params.HdfsResource(params.hdfs_tmp_dir,
                        type="directory",
                        action="create_on_execute",
                        owner=params.hdfs_user,

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/params.py
index 713c3b4..c304a93 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/params.py
@@ -23,6 +23,7 @@ import os
 import itertools
 import re
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions import stack_select
 
@@ -57,6 +58,8 @@ hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
 tmp_dir = Script.get_tmp_dir()
 hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
 
+hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
+
 hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
 hdfs_site = config['configurations']['hdfs-site']
 default_fs = config['configurations']['core-site']['fs.defaultFS']
@@ -78,5 +81,6 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
   dfs_type = dfs_type
 )

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/configuration/hive-site.xml
index e2389b3..53040d7 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/configuration/hive-site.xml
@@ -310,6 +310,7 @@ limitations under the License.
     <name>hive.metastore.warehouse.dir</name>
     <value>/apps/hive/warehouse</value>
     <description>location of default database for the warehouse</description>
+    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
   </property>
 
   <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
index 59ae56e..b7f9e75 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
@@ -54,6 +54,7 @@ import org.apache.ambari.server.security.ldap.LdapBatchDto;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.MaintenanceState;
@@ -78,6 +79,7 @@ import javax.persistence.RollbackException;
 import java.lang.reflect.Field;
 import java.lang.reflect.Modifier;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -90,6 +92,7 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOST_SYS_
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JAVA_VERSION;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_NAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_VERSION;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.NOT_MANAGED_HDFS_PATH_LIST;
 import static org.easymock.EasyMock.anyBoolean;
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.capture;
@@ -1971,6 +1974,8 @@ public class AmbariManagementControllerImplTest {
     String JCE_NAME = "jceName";
     String OJDBC_JAR_NAME = "OjdbcJarName";
     String SERVER_DB_NAME = "ServerDBName";
+    Set<String> notManagedHdfsPathSet = new HashSet<>(Arrays.asList("/tmp", "/apps/falcon"));
+    Gson gson = new Gson();
 
     ActionManager manager = createNiceMock(ActionManager.class);
     StackId stackId = createNiceMock(StackId.class);
@@ -1980,6 +1985,7 @@ public class AmbariManagementControllerImplTest {
     ClusterVersionDAO clusterVersionDAO = createNiceMock(ClusterVersionDAO.class);
     ClusterVersionEntity clusterVersionEntity = createNiceMock(ClusterVersionEntity.class);
     RepositoryVersionEntity repositoryVersionEntity = createNiceMock(RepositoryVersionEntity.class);
+    ConfigHelper configHelper = createNiceMock(ConfigHelper.class);
 
     expect(cluster.getClusterName()).andReturn(clusterName);
     expect(cluster.getDesiredStackVersion()).andReturn(stackId);
@@ -1996,8 +2002,11 @@ public class AmbariManagementControllerImplTest {
     expect(clusterVersionDAO.findByClusterAndStateCurrent(clusterName)).andReturn(clusterVersionEntity).anyTimes();
     expect(clusterVersionEntity.getRepositoryVersion()).andReturn(repositoryVersionEntity).anyTimes();
     expect(repositoryVersionEntity.getVersion()).andReturn("1234").anyTimes();
+    expect(configHelper.getPropertyValuesWithPropertyType(stackId, PropertyInfo.PropertyType.NOT_MANAGED_HDFS_PATH,
+        cluster)).andReturn(notManagedHdfsPathSet);
 
-    replay(manager, clusters, cluster, injector, stackId, configuration, clusterVersionDAO, clusterVersionEntity, repositoryVersionEntity);
+    replay(manager, clusters, cluster, injector, stackId, configuration, clusterVersionDAO, clusterVersionEntity,
+        repositoryVersionEntity, configHelper);
 
     AmbariManagementControllerImpl ambariManagementControllerImpl =
         createMockBuilder(AmbariManagementControllerImpl.class)
@@ -2028,14 +2037,24 @@ public class AmbariManagementControllerImplTest {
     f.setAccessible(true);
     f.set(helper, clusterVersionDAO);
 
+    f = helperClass.getDeclaredField("configHelper");
+    f.setAccessible(true);
+    f.set(helper, configHelper);
+
+    f = helperClass.getDeclaredField("gson");
+    f.setAccessible(true);
+    f.set(helper, gson);
+
     Map<String, String> defaultHostParams = helper.createDefaultHostParams(cluster);
 
-    assertEquals(defaultHostParams.size(), 15);
+    assertEquals(defaultHostParams.size(), 16);
     assertEquals(defaultHostParams.get(DB_DRIVER_FILENAME), MYSQL_JAR);
     assertEquals(defaultHostParams.get(STACK_NAME), SOME_STACK_NAME);
     assertEquals(defaultHostParams.get(STACK_VERSION), SOME_STACK_VERSION);
     assertEquals("true", defaultHostParams.get(HOST_SYS_PREPPED));
     assertEquals("8", defaultHostParams.get(JAVA_VERSION));
+    assertNotNull(defaultHostParams.get(NOT_MANAGED_HDFS_PATH_LIST));
+    assertTrue(defaultHostParams.get(NOT_MANAGED_HDFS_PATH_LIST).contains("/tmp"));
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py b/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py
index e08e184..195d388 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py
@@ -27,6 +27,7 @@ from stacks.utils.RMFTestCase import *
 class TestMetricsCollector(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "AMBARI_METRICS/0.1.0/package"
   STACK_VERSION = "2.0.6"
+  DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
 
   def test_start_default_distributed(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metrics_collector.py",
@@ -311,6 +312,7 @@ class TestMetricsCollector(RMFTestCase):
     if name == 'master':
       if distributed:
         self.assertResourceCalled('HdfsResource', 'hdfs://localhost:8020/apps/hbase/data',
+                                  immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
                                   security_enabled = False,
                                   hadoop_bin_dir = '/usr/bin',
                                   keytab = UnknownConfigurationMock(),
@@ -327,6 +329,7 @@ class TestMetricsCollector(RMFTestCase):
                                   default_fs='hdfs://c6401.ambari.apache.org:8020',
                                   )
         self.assertResourceCalled('HdfsResource', '/amshbase/staging',
+                                  immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
                                   security_enabled = False,
                                   hadoop_bin_dir = '/usr/bin',
                                   keytab = UnknownConfigurationMock(),
@@ -343,6 +346,7 @@ class TestMetricsCollector(RMFTestCase):
                                   default_fs='hdfs://c6401.ambari.apache.org:8020',
                                   )
         self.assertResourceCalled('HdfsResource', None,
+                                  immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
                                   security_enabled = False,
                                   hadoop_bin_dir = '/usr/bin',
                                   keytab = UnknownConfigurationMock(),

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index 389ae74..e042fc1 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -27,6 +27,7 @@ class TestHBaseMaster(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "HBASE/0.96.0.2.0/package"
   STACK_VERSION = "2.0.6"
   TMP_PATH = "/hadoop"
+  DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
 
   def test_install_hbase_master_default_no_phx(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
@@ -336,6 +337,7 @@ class TestHBaseMaster(RMFTestCase):
     )
 
     self.assertResourceCalled('HdfsResource', 'hdfs://c6401.ambari.apache.org:8020/apps/hbase/data',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -349,6 +351,7 @@ class TestHBaseMaster(RMFTestCase):
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
     )
     self.assertResourceCalled('HdfsResource', '/apps/hbase/staging',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_conf_dir = '/etc/hadoop/conf',
         keytab = UnknownConfigurationMock(),
@@ -363,6 +366,7 @@ class TestHBaseMaster(RMFTestCase):
         mode = 0711,
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -474,6 +478,7 @@ class TestHBaseMaster(RMFTestCase):
                               content='log4jproperties\nline2'
     )
     self.assertResourceCalled('HdfsResource', 'hdfs://c6401.ambari.apache.org:8020/apps/hbase/data',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -487,6 +492,7 @@ class TestHBaseMaster(RMFTestCase):
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
     )
     self.assertResourceCalled('HdfsResource', '/apps/hbase/staging',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_conf_dir = '/etc/hadoop/conf',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -501,6 +507,7 @@ class TestHBaseMaster(RMFTestCase):
         mode = 0711,
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -621,6 +628,7 @@ class TestHBaseMaster(RMFTestCase):
                               content='log4jproperties\nline2')
 
     self.assertResourceCalled('HdfsResource', 'hdfs://nn1/apps/hbase/data',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
@@ -636,6 +644,7 @@ class TestHBaseMaster(RMFTestCase):
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
     )
     self.assertResourceCalled('HdfsResource', '/apps/hbase/staging',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
@@ -652,6 +661,7 @@ class TestHBaseMaster(RMFTestCase):
         mode = 0711,
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),


[18/21] ambari git commit: AMBARI-15291: Edit text/labels in Add/Activate HAWQ Standby Master wizard (Lav Jain via mithmatt)

Posted by jl...@apache.org.
AMBARI-15291: Edit text/labels in Add/Activate HAWQ Standby Master wizard (Lav Jain via mithmatt)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ac6b0d1f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ac6b0d1f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ac6b0d1f

Branch: refs/heads/AMBARI-13364
Commit: ac6b0d1fbbe459633cb02e17971dc365f57290ce
Parents: 76627aa
Author: Matt <mm...@pivotal.io>
Authored: Thu Mar 10 12:22:13 2016 -0800
Committer: Matt <mm...@pivotal.io>
Committed: Thu Mar 10 12:22:13 2016 -0800

----------------------------------------------------------------------
 ambari-web/app/messages.js | 70 ++++++++++++++++++++++-------------------
 1 file changed, 38 insertions(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ac6b0d1f/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 6b8317b..d74f6b4 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -2904,24 +2904,27 @@ Em.I18n.translations = {
   ' instructions to complete or reverting adding HAWQ Standby Master. Are you sure you want to exit the wizard?',
   'admin.addHawqStandby.wizard.header': 'Add HAWQ Standby Master Wizard',
   'admin.addHawqStandby.wizard.step1.header': 'Get Started',
-  'admin.addHawqStandby.wizard.step1.body':'This wizard will walk you through adding a HAWQ Standby Master to your cluster.<br/>' +
-      'Once added, you will be running a HAWQ Standby Master in addition to the current HAWQ Master.<br/>' +
-      'This allows for Active-Standby HAWQ configuration that can be used to perform a manual failover.<br/><br/>' +
-      '<b>You should plan a cluster maintenance window and prepare for cluster downtime when adding HAWQ Standby' +
-      ' Master. HAWQ cluster will be stopped and started during the process. </b><br/><br/>',
+  'admin.addHawqStandby.wizard.step1.body':'This wizard walks you through the process of adding the HAWQ Standby ' +
+      'Master as a backup of the current HAWQ Master host. After you add the HAWQ Standby Master, it serves as a <i>warm standby</i> ' +
+      'which may be activated in the event of the primary HAWQ Master host becoming non-operational.<br/><br/>' +
+      '<b>This procedure restarts the HAWQ service. Perform this procedure during a scheduled cluster maintenance window.</b>',
   'admin.addHawqStandby.wizard.step2.header': 'Select Host',
   'admin.addHawqStandby.wizard.step2.body': 'Select a host that will be running the HAWQ Standby Master',
   'admin.addHawqStandby.wizard.step3.header': 'Review',
   'admin.addHawqStandby.wizard.step3.configs_changes': 'Review Configuration Changes.',
   'admin.addHawqStandby.wizard.step3.confirm.host.body':'<b>Confirm your host selections.</b>',
   'admin.addHawqStandby.wizard.step3.confirm.config.body':'<div class="alert alert-info">' +
-      '<b>Review Configuration Changes.</b></br>' +
-      'The following lists the configuration changes that will be made by the Wizard to add HAWQ Standby Master. This information is for <b> review only </b> and is not editable.' +
-      '</div>',
+      '<b>Review Configuration Changes.</b><br/><br/>' +
+      'The following lists the configuration changes that will be made by the Wizard to add HAWQ Standby Master. ' +
+      'This information is for <b> review only </b> and is not editable.</div>',
   'admin.addHawqStandby.wizard.step3.hawqMaster': 'Current HAWQ Master',
   'admin.addHawqStandby.wizard.step3.newHawqStandby': 'New HAWQ Standby Master',
   'admin.addHawqStandby.wizard.step3.confirm.dataDir.title': 'HAWQ Standby Master Directory Confirmation',
-  'admin.addHawqStandby.wizard.step3.confirm.dataDir.body': 'Please confirm that the HAWQ data directory <b>{0}</b> on the Standby Master <b>{1}</b> does not exist or is empty.</br>If there is pre-existing data then HAWQ Standby will get initialized with stale data.',
+  'admin.addHawqStandby.wizard.step3.confirm.dataDir.body': 'Before you complete this procedure, ensure that you ' +
+      'rename the directory <b>{0}</b> on the HAWQ Standby Master host <b>{1}</b> if it exists (for example, ' +
+      'change it to {0}_old).<br/><br/><b>If {0} exists on the HAWQ Standby Master host, then the new HAWQ ' +
+      'Standby Master may be started with stale data, leaving the cluster in an inconsistent state.</b><br/><br/>' +
+      'Click Confirm to indicate that you have renamed any existing <b>{0}</b> directory on the HAWQ Standby Master host <b>{1}</b>.',
   'admin.addHawqStandby.step4.save.configuration.note': 'This configuration is created by Add HAWQ Standby wizard',
   'admin.addHawqStandby.wizard.step4.header': 'Configure Components',
   'admin.addHawqStandby.wizard.step4.task0.title': 'Stop HAWQ Service',
@@ -2959,41 +2962,44 @@ Em.I18n.translations = {
   'admin.removeHawqStandby.wizard.step3.notice.completed':'HAWQ Standby has been removed successfully.',
   'admin.removeHawqStandby.wizard.step3.removeHawqStandbyCommand.context': 'Execute HAWQ Standby remove command',
   'admin.removeHawqStandby.wizard.step3.save.configuration.note': 'This configuration was created by Remove HAWQ Standby wizard',
-  'admin.activateHawqStandby.button.enable': 'Activate HAWQ Standby',
-  'admin.activateHawqStandby.wizard.header': 'Activate HAWQ Standby Wizard',
+  'admin.activateHawqStandby.button.enable': 'Activate HAWQ Standby Master',
+  'admin.activateHawqStandby.wizard.header': 'Activate HAWQ Standby Master Wizard',
   'admin.activateHawqStandby.wizard.step1.header': 'Get Started',
-  'admin.activateHawqStandby.wizard.step1.body':'This wizard will walk you through activating HAWQ Standby.' +
-      '<br/>Once activated, HAWQ Standby will become HAWQ Master, and the previous HAWQ master will be removed. ' +
-      'Users will be able to connect to HAWQ database using the new HAWQ Master.' +
-      '<br/><br/><b>You should plan a cluster maintenance window and prepare for cluster downtime when ' +
-      'activating HAWQ Standby. During this operation, HAWQ service will be stopped and started.</b>' +
-      '<br/><br/>Note: In order to add standby for HAWQ service, you can use the "Add HAWQ Standby"</b> wizard.',
+  'admin.activateHawqStandby.wizard.step1.body': 'This wizard walks you through the process of activating the HAWQ Standby Master ' +
+      'in the event of HAWQ Master host failure. After you activate the HAWQ Standby Master, ' +
+      'it is promoted as the new HAWQ Master, and the previous HAWQ Master configuration ' +
+      'is removed from the cluster.<br/><br/><b>This procedure restarts the HAWQ service. ' +
+      'Perform this procedure during a scheduled cluster maintenance window, unless the current ' +
+      'HAWQ Master is not functioning.</b><br/><br/> After you complete this wizard, the HAWQ cluster ' +
+      'will no longer have a Standby Master. As a best practice, use the “Add HAWQ Standby Master” ' +
+      'service action to configure a new HAWQ Standby Master for the cluster.',
   'admin.activateHawqStandby.wizard.step2.header': 'Review',
   'admin.highAvailability.wizard.step2.toBeDeleted': 'TO BE DELETED',
   'admin.activateHawqStandby.wizard.step2.hawqMaster': '<b>Current HAWQ Master:</b>',
-  'admin.activateHawqStandby.wizard.step2.hawqStandby': '<b>New HAWQ Master:</b>',
-  'admin.activateHawqStandby.wizard.step2.toBeActivated': 'STANDBY TO BE ACTIVATED',
-  'admin.activateHawqStandby.wizard.step2.confirm.config.body':'<div class="alert alert-info">' +
-      '<b>Review Configuration Changes.</b></br>The following lists the configuration changes that will be ' +
-      'made by the Wizard to activate HAWQ Standby Master. This information is for <b> review only </b> and is not' +
-      ' editable.<br/><br/><b>Note:</b> hawq_standby_address_host property will be removed from hawq-site.xml as ' +
-      'HAWQ Standby will be activated to HAWQ Master.</div>',
-  'admin.activateHawqStandby.wizard.step2.confirm.host.body':'<b>Review HAWQ Master & Standby role changes.</b>',
-  'admin.activateHawqStandby.wizard.step2.confirmPopup.body': 'Do you wish to continue with activating HAWQ Standy? Please confirm, before proceeding as you will not be able to rollback from Ambari.',
+  'admin.activateHawqStandby.wizard.step2.hawqStandby': '<b>Current HAWQ Standby Master:</b>',
+  'admin.activateHawqStandby.wizard.step2.toBeActivated': 'TO BE ACTIVATED AS NEW HAWQ MASTER',
+  'admin.activateHawqStandby.wizard.step2.confirm.config.body': '<div class="alert alert-info">' +
+      '<b>Review Configuration Changes.</b><br/><br/>The Wizard will make the following configuration changes. '+
+      'This information is for review only, and cannot be edited.<br/><br/><b>After activating the HAWQ Standby ' +
+      'Master, the wizard removes the hawq_standby_address_host property from hawq-site.xml.</b> ' +
+      'As a best practice, you should configure a new HAWQ Standby Master host after the wizard completes.</div>',
+  'admin.activateHawqStandby.wizard.step2.confirm.host.body':'<b>Review HAWQ Master & Standby Master role changes.</b>',
+  'admin.activateHawqStandby.wizard.step2.confirmPopup.body': 'Do you wish to continue with activating HAWQ Standby Master? ' +
+      'Please confirm before proceeding, as you will not be able to roll back from Ambari.',
   'admin.activateHawqStandby.wizard.step3.header': 'Finalize Setup',
-  'admin.activateHawqStandby.wizard.step3.task0.title': 'Activate HAWQ Standby',
+  'admin.activateHawqStandby.wizard.step3.task0.title': 'Activate HAWQ Standby Master',
   'admin.activateHawqStandby.wizard.step3.task1.title': 'Stop HAWQ Service',
   'admin.activateHawqStandby.wizard.step3.task2.title': 'Reconfigure HAWQ',
   'admin.activateHawqStandby.wizard.step3.task3.title': 'Install Role: New HAWQ Master',
-  'admin.activateHawqStandby.wizard.step3.task4.title': 'Delete Role: Old HAWQ Master',
-  'admin.activateHawqStandby.wizard.step3.task5.title': 'Delete Role: Old HAWQ Standby',
+  'admin.activateHawqStandby.wizard.step3.task4.title': 'Delete Role: Previous HAWQ Master',
+  'admin.activateHawqStandby.wizard.step3.task5.title': 'Delete Role: Previous HAWQ Standby',
   'admin.activateHawqStandby.wizard.step3.task6.title': 'Start HAWQ Service',
   'admin.activateHawqStandby.closePopup':'Activate HAWQ Standby Wizard is in progress. You must allow the wizard to' +
       ' complete for Ambari to be in usable state. If you choose to quit, you must follow manual instructions to' +
       ' get back to a stable state. Are you sure you want to exit the wizard?',
-  'admin.activateHawqStandby.wizard.step3.notice.inProgress':'Please wait while HAWQ Standby is being activated',
-  'admin.activateHawqStandby.wizard.step3.notice.completed':'HAWQ Standby has been activated successfully.',
-  'admin.activateHawqStandby.wizard.step3.activateHawqStandbyCommand.context': "Execute HAWQ Standby activate command",
+  'admin.activateHawqStandby.wizard.step3.notice.inProgress':'Please wait while HAWQ Standby Master is being activated',
+  'admin.activateHawqStandby.wizard.step3.notice.completed':'HAWQ Standby Master has been activated successfully.',
+  'admin.activateHawqStandby.wizard.step3.activateHawqStandbyCommand.context': "Execute HAWQ Standby Master activate command",
   'admin.serviceAutoStart.save.popup.title': 'Save Auto-Start Configuration',
   'admin.serviceAutoStart.save.popup.body': 'You are changing the auto-start configuration.' +
       'Click <b>Save</b> to commit the change or <b>Discard</b> to revert your changes',


[03/21] ambari git commit: AMBARI-15321. Adding support for Hive Server Interactive Alerts (Swapan Shridhar via smohanty)

Posted by jl...@apache.org.
AMBARI-15321. Adding support for Hive Server Interactive Alerts (Swapan Shridhar via smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/98b2b238
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/98b2b238
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/98b2b238

Branch: refs/heads/AMBARI-13364
Commit: 98b2b238e68b5632c9ea4da970baedf1c2918398
Parents: 0cc8382
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Wed Mar 9 18:15:47 2016 -0800
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Wed Mar 9 18:15:47 2016 -0800

----------------------------------------------------------------------
 .../common-services/HIVE/0.12.0.2.0/alerts.json |  45 ++++
 .../alert_hive_interactive_thrift_port.py       | 212 +++++++++++++++++++
 2 files changed, 257 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/98b2b238/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/alerts.json b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/alerts.json
index cf99435..2ff7069 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/alerts.json
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/alerts.json
@@ -91,6 +91,51 @@
         }
       }
     ],
+    "HIVE_SERVER_INTERACTIVE": [
+      {
+        "name": "hive_server_interactive_process",
+        "label": "HiveServer2 Interactive Process",
+        "description": "This host-level alert is triggered if the HiveServerInteractive cannot be determined to be up and responding to client requests.",
+        "interval": 3,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HIVE/0.12.0.2.0/package/alerts/alert_hive_interactive_thrift_port.py",
+          "parameters": [
+            {
+              "name": "check.command.timeout",
+              "display_name": "Check command timeout",
+              "value": 60.0,
+              "type": "NUMERIC",
+              "description": "The maximum time before check command will be killed by timeout",
+              "units": "seconds"
+            },
+            {
+              "name": "default.smoke.user",
+              "display_name": "Default Smoke User",
+              "value": "ambari-qa",
+              "type": "STRING",
+              "description": "The user that will run the Hive commands if not specified in cluster-env/smokeuser"
+            },
+            {
+              "name": "default.smoke.principal",
+              "display_name": "Default Smoke Principal",
+              "value": "ambari-qa@EXAMPLE.COM",
+              "type": "STRING",
+              "description": "The principal to use when retrieving the kerberos ticket if not specified in cluster-env/smokeuser_principal_name"
+            },
+            {
+              "name": "default.smoke.keytab",
+              "display_name": "Default Smoke Keytab",
+              "value": "/etc/security/keytabs/smokeuser.headless.keytab",
+              "type": "STRING",
+              "description": "The keytab to use when retrieving the kerberos ticket if not specified in cluster-env/smokeuser_keytab"
+            }
+          ]
+        }
+      }
+    ],
     "WEBHCAT_SERVER": [
       {
         "name": "hive_webhcat_server_status",

http://git-wip-us.apache.org/repos/asf/ambari/blob/98b2b238/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_interactive_thrift_port.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_interactive_thrift_port.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_interactive_thrift_port.py
new file mode 100644
index 0000000..e390fcd
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_interactive_thrift_port.py
@@ -0,0 +1,212 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import os
+import socket
+import time
+import logging
+import traceback
+from resource_management.libraries.functions import hive_check
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import get_kinit_path
+from ambari_commons.os_check import OSConst
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+OK_MESSAGE = "TCP OK - {0:.3f}s response on port {1}"
+CRITICAL_MESSAGE = "Connection failed on host {0}:{1} ({2})"
+
+HIVE_SERVER_INTERACTIVE_THRIFT_PORT_KEY = '{{hive-interactive-site/hive.server2.thrift.port}}'
+HIVE_SERVER_INTERACTIVE_THRIFT_HTTP_PORT_KEY = '{{hive-interactive-site/hive.server2.thrift.http.port}}'
+HIVE_SERVER_INTERACTIVE_TRANSPORT_MODE_KEY = '{{hive-interactive-site/hive.server2.transport.mode}}'
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+HIVE_SERVER2_INTERACTIVE_AUTHENTICATION_KEY = '{{hive-interactive-site/hive.server2.authentication}}'
+HIVE_SERVER_INTERACTIVE_PRINCIPAL_KEY = '{{hive-interactive-site/hive.server2.authentication.kerberos.principal}}'
+SMOKEUSER_KEYTAB_KEY = '{{cluster-env/smokeuser_keytab}}'
+SMOKEUSER_PRINCIPAL_KEY = '{{cluster-env/smokeuser_principal_name}}'
+SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
+HIVE_SSL = '{{hive-interactive-site/hive.server2.use.SSL}}'
+HIVE_SSL_KEYSTORE_PATH = '{{hive-interactive-site/hive.server2.keystore.path}}'
+HIVE_SSL_KEYSTORE_PASSWORD = '{{hive-interactive-site/hive.server2.keystore.password}}'
+
+# The configured Kerberos executable search paths, if any
+KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
+
+THRIFT_PORT_DEFAULT = 10500
+HIVE_SERVER_INTERACTIVE_TRANSPORT_MODE_DEFAULT = 'binary'
+HIVE_SERVER_INTERACTIVE_PRINCIPAL_DEFAULT = 'hive/_HOST@EXAMPLE.COM'
+HIVE_SERVER2_INTERACTIVE_AUTHENTICATION_DEFAULT = 'NOSASL'
+
+# default keytab location
+SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY = 'default.smoke.keytab'
+SMOKEUSER_KEYTAB_DEFAULT = '/etc/security/keytabs/smokeuser.headless.keytab'
+
+# default smoke principal
+SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY = 'default.smoke.principal'
+SMOKEUSER_PRINCIPAL_DEFAULT = 'ambari-qa@EXAMPLE.COM'
+
+# default smoke user
+SMOKEUSER_SCRIPT_PARAM_KEY = 'default.smoke.user'
+SMOKEUSER_DEFAULT = 'ambari-qa'
+
+HADOOPUSER_KEY = '{{cluster-env/hadoop.user.name}}'
+HADOOPUSER_DEFAULT = 'hadoop'
+
+CHECK_COMMAND_TIMEOUT_KEY = 'check.command.timeout'
+CHECK_COMMAND_TIMEOUT_DEFAULT = 60.0
+
+logger = logging.getLogger('ambari_alerts')
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (HIVE_SERVER_INTERACTIVE_THRIFT_PORT_KEY, SECURITY_ENABLED_KEY, SMOKEUSER_KEY,
+          HIVE_SERVER2_INTERACTIVE_AUTHENTICATION_KEY, HIVE_SERVER_INTERACTIVE_PRINCIPAL_KEY,
+          SMOKEUSER_KEYTAB_KEY, SMOKEUSER_PRINCIPAL_KEY, HIVE_SERVER_INTERACTIVE_THRIFT_HTTP_PORT_KEY,
+          HIVE_SERVER_INTERACTIVE_TRANSPORT_MODE_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, HIVE_SSL,
+          HIVE_SSL_KEYSTORE_PATH, HIVE_SSL_KEYSTORE_PASSWORD)
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def get_tokens():
+  pass
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  if configurations is None:
+    return ('UNKNOWN', ['There were no configurations supplied to the script.'])
+
+  transport_mode = HIVE_SERVER_INTERACTIVE_TRANSPORT_MODE_DEFAULT
+  if HIVE_SERVER_INTERACTIVE_TRANSPORT_MODE_KEY in configurations:
+    transport_mode = configurations[HIVE_SERVER_INTERACTIVE_TRANSPORT_MODE_KEY]
+
+  port = THRIFT_PORT_DEFAULT
+  if transport_mode.lower() == 'binary' and HIVE_SERVER_INTERACTIVE_THRIFT_PORT_KEY in configurations:
+    port = int(configurations[HIVE_SERVER_INTERACTIVE_THRIFT_PORT_KEY])
+  elif transport_mode.lower() == 'http' and HIVE_SERVER_INTERACTIVE_THRIFT_HTTP_PORT_KEY in configurations:
+    port = int(configurations[HIVE_SERVER_INTERACTIVE_THRIFT_HTTP_PORT_KEY])
+
+  security_enabled = False
+  if SECURITY_ENABLED_KEY in configurations:
+    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
+
+  check_command_timeout = CHECK_COMMAND_TIMEOUT_DEFAULT
+  if CHECK_COMMAND_TIMEOUT_KEY in parameters:
+    check_command_timeout = float(parameters[CHECK_COMMAND_TIMEOUT_KEY])
+
+  hive_server2_authentication = HIVE_SERVER2_INTERACTIVE_AUTHENTICATION_DEFAULT
+  if HIVE_SERVER2_INTERACTIVE_AUTHENTICATION_KEY in configurations:
+    hive_server2_authentication = configurations[HIVE_SERVER2_INTERACTIVE_AUTHENTICATION_KEY]
+
+  hive_ssl = False
+  if HIVE_SSL in configurations:
+    hive_ssl = str(configurations[HIVE_SSL]).upper() == 'TRUE'
+
+  hive_ssl_keystore_path = None
+  if HIVE_SSL_KEYSTORE_PATH in configurations:
+    hive_ssl_keystore_path = configurations[HIVE_SSL_KEYSTORE_PATH]
+
+  hive_ssl_keystore_password = None
+  if HIVE_SSL_KEYSTORE_PASSWORD in configurations:
+    hive_ssl_keystore_password = configurations[HIVE_SSL_KEYSTORE_PASSWORD]
+
+  # defaults
+  smokeuser_keytab = SMOKEUSER_KEYTAB_DEFAULT
+  smokeuser_principal = SMOKEUSER_PRINCIPAL_DEFAULT
+  smokeuser = SMOKEUSER_DEFAULT
+
+  # check script params
+  if SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY in parameters:
+    smokeuser_principal = parameters[SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY]
+
+  if SMOKEUSER_SCRIPT_PARAM_KEY in parameters:
+    smokeuser = parameters[SMOKEUSER_SCRIPT_PARAM_KEY]
+
+  if SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY in parameters:
+    smokeuser_keytab = parameters[SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY]
+
+
+  # check configurations last as they should always take precedence
+  if SMOKEUSER_PRINCIPAL_KEY in configurations:
+    smokeuser_principal = configurations[SMOKEUSER_PRINCIPAL_KEY]
+
+  if SMOKEUSER_KEY in configurations:
+    smokeuser = configurations[SMOKEUSER_KEY]
+
+  result_code = None
+
+  if security_enabled:
+    hive_server_principal = HIVE_SERVER_INTERACTIVE_PRINCIPAL_DEFAULT
+    if HIVE_SERVER_INTERACTIVE_PRINCIPAL_KEY in configurations:
+      hive_server_principal = configurations[HIVE_SERVER_INTERACTIVE_PRINCIPAL_KEY]
+
+    if SMOKEUSER_KEYTAB_KEY in configurations:
+      smokeuser_keytab = configurations[SMOKEUSER_KEYTAB_KEY]
+
+    # Get the configured Kerberos executable search paths, if any
+    if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
+      kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
+    else:
+      kerberos_executable_search_paths = None
+
+    kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
+    kinitcmd = format("{kinit_path_local} -kt {smokeuser_keytab} {smokeuser_principal}; ")
+  else:
+    hive_server_principal = None
+    kinitcmd = None
+
+  try:
+    if host_name is None:
+      host_name = socket.getfqdn()
+
+    start_time = time.time()
+
+    try:
+      hive_check.check_thrift_port_sasl(host_name, port, hive_server2_authentication, hive_server_principal,
+                                        kinitcmd, smokeuser, transport_mode=transport_mode, ssl=hive_ssl,
+                                        ssl_keystore=hive_ssl_keystore_path, ssl_password=hive_ssl_keystore_password,
+                                        check_command_timeout=int(check_command_timeout))
+      result_code = 'OK'
+      total_time = time.time() - start_time
+      label = OK_MESSAGE.format(total_time, port)
+    except:
+      result_code = 'CRITICAL'
+      label = CRITICAL_MESSAGE.format(host_name, port, traceback.format_exc())
+
+  except:
+    label = traceback.format_exc()
+    result_code = 'UNKNOWN'
+
+  return (result_code, [label])
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def execute(configurations={}, parameters={}, host_name=None):
+  pass
\ No newline at end of file
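
For reference, Ambari's alert framework resolves every {{site/property}} token
returned by get_tokens() against the live cluster configuration and passes the
resulting dictionary to execute(). A minimal local invocation might look like
the sketch below (the module name alert_hive_interactive_thrift_port is an
assumption made for illustration, not something this patch defines):

    import alert_hive_interactive_thrift_port as alert  # hypothetical module name

    # Keys mirror the tokens get_tokens() declares; the values are what the
    # framework would substitute from the cluster's configurations.
    configurations = {
        '{{hive-interactive-site/hive.server2.thrift.port}}': '10500',
        '{{hive-interactive-site/hive.server2.transport.mode}}': 'binary',
        '{{cluster-env/security_enabled}}': 'false',
        '{{cluster-env/smokeuser}}': 'ambari-qa',
    }

    result_code, labels = alert.execute(configurations=configurations)
    print result_code, labels[0]  # OK with the TCP message if HSI answers on 10500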


[09/21] ambari git commit: AMBARI-15228. Ambari overwrites permissions on HDFS directories (aonishuk)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.3/HAWQ/test_hawqmaster.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/HAWQ/test_hawqmaster.py b/ambari-server/src/test/python/stacks/2.3/HAWQ/test_hawqmaster.py
index 18205f7..944c540 100644
--- a/ambari-server/src/test/python/stacks/2.3/HAWQ/test_hawqmaster.py
+++ b/ambari-server/src/test/python/stacks/2.3/HAWQ/test_hawqmaster.py
@@ -27,6 +27,7 @@ class TestHawqMaster(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = 'HAWQ/2.0.0/package'
   STACK_VERSION = '2.3'
   GPADMIN = 'gpadmin'
+  DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
 
   def __asserts_for_configure(self):
 
@@ -167,6 +168,7 @@ class TestHawqMaster(RMFTestCase):
         )
 
     self.assertResourceCalled('HdfsResource', '/hawq_default',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         default_fs = u'hdfs://c6401.ambari.apache.org:8020',
         hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         type = 'directory',
@@ -183,6 +185,7 @@ class TestHawqMaster(RMFTestCase):
         )
 
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         default_fs = u'hdfs://c6401.ambari.apache.org:8020',
         hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         action = ['execute'],
@@ -230,6 +233,7 @@ class TestHawqMaster(RMFTestCase):
         )
 
     self.assertResourceCalled('HdfsResource', '/hawq_default',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         default_fs = u'hdfs://c6401.ambari.apache.org:8020',
         hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         type = 'directory',
@@ -246,6 +250,7 @@ class TestHawqMaster(RMFTestCase):
         )
 
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         default_fs = u'hdfs://c6401.ambari.apache.org:8020',
         hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         action = ['execute'],
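
Every HdfsResource assertion in these tests now pins immutable_paths to the
same five directories. The rule the feature implies -- a target under an
immutable root keeps whatever permissions it already has -- can be sketched
as prefix matching on path components (illustrative only, not the actual
resource-provider code):

    DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon',
                               '/mr-history/done', '/app-logs', '/tmp']

    def is_immutable(target, immutable_paths=DEFAULT_IMMUTABLE_PATHS):
      # Compare components so '/tmp/foo' matches '/tmp' but '/tmpfoo' does not.
      target_parts = [p for p in target.split('/') if p]
      for path in immutable_paths:
        parts = [p for p in path.split('/') if p]
        if target_parts[:len(parts)] == parts:
          return True
      return False

    assert is_immutable('/apps/hive/warehouse/managed/t1')
    assert not is_immutable('/hawq_default')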

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_service_check.py b/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_service_check.py
index 78ae3fe..7a3f010 100644
--- a/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_service_check.py
@@ -26,6 +26,7 @@ from only_for_platform import not_for_platform, PLATFORM_WINDOWS
 class TestMahoutClient(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "MAHOUT/1.0.0.2.3/package"
   STACK_VERSION = "2.3"
+  DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
 
   def test_configure_default(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
@@ -41,6 +42,7 @@ class TestMahoutClient(RMFTestCase):
         mode = 0755,
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/mahoutsmokeoutput',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
@@ -52,6 +54,7 @@ class TestMahoutClient(RMFTestCase):
         type = 'directory',
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/mahoutsmokeinput',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
@@ -64,6 +67,7 @@ class TestMahoutClient(RMFTestCase):
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/mahoutsmokeinput/sample-mahout-test.txt',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
@@ -77,6 +81,7 @@ class TestMahoutClient(RMFTestCase):
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py b/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
index c23fd96..674f30d 100644
--- a/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
+++ b/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
@@ -28,6 +28,7 @@ from only_for_platform import not_for_platform, PLATFORM_WINDOWS
 class TestSparkThriftServer(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "SPARK/1.2.0.2.2/package"
   STACK_VERSION = "2.3"
+  DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
 
   @patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
   def test_configure_default(self, copy_to_hdfs_mock):
@@ -91,6 +92,7 @@ class TestSparkThriftServer(RMFTestCase):
         mode = 0775
     )
     self.assertResourceCalled('HdfsResource', '/user/spark',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
@@ -107,6 +109,7 @@ class TestSparkThriftServer(RMFTestCase):
         mode = 0775,
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py b/ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py
index 9197cf1..a274d45 100644
--- a/ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py
+++ b/ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py
@@ -36,6 +36,7 @@ origin_exists = os.path.exists
 class TestAts(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "YARN/2.1.0.2.0/package"
   STACK_VERSION = "2.3"
+  DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
 
   def test_configure_default(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/application_timeline_server.py",
@@ -151,6 +152,7 @@ class TestAts(RMFTestCase):
                               cd_access = 'a',
                               )
     self.assertResourceCalled('HdfsResource', '/ats',
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
                               security_enabled = False,
                               hadoop_bin_dir = '/usr/bin',
                               keytab = UnknownConfigurationMock(),
@@ -169,6 +171,7 @@ class TestAts(RMFTestCase):
                               mode = 0755,
                               )
     self.assertResourceCalled('HdfsResource', '/ats/done',
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
                               security_enabled = False,
                               hadoop_bin_dir = '/usr/bin',
                               keytab = UnknownConfigurationMock(),
@@ -186,6 +189,7 @@ class TestAts(RMFTestCase):
                               mode = 0700,
                               )
     self.assertResourceCalled('HdfsResource', '/ats',
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
                               security_enabled = False,
                               hadoop_bin_dir = '/usr/bin',
                               keytab = UnknownConfigurationMock(),
@@ -204,6 +208,7 @@ class TestAts(RMFTestCase):
                               mode = 0755,
                               )
     self.assertResourceCalled('HdfsResource', '/ats/active',
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
                               security_enabled = False,
                               hadoop_bin_dir = '/usr/bin',
                               keytab = UnknownConfigurationMock(),
@@ -221,6 +226,7 @@ class TestAts(RMFTestCase):
                               mode = 01777,
                               )
     self.assertResourceCalled('HdfsResource', None,
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
                               security_enabled = False,
                               hadoop_bin_dir = '/usr/bin',
                               keytab = UnknownConfigurationMock(),

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.3/configs/ats_1_5.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/ats_1_5.json b/ambari-server/src/test/python/stacks/2.3/configs/ats_1_5.json
index 2ae9584..3fe1235 100644
--- a/ambari-server/src/test/python/stacks/2.3/configs/ats_1_5.json
+++ b/ambari-server/src/test/python/stacks/2.3/configs/ats_1_5.json
@@ -3,7 +3,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
         "repo_info": "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]",
@@ -531,6 +532,7 @@
             "xml_configurations_supported" : "false"
         },
       "cluster-env": {
+          "managed_hdfs_resource_property_names": "",
           "security_enabled": "false",
           "hdfs_user_principal" : "",
           "hdfs_user_keytab" : "",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.3/configs/default.hbasedecom.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/default.hbasedecom.json b/ambari-server/src/test/python/stacks/2.3/configs/default.hbasedecom.json
index db157af..fff0024 100644
--- a/ambari-server/src/test/python/stacks/2.3/configs/default.hbasedecom.json
+++ b/ambari-server/src/test/python/stacks/2.3/configs/default.hbasedecom.json
@@ -3,6 +3,7 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
@@ -428,6 +429,7 @@
             "min_user_id": "1000"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "smokeuser": "ambari-qa",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.3/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/default.json b/ambari-server/src/test/python/stacks/2.3/configs/default.json
index 253e833..d5102dc 100644
--- a/ambari-server/src/test/python/stacks/2.3/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.3/configs/default.json
@@ -3,6 +3,7 @@
     "clusterName": "c1",
     "hostname": "c6401.ambari.apache.org",
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
@@ -63,6 +64,7 @@
             "yarn.resourcemanager.scheduler.address": "c6401.ambari.apache.org:8030"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "smokeuser": "ambari-qa",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.3/configs/hawq_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/hawq_default.json b/ambari-server/src/test/python/stacks/2.3/configs/hawq_default.json
index 241547d..99d43cb 100644
--- a/ambari-server/src/test/python/stacks/2.3/configs/hawq_default.json
+++ b/ambari-server/src/test/python/stacks/2.3/configs/hawq_default.json
@@ -82,6 +82,7 @@
     "clusterName": "phd", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.8.0_60", 
@@ -1027,6 +1028,7 @@
             "mapreduce.admin.map.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}"
         }, 
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false", 
             "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab", 
             "ignore_groupsusers_create": "false", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.3/configs/hbase_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/hbase_default.json b/ambari-server/src/test/python/stacks/2.3/configs/hbase_default.json
index f560704..0da58ce 100644
--- a/ambari-server/src/test/python/stacks/2.3/configs/hbase_default.json
+++ b/ambari-server/src/test/python/stacks/2.3/configs/hbase_default.json
@@ -39,6 +39,7 @@
     "clusterName": "c1", 
     "hostname": "c6405.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6405.ambari.apache.org:8080/resources/", 
@@ -313,6 +314,7 @@
             "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
 j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
 GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
         }, 
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "kerberos_domain": "EXAMPLE.COM",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.3/configs/hbase_secure.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/hbase_secure.json b/ambari-server/src/test/python/stacks/2.3/configs/hbase_secure.json
index 3a8133a..8ecb91b 100644
--- a/ambari-server/src/test/python/stacks/2.3/configs/hbase_secure.json
+++ b/ambari-server/src/test/python/stacks/2.3/configs/hbase_secure.json
@@ -44,6 +44,7 @@
     "clusterName": "c1", 
     "hostname": "c6405.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6405.ambari.apache.org:8080/resources/", 
@@ -578,6 +579,7 @@
             "content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nversion: 1\n\n# Please replace with site specific values\ndn: dc=hadoop,dc=apache,dc=org\nobjectclass: organization\nobjectclass: dcObject\no: Hadoop\ndc: hadoop\n\n#
  Entry for a sample people container\n# Please replace with site specific values\ndn: ou=people,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass:organizationalUnit\nou: people\n\n# Entry for a sample end user\n# Please replace with site specific values\ndn: uid=guest,ou=people,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass:person\nobjectclass:organizationalPerson\nobjectclass:inetOrgPerson\ncn: Guest\nsn: User\nuid: guest\nuserPassword:guest-password\n\n# entry for sample user admin\ndn: uid=admin,ou=people,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass:person\nobjectclass:organizationalPerson\nobjectclass:inetOrgPerson\ncn: Admin\nsn: Admin\nuid: admin\nuserPassword:admin-password\n\n# entry for sample user sam\ndn: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass:person\nobjectclass:organizationalPerson\nobjectclass:inetOrgPerson\ncn: sam\nsn: sam\nuid: sam\nuserPassword:sam-password\n\n# entry for sample user tom\ndn: uid=tom,
 ou=people,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass:person\nobjectclass:organizationalPerson\nobjectclass:inetOrgPerson\ncn: tom\nsn: tom\nuid: tom\nuserPassword:tom-password\n\n# create FIRST Level groups branch\ndn: ou=groups,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass:organizationalUnit\nou: groups\ndescription: generic groups branch\n\n# create the analyst group under groups\ndn: cn=analyst,ou=groups,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass: groupofnames\ncn: analyst\ndescription:analyst  group\nmember: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org\nmember: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org\n\n\n# create the scientist group under groups\ndn: cn=scientist,ou=groups,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass: groupofnames\ncn: scientist\ndescription: scientist group\nmember: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org"
         }, 
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "true",
             "ignore_groupsusers_create": "false",
             "kerberos_domain": "EXAMPLE.COM",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.3/configs/pxf_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/pxf_default.json b/ambari-server/src/test/python/stacks/2.3/configs/pxf_default.json
index 1f6afe4..10cd9a4 100644
--- a/ambari-server/src/test/python/stacks/2.3/configs/pxf_default.json
+++ b/ambari-server/src/test/python/stacks/2.3/configs/pxf_default.json
@@ -3,6 +3,7 @@
     "clusterName": "c1",
     "hostname": "c6401.ambari.apache.org",
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
@@ -52,6 +53,7 @@
             "content": " profile"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "smokeuser": "ambari-qa",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.3/configs/spark_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/spark_default.json b/ambari-server/src/test/python/stacks/2.3/configs/spark_default.json
index 80411ec..ce982a1 100644
--- a/ambari-server/src/test/python/stacks/2.3/configs/spark_default.json
+++ b/ambari-server/src/test/python/stacks/2.3/configs/spark_default.json
@@ -3,6 +3,7 @@
     "clusterName": "c1",
     "hostname": "c6401.ambari.apache.org",
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred",
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
@@ -199,6 +200,7 @@
             "yarn.resourcemanager.scheduler.address": "c6401.ambari.apache.org:8030"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "smokeuser": "ambari-qa",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.3/configs/storm_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/storm_default.json b/ambari-server/src/test/python/stacks/2.3/configs/storm_default.json
index 2de83a8..72afd78 100644
--- a/ambari-server/src/test/python/stacks/2.3/configs/storm_default.json
+++ b/ambari-server/src/test/python/stacks/2.3/configs/storm_default.json
@@ -21,7 +21,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.8.0_40", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
@@ -173,6 +174,7 @@
             "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
 j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
 GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
         }, 
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "kerberos_domain": "EXAMPLE.COM",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.3/configs/storm_default_secure.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/storm_default_secure.json b/ambari-server/src/test/python/stacks/2.3/configs/storm_default_secure.json
index 3b92500..c1b7e10 100644
--- a/ambari-server/src/test/python/stacks/2.3/configs/storm_default_secure.json
+++ b/ambari-server/src/test/python/stacks/2.3/configs/storm_default_secure.json
@@ -21,7 +21,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.8.0_40", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
@@ -184,6 +185,7 @@
             "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
 j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
 GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
         }, 
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "true",
             "ignore_groupsusers_create": "false",
             "kerberos_domain": "EXAMPLE.COM",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/contrib/fast-hdfs-resource/src/main/java/org/apache/ambari/fast_hdfs_resource/Resource.java
----------------------------------------------------------------------
diff --git a/contrib/fast-hdfs-resource/src/main/java/org/apache/ambari/fast_hdfs_resource/Resource.java b/contrib/fast-hdfs-resource/src/main/java/org/apache/ambari/fast_hdfs_resource/Resource.java
index d774ab8..9ef7660 100644
--- a/contrib/fast-hdfs-resource/src/main/java/org/apache/ambari/fast_hdfs_resource/Resource.java
+++ b/contrib/fast-hdfs-resource/src/main/java/org/apache/ambari/fast_hdfs_resource/Resource.java
@@ -44,6 +44,7 @@ public class Resource {
   private boolean recursiveChown;
   private boolean recursiveChmod;
   private boolean changePermissionforParents;
+  private boolean manageIfExists;
 
   public String getSource() {
     return source;
@@ -125,16 +126,22 @@ public class Resource {
     this.changePermissionforParents = changePermissionforParents;
   }
 
-  
-  
-  
+  public boolean isManageIfExists() {
+    return manageIfExists;
+  }
+
+  public void setManageIfExists(boolean manageIfExists) {
+    this.manageIfExists = manageIfExists;
+  }
+
   @Override
   public String toString() {
     return "Resource [source=" + source + ", target=" + target + ", type="
         + type + ", action=" + action + ", owner=" + owner + ", group=" + group
         + ", mode=" + mode + ", recursiveChown=" + recursiveChown
         + ", recursiveChmod=" + recursiveChmod
-        + ", changePermissionforParents=" + changePermissionforParents + "]";
+        + ", changePermissionforParents=" + changePermissionforParents
+        + ", manageIfExists=" + manageIfExists + "]";
   }
 
   /*

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/contrib/fast-hdfs-resource/src/main/java/org/apache/ambari/fast_hdfs_resource/Runner.java
----------------------------------------------------------------------
diff --git a/contrib/fast-hdfs-resource/src/main/java/org/apache/ambari/fast_hdfs_resource/Runner.java b/contrib/fast-hdfs-resource/src/main/java/org/apache/ambari/fast_hdfs_resource/Runner.java
index 0c10ca7..e4656c7 100644
--- a/contrib/fast-hdfs-resource/src/main/java/org/apache/ambari/fast_hdfs_resource/Runner.java
+++ b/contrib/fast-hdfs-resource/src/main/java/org/apache/ambari/fast_hdfs_resource/Runner.java
@@ -72,6 +72,12 @@ public class Runner {
         Resource.checkResourceParameters(resource, dfs);
 
         Path pathHadoop = new Path(resource.getTarget());
+        if (!resource.isManageIfExists() && dfs.exists(pathHadoop)) {
+          System.out.println("Skipping the operation for not managed DFS directory " + resource.getTarget() +
+                             " since immutable_paths contains it.");
+          continue;
+        }
+
         if (resource.getAction().equals("create")) {
           // 5 - Create
           Resource.createResource(resource, dfs, pathHadoop);
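
The guard added to Runner.java above is the enforcement point: a target that
already exists and whose resource carries manageIfExists=false is skipped
outright, so its current owner and mode survive. The same condition in a few
lines of Python (an analogue for illustration, not code from this patch):

    from collections import namedtuple

    Resource = namedtuple('Resource', ['target', 'manage_if_exists'])

    def should_skip(resource, existing_paths):
      # Mirrors the Java check: !resource.isManageIfExists() && dfs.exists(path)
      return (not resource.manage_if_exists) and resource.target in existing_paths

    existing = {'/tmp', '/apps/hive/warehouse'}
    assert should_skip(Resource('/tmp', False), existing)
    assert not should_skip(Resource('/user/spark', False), existing)
    assert not should_skip(Resource('/tmp', True), existing)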


[11/21] ambari git commit: AMBARI-15228. Ambari overwrites permissions on HDFS directories (aonishuk)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index 3655317..9ece2a3 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -32,6 +32,7 @@ from resource_management.core.exceptions import Fail
 class TestNamenode(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "HDFS/2.1.0.2.0/package"
   STACK_VERSION = "2.0.6"
+  DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
 
   def test_configure_default(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
@@ -95,6 +96,7 @@ class TestNamenode(RMFTestCase):
         logoutput=True
     )
     self.assertResourceCalled('HdfsResource', '/tmp',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         only_if=True,
         keytab = UnknownConfigurationMock(),
@@ -112,6 +114,7 @@ class TestNamenode(RMFTestCase):
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         only_if=True,
         keytab = UnknownConfigurationMock(),
@@ -129,6 +132,7 @@ class TestNamenode(RMFTestCase):
         mode = 0770,
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         only_if=True,
         keytab = UnknownConfigurationMock(),
@@ -208,6 +212,7 @@ class TestNamenode(RMFTestCase):
         logoutput=True
     )
     self.assertResourceCalled('HdfsResource', '/tmp',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         only_if = True,
         keytab = UnknownConfigurationMock(),
@@ -225,6 +230,7 @@ class TestNamenode(RMFTestCase):
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         only_if = True,
         keytab = UnknownConfigurationMock(),
@@ -242,6 +248,7 @@ class TestNamenode(RMFTestCase):
         mode = 0770,
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         only_if = True,
         keytab = UnknownConfigurationMock(),
@@ -337,6 +344,7 @@ class TestNamenode(RMFTestCase):
         logoutput=True
     )
     self.assertResourceCalled('HdfsResource', '/tmp',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -351,6 +359,7 @@ class TestNamenode(RMFTestCase):
         only_if = True
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -365,6 +374,7 @@ class TestNamenode(RMFTestCase):
         only_if = True
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         only_if = True,
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -433,6 +443,7 @@ class TestNamenode(RMFTestCase):
         logoutput=True
     )
     self.assertResourceCalled('HdfsResource', '/tmp',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = UnknownConfigurationMock(),
@@ -450,6 +461,7 @@ class TestNamenode(RMFTestCase):
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = UnknownConfigurationMock(),
@@ -467,6 +479,7 @@ class TestNamenode(RMFTestCase):
         mode = 0770,
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = UnknownConfigurationMock(),
@@ -530,6 +543,7 @@ class TestNamenode(RMFTestCase):
         logoutput=True
     )
     self.assertResourceCalled('HdfsResource', '/tmp',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = UnknownConfigurationMock(),
@@ -547,6 +561,7 @@ class TestNamenode(RMFTestCase):
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = UnknownConfigurationMock(),
@@ -564,6 +579,7 @@ class TestNamenode(RMFTestCase):
         mode = 0770,
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = UnknownConfigurationMock(),
@@ -633,6 +649,7 @@ class TestNamenode(RMFTestCase):
         logoutput=True
     )
     self.assertResourceCalled('HdfsResource', '/tmp',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -650,6 +667,7 @@ class TestNamenode(RMFTestCase):
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -667,6 +685,7 @@ class TestNamenode(RMFTestCase):
         mode = 0770,
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -736,6 +755,7 @@ class TestNamenode(RMFTestCase):
         logoutput=True
     )
     self.assertResourceCalled('HdfsResource', '/tmp',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = UnknownConfigurationMock(),
@@ -753,6 +773,7 @@ class TestNamenode(RMFTestCase):
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = UnknownConfigurationMock(),
@@ -770,6 +791,7 @@ class TestNamenode(RMFTestCase):
         mode = 0770,
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = UnknownConfigurationMock(),
@@ -838,6 +860,7 @@ class TestNamenode(RMFTestCase):
         logoutput=True
     )
     self.assertResourceCalled('HdfsResource', '/tmp',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn2 | grep active'",
         keytab = UnknownConfigurationMock(),
@@ -855,6 +878,7 @@ class TestNamenode(RMFTestCase):
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn2 | grep active'",
         keytab = UnknownConfigurationMock(),
@@ -872,6 +896,7 @@ class TestNamenode(RMFTestCase):
         mode = 0770,
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn2 | grep active'",
         keytab = UnknownConfigurationMock(),
@@ -948,6 +973,7 @@ class TestNamenode(RMFTestCase):
                               logoutput=True
     )
     self.assertResourceCalled('HdfsResource', '/tmp',
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
                               security_enabled = False,
                               only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn2 | grep active'",
                               keytab = UnknownConfigurationMock(),
@@ -965,6 +991,7 @@ class TestNamenode(RMFTestCase):
                               mode = 0777,
                               )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
                               security_enabled = False,
                               only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn2 | grep active'",
                               keytab = UnknownConfigurationMock(),
@@ -982,6 +1009,7 @@ class TestNamenode(RMFTestCase):
                               mode = 0770,
                               )
     self.assertResourceCalled('HdfsResource', None,
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
                               security_enabled = False,
                               only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn2 | grep active'",
                               keytab = UnknownConfigurationMock(),
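
In the HA scenarios above, each HdfsResource is gated by an only_if shell
test so that directories are created and chmodded from the active NameNode
only. Stripped of the ambari-sudo.sh/su wrapper, the guard reduces to the
check sketched here (config path and service id are illustrative):

    import subprocess

    def is_active_namenode(nn_id):
      # Ask haadmin for the service state and look for 'active' in the
      # output; the real guard runs this as the hdfs user via ambari-sudo.sh.
      cmd = "hdfs --config /etc/hadoop/conf haadmin -getServiceState %s" % nn_id
      try:
        out = subprocess.check_output(cmd, shell=True)
      except subprocess.CalledProcessError:
        return False
      return 'active' in out

    if is_active_namenode('nn1'):
      pass  # safe to run the HdfsResource operations from this host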

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py
index 4c66c4f..bbc1b3a 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py
@@ -25,6 +25,7 @@ from mock.mock import MagicMock, call, patch
 class TestServiceCheck(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "HDFS/2.1.0.2.0/package"
   STACK_VERSION = "2.0.6"
+  DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
 
   def test_service_check_default(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
@@ -60,6 +61,7 @@ class TestServiceCheck(RMFTestCase):
         user = 'hdfs',
     )
     self.assertResourceCalled('HdfsResource', '/tmp',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -75,6 +77,7 @@ class TestServiceCheck(RMFTestCase):
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/tmp/',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -89,6 +92,7 @@ class TestServiceCheck(RMFTestCase):
         type = 'file',
     )
     self.assertResourceCalled('HdfsResource', '/tmp/',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -104,6 +108,7 @@ class TestServiceCheck(RMFTestCase):
         type = 'file',
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
index 5335605..338d3d3 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
@@ -38,6 +38,7 @@ class TestHiveServer(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "HIVE/0.12.0.2.0/package"
   STACK_VERSION = "2.0.6"
   UPGRADE_STACK_VERSION = "2.2"
+  DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
 
   def setUp(self):
     Logger.logger = MagicMock()
@@ -331,6 +332,7 @@ class TestHiveServer(RMFTestCase):
   def assert_configure_default(self, no_tmp = False, default_fs_default='hdfs://c6401.ambari.apache.org:8020'):
     # Verify creating of Hcat and Hive directories
     self.assertResourceCalled('HdfsResource', '/apps/webhcat',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -344,6 +346,7 @@ class TestHiveServer(RMFTestCase):
         mode = 0755,
     )
     self.assertResourceCalled('HdfsResource', '/user/hcat',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -362,6 +365,7 @@ class TestHiveServer(RMFTestCase):
       return
 
     self.assertResourceCalled('HdfsResource', '/apps/hive/warehouse',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -375,6 +379,7 @@ class TestHiveServer(RMFTestCase):
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/hive',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -389,6 +394,7 @@ class TestHiveServer(RMFTestCase):
     )
     if not no_tmp:
       self.assertResourceCalled('HdfsResource', '/custompath/tmp/hive',
+          immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
           security_enabled = False,
           hadoop_conf_dir = '/etc/hadoop/conf',
           keytab = UnknownConfigurationMock(),
@@ -403,6 +409,7 @@ class TestHiveServer(RMFTestCase):
           mode = 0777,
       )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -519,6 +526,7 @@ class TestHiveServer(RMFTestCase):
 
   def assert_configure_secured(self):
     self.assertResourceCalled('HdfsResource', '/apps/webhcat',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -532,6 +540,7 @@ class TestHiveServer(RMFTestCase):
         mode = 0755,
     )
     self.assertResourceCalled('HdfsResource', '/user/hcat',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -546,6 +555,7 @@ class TestHiveServer(RMFTestCase):
     )
 
     self.assertResourceCalled('HdfsResource', '/apps/hive/warehouse',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -559,6 +569,7 @@ class TestHiveServer(RMFTestCase):
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/hive',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -572,6 +583,7 @@ class TestHiveServer(RMFTestCase):
         mode = 0755,
     )
     self.assertResourceCalled('HdfsResource', '/custompath/tmp/hive',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_conf_dir = '/etc/hadoop/conf',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -586,6 +598,7 @@ class TestHiveServer(RMFTestCase):
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -920,6 +933,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
     copy_to_hdfs_mock.assert_any_call("tez", "hadoop", "hdfs", host_sys_prepped=False)
     self.assertEquals(2, copy_to_hdfs_mock.call_count)
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
@@ -962,6 +976,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
     copy_to_hdfs_mock.assert_any_call("tez", "hadoop", "hdfs", host_sys_prepped=False)
     self.assertEquals(2, copy_to_hdfs_mock.call_count)
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
index 5646b75..6c4dc00 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
@@ -30,6 +30,7 @@ import resource_management.libraries.functions
 class TestServiceCheck(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "HIVE/0.12.0.2.0/package"
   STACK_VERSION = "2.0.6"
+  DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
 
 
   def test_service_check_default(self, socket_mock):
@@ -87,6 +88,7 @@ class TestServiceCheck(RMFTestCase):
         owner="hdfs"
     )
     self.assertResourceCalled('HdfsResource', '/tmp/idtest.ambari-qa.1431110511.43.pig',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -103,6 +105,7 @@ class TestServiceCheck(RMFTestCase):
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
     )
     self.assertResourceCalled('HdfsResource', '/tmp/idtest.ambari-qa.1431110511.43.in',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -119,6 +122,7 @@ class TestServiceCheck(RMFTestCase):
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -199,6 +203,7 @@ class TestServiceCheck(RMFTestCase):
         owner = "hdfs"
     )
     self.assertResourceCalled('HdfsResource', '/tmp/idtest.ambari-qa.1431110511.43.pig',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -215,6 +220,7 @@ class TestServiceCheck(RMFTestCase):
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
     )
     self.assertResourceCalled('HdfsResource', '/tmp/idtest.ambari-qa.1431110511.43.in',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -231,6 +237,7 @@ class TestServiceCheck(RMFTestCase):
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
index 7db4b26..1f44967 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
@@ -34,6 +34,7 @@ class TestOozieServer(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "OOZIE/4.0.0.2.0/package"
   STACK_VERSION = "2.0.6"
   UPGRADE_STACK_VERSION = "2.2"
+  DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
 
   def setUp(self):
     self.maxDiff = None
@@ -64,6 +65,7 @@ class TestOozieServer(RMFTestCase):
                        call_mocks = call_mocks
     )
     self.assertResourceCalled('HdfsResource', '/user/oozie',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -77,6 +79,7 @@ class TestOozieServer(RMFTestCase):
         mode = 0775,
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -279,6 +282,7 @@ class TestOozieServer(RMFTestCase):
                        call_mocks = call_mocks
     )
     self.assertResourceCalled('HdfsResource', '/user/oozie',
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
                               security_enabled = False,
                               hadoop_bin_dir = '/usr/bin',
                               keytab = UnknownConfigurationMock(),
@@ -295,6 +299,7 @@ class TestOozieServer(RMFTestCase):
                               mode = 0775,
                               )
     self.assertResourceCalled('HdfsResource', None,
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
                               security_enabled = False,
                               hadoop_bin_dir = '/usr/bin',
                               keytab = UnknownConfigurationMock(),
@@ -511,6 +516,7 @@ class TestOozieServer(RMFTestCase):
         user = 'oozie',
     )
     self.assertResourceCalled('HdfsResource', '/user/oozie/share',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -527,6 +533,7 @@ class TestOozieServer(RMFTestCase):
         mode = 0755,
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -606,6 +613,7 @@ class TestOozieServer(RMFTestCase):
         user = 'oozie',
     )
     self.assertResourceCalled('HdfsResource', '/user/oozie/share',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -622,6 +630,7 @@ class TestOozieServer(RMFTestCase):
         mode = 0755,
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -661,6 +670,7 @@ class TestOozieServer(RMFTestCase):
 
   def assert_configure_default(self):
     self.assertResourceCalled('HdfsResource', '/user/oozie',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_conf_dir = '/etc/hadoop/conf',
         keytab = UnknownConfigurationMock(),
@@ -674,6 +684,7 @@ class TestOozieServer(RMFTestCase):
         mode = 0775,
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -848,6 +859,7 @@ class TestOozieServer(RMFTestCase):
 
   def assert_configure_secured(self):
     self.assertResourceCalled('HdfsResource', '/user/oozie',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_conf_dir = '/etc/hadoop/conf',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -862,6 +874,7 @@ class TestOozieServer(RMFTestCase):
         mode = 0775,
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -1392,6 +1405,7 @@ class TestOozieServer(RMFTestCase):
       user = 'oozie', logoutput = True )
 
     self.assertResourceCalled('HdfsResource', '/user/oozie/share',
+      immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
       security_enabled = False,
       hadoop_bin_dir = '/usr/hdp/2.3.0.0-1234/hadoop/bin',
       keytab = UnknownConfigurationMock(),
@@ -1410,6 +1424,7 @@ class TestOozieServer(RMFTestCase):
       mode = 0755 )
 
     self.assertResourceCalled('HdfsResource', None,
+      immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
       security_enabled = False,
       hadoop_bin_dir = '/usr/hdp/2.3.0.0-1234/hadoop/bin',
       keytab = UnknownConfigurationMock(),
@@ -1465,6 +1480,7 @@ class TestOozieServer(RMFTestCase):
       user = 'oozie', logoutput = True )
 
     self.assertResourceCalled('HdfsResource', '/user/oozie/share',
+      immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
       security_enabled = False,
       hadoop_bin_dir = '/usr/hdp/2.3.0.0-1234/hadoop/bin',
       keytab = UnknownConfigurationMock(),
@@ -1483,6 +1499,7 @@ class TestOozieServer(RMFTestCase):
       mode = 0755 )
 
     self.assertResourceCalled('HdfsResource', None,
+      immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
       security_enabled = False,
       hadoop_bin_dir = '/usr/hdp/2.3.0.0-1234/hadoop/bin',
       keytab = UnknownConfigurationMock(),
@@ -1588,4 +1605,4 @@ class TestOozieServer(RMFTestCase):
 
     self.assertEquals(
       ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'oozie', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
-       mocks_dict['call'].call_args_list[0][0][0])
\ No newline at end of file
+       mocks_dict['call'].call_args_list[0][0][0])

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_service_check.py
index 462c361..84ecf76 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_service_check.py
@@ -26,6 +26,7 @@ from mock.mock import MagicMock, call, patch
 class TestServiceCheck(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "OOZIE/4.0.0.2.0/package"
   STACK_VERSION = "2.0.6"
+  DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
   
   def test_service_check_default(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
@@ -83,6 +84,7 @@ class TestServiceCheck(RMFTestCase):
         try_sleep = 5,
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/examples',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -94,6 +96,7 @@ class TestServiceCheck(RMFTestCase):
         type = 'directory',
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/examples',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -108,6 +111,7 @@ class TestServiceCheck(RMFTestCase):
         group = 'hadoop'
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/input-data',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -119,6 +123,7 @@ class TestServiceCheck(RMFTestCase):
         type = 'directory',
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/input-data',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -133,6 +138,7 @@ class TestServiceCheck(RMFTestCase):
         group = 'hadoop'
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py
index 1187e97..100d78a 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py
@@ -23,7 +23,8 @@ from stacks.utils.RMFTestCase import *
 class TestPigServiceCheck(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "PIG/0.12.0.2.0/package"
   STACK_VERSION = "2.0.6"
-  
+  DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
+
   def test_configure_default(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
                        classname = "PigServiceCheck",
@@ -33,6 +34,7 @@ class TestPigServiceCheck(RMFTestCase):
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/pigsmoke.out',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -48,6 +50,7 @@ class TestPigServiceCheck(RMFTestCase):
         action = ['delete_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/passwd',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -64,6 +67,7 @@ class TestPigServiceCheck(RMFTestCase):
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -107,6 +111,7 @@ class TestPigServiceCheck(RMFTestCase):
     )
     
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/pigsmoke.out',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -122,6 +127,7 @@ class TestPigServiceCheck(RMFTestCase):
         action = ['delete_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/passwd',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -138,6 +144,7 @@ class TestPigServiceCheck(RMFTestCase):
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
index 85959aa..bc0e07e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
@@ -33,6 +33,7 @@ origin_exists = os.path.exists
 class TestHistoryServer(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "YARN/2.1.0.2.0/package"
   STACK_VERSION = "2.0.6"
+  DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
   
   def test_configure_default(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historyserver.py",
@@ -58,6 +59,7 @@ class TestHistoryServer(RMFTestCase):
     pid_check_cmd = 'ls /var/run/hadoop-mapreduce/mapred/mapred-mapred-historyserver.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop-mapreduce/mapred/mapred-mapred-historyserver.pid` >/dev/null 2>&1'
 
     self.assertResourceCalled("HdfsResource", "/apps/tez/",
+                          immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
                           type="directory",
                           action=["create_on_execute"],
                           hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
@@ -76,6 +78,7 @@ class TestHistoryServer(RMFTestCase):
                           )
 
     self.assertResourceCalled("HdfsResource", "/apps/tez/lib/",
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
                               type="directory",
                               action=["create_on_execute"],
                               hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
@@ -94,6 +97,7 @@ class TestHistoryServer(RMFTestCase):
     )
 
     self.assertResourceCalled("HdfsResource", None,
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
                               action=['execute'],
                               hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                               user=u'hdfs',
@@ -197,6 +201,7 @@ class TestHistoryServer(RMFTestCase):
   def assert_configure_default(self):
 
     self.assertResourceCalled('HdfsResource', '/app-logs',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_conf_dir = '/etc/hadoop/conf',
         keytab = UnknownConfigurationMock(),
@@ -212,6 +217,7 @@ class TestHistoryServer(RMFTestCase):
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/tmp/entity-file-history/active',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -228,6 +234,7 @@ class TestHistoryServer(RMFTestCase):
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
     )
     self.assertResourceCalled('HdfsResource', '/mapred',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -240,6 +247,7 @@ class TestHistoryServer(RMFTestCase):
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
     )
     self.assertResourceCalled('HdfsResource', '/mapred/system',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -252,6 +260,7 @@ class TestHistoryServer(RMFTestCase):
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
     )
     self.assertResourceCalled('HdfsResource', '/mr-history/done',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_conf_dir = '/etc/hadoop/conf',
         keytab = UnknownConfigurationMock(),
@@ -267,6 +276,7 @@ class TestHistoryServer(RMFTestCase):
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -443,6 +453,7 @@ class TestHistoryServer(RMFTestCase):
   def assert_configure_secured(self):
 
     self.assertResourceCalled('HdfsResource', '/app-logs',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_conf_dir = '/etc/hadoop/conf',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -458,6 +469,7 @@ class TestHistoryServer(RMFTestCase):
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/tmp/entity-file-history/active',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -474,6 +486,7 @@ class TestHistoryServer(RMFTestCase):
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
     )
     self.assertResourceCalled('HdfsResource', '/mapred',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -486,6 +499,7 @@ class TestHistoryServer(RMFTestCase):
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
     )
     self.assertResourceCalled('HdfsResource', '/mapred/system',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -498,6 +512,7 @@ class TestHistoryServer(RMFTestCase):
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
     )
     self.assertResourceCalled('HdfsResource', '/mr-history/done',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_conf_dir = '/etc/hadoop/conf',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -513,6 +528,7 @@ class TestHistoryServer(RMFTestCase):
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -838,6 +854,7 @@ class TestHistoryServer(RMFTestCase):
     self.assert_call_to_get_hadoop_conf_dir()
 
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_service_check.py
index 82ad240..8d79deb 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_service_check.py
@@ -29,6 +29,7 @@ origin_exists = os.path.exists
 class TestServiceCheck(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "YARN/2.1.0.2.0/package"
   STACK_VERSION = "2.0.6"
+  DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
 
   def test_service_check_default(self):
 
@@ -40,6 +41,7 @@ class TestServiceCheck(RMFTestCase):
                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/mapredsmokeoutput',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -51,6 +53,7 @@ class TestServiceCheck(RMFTestCase):
         type = 'directory',
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/mapredsmokeinput',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -63,6 +66,7 @@ class TestServiceCheck(RMFTestCase):
         type = 'file',
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -97,6 +101,7 @@ class TestServiceCheck(RMFTestCase):
                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/mapredsmokeoutput',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -108,6 +113,7 @@ class TestServiceCheck(RMFTestCase):
         type = 'directory',
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/mapredsmokeinput',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -120,6 +126,7 @@ class TestServiceCheck(RMFTestCase):
         type = 'file',
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json b/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
index e45303d..ff78609 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
@@ -3,6 +3,7 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
@@ -442,6 +443,7 @@
             "min_user_id": "1000"
         }, 
         "hadoop-env": {
+            "hdfs_tmp_dir": "/tmp",
             "namenode_opt_maxnewsize": "200m", 
             "hdfs_log_dir_prefix": "/var/log/hadoop",
             "namenode_heapsize": "1024m", 
@@ -472,6 +474,7 @@
             "hive_database": "New MySQL Database"
         },
       "cluster-env": {
+        "managed_hdfs_resource_property_names": "",
         "security_enabled": "false",
         "hdfs_user_principal" : "",
         "hdfs_user_keytab" : "",

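On the fixture side, every command JSON in this patch grows the same hostLevelParams entry; note that not_managed_hdfs_path_list is a JSON-encoded string, not a native list. A short sketch of the decoding step the agent-side parameters would need (json.loads is standard library; the surrounding names are illustrative):

    import json

    host_level_params = {
        "not_managed_hdfs_path_list":
            '["/apps/hive/warehouse","/apps/falcon","/mr-history/done","/app-logs","/tmp"]'
    }

    # Decode the string into the list the tests pass around as immutable_paths.
    immutable_paths = json.loads(host_level_params["not_managed_hdfs_path_list"])
    assert immutable_paths == ['/apps/hive/warehouse', '/apps/falcon',
                               '/mr-history/done', '/app-logs', '/tmp']
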
http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json b/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
index d3f7f15..47ab0a3 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
@@ -47,6 +47,7 @@
     "clusterName": "c1", 
     "hostname": "c6403.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://10.0.0.15:8080/resources/", 
@@ -572,6 +573,7 @@
             "zk_user": "zookeeper"
         }, 
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "kerberos_domain": "EXAMPLE.COM",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json
index 92d2052..f8d9b21 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json
@@ -3,6 +3,7 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
@@ -428,6 +429,7 @@
             "min_user_id": "1000"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "smokeuser": "ambari-qa",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index 590fb23..fa1ef22 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -3,6 +3,7 @@
     "clusterName": "c1",
     "hostname": "c6401.ambari.apache.org",
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "agentCacheDir": "/var/lib/ambari-agent/cache",
@@ -505,6 +506,7 @@
             "is_supported_yarn_ranger": "false"
         },
         "hadoop-env": {
+            "hdfs_tmp_dir": "/tmp",
             "namenode_opt_maxnewsize": "200m",
             "hdfs_log_dir_prefix": "/var/log/hadoop",
             "namenode_heapsize": "1024m",
@@ -538,6 +540,7 @@
             "xml_configurations_supported" : "false"
         },
       "cluster-env": {
+          "managed_hdfs_resource_property_names": "",
           "security_enabled": "false",
           "hdfs_user_principal" : "",
           "hdfs_user_keytab" : "",
@@ -612,6 +615,7 @@
             "storm_user": "storm"
         },
         "falcon-env": {
+            "falcon_apps_hdfs_dir": "/apps/falcon",
             "falcon_port": "15000",
             "falcon_pid_dir": "/var/run/falcon",
             "falcon_log_dir": "/var/log/falcon",

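default.json additionally pins hdfs_tmp_dir and falcon_apps_hdfs_dir, which line up with the '/tmp' and '/apps/falcon' entries of the default list above; presumably the server derives the not-managed list from these per-service settings rather than hardcoding it. The assembly below is illustrative only (property names come from this patch's fixtures, the derivation logic is an assumption):

    # Illustrative only: build the not-managed list from the config values
    # added in these fixtures; this is not Ambari's actual server code.
    hadoop_env = {"hdfs_tmp_dir": "/tmp"}
    falcon_env = {"falcon_apps_hdfs_dir": "/apps/falcon"}
    not_managed = ["/apps/hive/warehouse", falcon_env["falcon_apps_hdfs_dir"],
                   "/mr-history/done", "/app-logs", hadoop_env["hdfs_tmp_dir"]]
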
http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/default.non_gmetad_host.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.non_gmetad_host.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.non_gmetad_host.json
index 1b0a995..26dc966 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.non_gmetad_host.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.non_gmetad_host.json
@@ -3,7 +3,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
         "jce_name": "UnlimitedJCEPolicyJDK7.zip", 
@@ -420,6 +421,7 @@
             "min_user_id": "1000"
         },
         "cluster-env": {
+          "managed_hdfs_resource_property_names": "",
           "security_enabled": "false",
           "ignore_groupsusers_create": "false",
           "smokeuser": "ambari-qa",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
index dd932d3..688dbe0 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
@@ -3,6 +3,7 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agentCacheDir": "/var/lib/ambari-agent/cache",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
@@ -530,6 +531,7 @@
             "xml_configurations_supported" : "false"
         },
       "cluster-env": {
+          "managed_hdfs_resource_property_names": "",
           "security_enabled": "false",
           "hdfs_user_principal" : "",
           "hdfs_user_keytab" : "",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
index 1918606..b1cc572 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
@@ -3,6 +3,7 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
@@ -459,6 +460,7 @@
             "min_user_id": "1000"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "smokeuser": "ambari-qa",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
index 33270ce..66f103b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
@@ -3,6 +3,7 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
@@ -276,6 +277,7 @@
             "hive_database": "New MySQL Database"
         },
       "cluster-env": {
+        "managed_hdfs_resource_property_names": "",
         "security_enabled": "false",
         "hdfs_user_principal" : "",
         "hdfs_user_keytab" : "",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
index 6f1aef1..d2f5a05 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
@@ -3,6 +3,7 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
@@ -278,6 +279,7 @@
             "hive_database": "New MySQL Database"
         },
       "cluster-env": {
+        "managed_hdfs_resource_property_names": "",
         "security_enabled": "false",
         "hdfs_user_principal" : "",
         "hdfs_user_keytab" : "",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
index a0beca9..4648ffc 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
@@ -3,6 +3,7 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
@@ -531,6 +532,7 @@
             "xml_configurations_supported" : "false"
         },
       "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "hdfs_user_principal" : "",
             "hdfs_user_keytab" : "",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
index f2ec21b..4d58ded 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
@@ -3,6 +3,7 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
@@ -501,6 +502,7 @@
             "hive_database": "New MySQL Database"
         },
       "cluster-env": {
+        "managed_hdfs_resource_property_names": "",
         "security_enabled": "false",
         "hdfs_user_principal" : "",
         "hdfs_user_keytab" : "",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json
index cec5431..b2543c8 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json
@@ -3,6 +3,7 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
@@ -470,6 +471,7 @@
             "hive_database": "New MySQL Database"
         },
       "cluster-env": {
+        "managed_hdfs_resource_property_names": "",
         "security_enabled": "false",
         "hdfs_user_principal" : "",
         "hdfs_user_keytab" : "",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
index 3acc31b..79f0f86 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
@@ -3,6 +3,7 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
@@ -462,6 +463,7 @@
             "hive_database": "New MySQL Database"
         },
       "cluster-env": {
+        "managed_hdfs_resource_property_names": "",
         "security_enabled": "false",
         "ignore_groupsusers_create": "false",
         "smokeuser": "ambari-qa",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json
index ce49e37..d9bc7f9 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json
@@ -3,7 +3,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
         "repo_info": "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]",
@@ -460,6 +461,7 @@
             "hive_database": "New MySQL Database"
         },
       "cluster-env": {
+        "managed_hdfs_resource_property_names": "",
         "security_enabled": "false",
         "ignore_groupsusers_create": "false",
         "smokeuser": "ambari-qa",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/flume_only.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_only.json b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_only.json
index 4f81af8..1e39008 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_only.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_only.json
@@ -3,7 +3,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
         "repo_info": "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/flume_target.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_target.json b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_target.json
index 8da4816..8648395 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_target.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_target.json
@@ -3,7 +3,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
         "jce_name": "UnlimitedJCEPolicyJDK7.zip", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json
index adb97c9..cfd5b17 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json
@@ -4,6 +4,7 @@
     "hostname": "c6401.ambari.apache.org", 
     "passiveInfo": [], 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
@@ -370,6 +371,7 @@
             "min_user_id": "1000"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "smokeuser": "ambari-qa",
@@ -377,6 +379,7 @@
             "user_group": "hadoop"
         },
         "hadoop-env": {
+            "hdfs_tmp_dir": "/tmp",
             "namenode_opt_maxnewsize": "200m",
             "hdfs_log_dir_prefix": "/var/log/hadoop",
             "namenode_heapsize": "1024m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
index 40e18d2..ee88983 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
@@ -4,6 +4,7 @@
     "hostname": "c6402.ambari.apache.org", 
     "passiveInfo": [], 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
@@ -370,6 +371,7 @@
             "min_user_id": "1000"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "smokeuser": "ambari-qa",
@@ -377,6 +379,7 @@
             "user_group": "hadoop"
         },
         "hadoop-env": {
+            "hdfs_tmp_dir": "/tmp",
             "namenode_opt_maxnewsize": "200m",
             "hdfs_log_dir_prefix": "/var/log/hadoop",
             "namenode_heapsize": "1024m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json
index b9e3b44..1a6dd39 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json
@@ -4,6 +4,7 @@
     "hostname": "c6402.ambari.apache.org", 
     "passiveInfo": [], 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
@@ -371,6 +372,7 @@
             "min_user_id": "1000"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "smokeuser": "ambari-qa",
@@ -378,6 +380,7 @@
             "user_group": "hadoop"
         },
         "hadoop-env": {
+            "hdfs_tmp_dir": "/tmp",
             "namenode_opt_maxnewsize": "200m",
             "hdfs_log_dir_prefix": "/var/log/hadoop",
             "namenode_heapsize": "1024m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
index a0a2ee0..6b4873a 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
@@ -4,6 +4,7 @@
     "hostname": "c6401.ambari.apache.org", 
     "passiveInfo": [], 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
@@ -373,6 +374,7 @@
             "min_user_id": "1000"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "smokeuser": "ambari-qa",
@@ -380,6 +382,7 @@
             "user_group": "hadoop"
         },
         "hadoop-env": {
+            "hdfs_tmp_dir": "/tmp",
             "namenode_opt_maxnewsize": "200m",
             "hdfs_log_dir_prefix": "/var/log/hadoop",
             "namenode_heapsize": "1024m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
index 5799708..bf815a2 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
@@ -4,6 +4,7 @@
     "hostname": "c6401.ambari.apache.org", 
     "passiveInfo": [], 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
@@ -349,6 +350,7 @@
             "min_user_id": "1000" 
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "true",
             "ignore_groupsusers_create": "false",
             "smokeuser": "ambari-qa",
@@ -358,6 +360,7 @@
             "kinit_path_local": "/usr/bin"
         },
         "hadoop-env": {
+            "hdfs_tmp_dir": "/tmp",
             "namenode_opt_maxnewsize": "200m",
             "hdfs_log_dir_prefix": "/var/log/hadoop",
             "namenode_heapsize": "1024m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
index 209651d..178fe7b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
@@ -46,6 +46,7 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://10.0.0.15:8080/resources/", 
@@ -566,6 +567,7 @@
             "zk_user": "zookeeper"
         }, 
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "kerberos_domain": "EXAMPLE.COM",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
index bb9a3a2..555c160 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
@@ -46,6 +46,7 @@
     "clusterName": "c1", 
     "hostname": "c6403.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://10.0.0.15:8080/resources/", 
@@ -595,6 +596,7 @@
             "zk_user": "zookeeper"
         }, 
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "kerberos_domain": "EXAMPLE.COM",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json
index adac072..cf4094f 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json
@@ -6,6 +6,7 @@
     "componentName": "", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://10.0.0.15:8080/resources/", 
@@ -108,6 +109,7 @@
             "fs.defaultFS": "hdfs://c6403.org:8020"
         },   
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "kerberos_domain": "EXAMPLE.COM",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2-phoenix.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2-phoenix.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2-phoenix.json
index fea5e34..44db842 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2-phoenix.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2-phoenix.json
@@ -46,6 +46,7 @@
     "clusterName": "c1", 
     "hostname": "c6403.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://10.0.0.15:8080/resources/", 
@@ -567,6 +568,7 @@
             "zk_user": "zookeeper"
         }, 
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "kerberos_domain": "EXAMPLE.COM",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
index 71af43a..58827ae 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
@@ -46,6 +46,7 @@
     "clusterName": "c1", 
     "hostname": "c6403.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://10.0.0.15:8080/resources/", 
@@ -566,6 +567,7 @@
             "zk_user": "zookeeper"
         }, 
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "kerberos_domain": "EXAMPLE.COM",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
index 0b1be03..f572413 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
@@ -3,6 +3,7 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
@@ -231,6 +232,7 @@
             "xml_configurations_supported" : "false"
         },
       "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "hdfs_user_principal" : "",
             "hdfs_user_keytab" : "",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
index 47bb75f..5147603 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
@@ -3,6 +3,7 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
@@ -231,6 +232,7 @@
             "xml_configurations_supported" : "false"
         },
       "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "hdfs_user_principal" : "",
             "hdfs_user_keytab" : "",


[02/21] ambari git commit: AMBARI-15361. Fix ordering of Alter table calls, which could result in a Region Close issue. Further improvements. (swagle)

Posted by jl...@apache.org.
AMBARI-15361. Fix ordering of Alter table calls, which could result in a Region Close issue. Further improvements. (swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0cc8382f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0cc8382f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0cc8382f

Branch: refs/heads/AMBARI-13364
Commit: 0cc8382f1f5b6e9bde2eb7be0d7d3b45f37cd75a
Parents: 335ddb1
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Wed Mar 9 16:38:51 2016 -0800
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Wed Mar 9 16:38:51 2016 -0800

----------------------------------------------------------------------
 .../timeline/HBaseTimelineMetricStore.java      |   4 +-
 .../metrics/timeline/PhoenixHBaseAccessor.java  | 145 +++++++------------
 .../timeline/ITPhoenixHBaseAccessor.java        |  29 +++-
 3 files changed, 75 insertions(+), 103 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0cc8382f/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
index 465fe95..2f080e3 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
@@ -86,9 +86,7 @@ public class HBaseTimelineMetricStore extends AbstractService implements Timelin
       metricMetadataManager = new TimelineMetricMetadataManager(hBaseAccessor, metricsConf);
       metricMetadataManager.initializeMetadata();
       // Initialize policies before TTL update
-      hBaseAccessor.initPolicies();
-      // Alter TTL on tables
-      hBaseAccessor.alterMetricTableTTL();
+      hBaseAccessor.initPoliciesAndTTL();
 
       if (Boolean.parseBoolean(metricsConf.get(USE_GROUPBY_AGGREGATOR_QUERIES, "true"))) {
         LOG.info("Using group by aggregators for aggregating host and cluster metrics.");

http://git-wip-us.apache.org/repos/asf/ambari/blob/0cc8382f/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
index 8cfe9a9..eb48bb2 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
@@ -22,6 +22,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.util.RetryCounter;
@@ -46,7 +47,6 @@ import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.SplitByMetricNamesCondition;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.apache.phoenix.exception.PhoenixIOException;
-import org.apache.phoenix.exception.SQLExceptionCode;
 import org.codehaus.jackson.map.ObjectMapper;
 import org.codehaus.jackson.type.TypeReference;
 
@@ -84,7 +84,6 @@ import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.ti
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.OUT_OFF_BAND_DATA_TIME_ALLOWANCE;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.PRECISION_TABLE_SPLIT_POINTS;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.PRECISION_TABLE_TTL;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.ALTER_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.CREATE_HOSTED_APPS_METADATA_TABLE_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.CREATE_METRICS_AGGREGATE_TABLE_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.CREATE_METRICS_CLUSTER_AGGREGATE_GROUPED_TABLE_SQL;
@@ -150,14 +149,7 @@ public class PhoenixHBaseAccessor {
   static final String BLOCKING_STORE_FILES_KEY =
     "hbase.hstore.blockingStoreFiles";
 
-  private final String precisionTtl;
-  private final String hostMinTtl;
-  private final String hostHourTtl;
-  private final String hostDailyTtl;
-  private final String clusterSecTtl;
-  private final String clusterMinTtl;
-  private final String clusterHourTtl;
-  private final String clusterDailyTtl;
+  private HashMap<String, String> tableTTL = new HashMap<>();
 
   public PhoenixHBaseAccessor(Configuration hbaseConf,
                               Configuration metricsConf){
@@ -183,14 +175,14 @@ public class PhoenixHBaseAccessor {
       DEFAULT_OUT_OF_BAND_TIME_ALLOWANCE);
     this.skipBlockCacheForAggregatorsEnabled = metricsConf.getBoolean(AGGREGATORS_SKIP_BLOCK_CACHE, false);
 
-    precisionTtl = getDaysInSeconds(metricsConf.get(PRECISION_TABLE_TTL, "1"));           //1 day
-    hostMinTtl = getDaysInSeconds(metricsConf.get(HOST_MINUTE_TABLE_TTL, "7"));           //7 days
-    hostHourTtl = getDaysInSeconds(metricsConf.get(HOST_HOUR_TABLE_TTL, "30"));           //30 days
-    hostDailyTtl = getDaysInSeconds(metricsConf.get(HOST_DAILY_TABLE_TTL, "365"));        //1 year
-    clusterSecTtl = getDaysInSeconds(metricsConf.get(CLUSTER_SECOND_TABLE_TTL, "7"));     //7 days
-    clusterMinTtl = getDaysInSeconds(metricsConf.get(CLUSTER_MINUTE_TABLE_TTL, "30"));    //30 days
-    clusterHourTtl = getDaysInSeconds(metricsConf.get(CLUSTER_HOUR_TABLE_TTL, "365"));    //1 year
-    clusterDailyTtl = getDaysInSeconds(metricsConf.get(CLUSTER_DAILY_TABLE_TTL, "730"));  //2 years
+    tableTTL.put(METRICS_RECORD_TABLE_NAME, getDaysInSeconds(metricsConf.get(PRECISION_TABLE_TTL, "1")));                            //1 day
+    tableTTL.put(METRICS_AGGREGATE_MINUTE_TABLE_NAME, getDaysInSeconds(metricsConf.get(HOST_MINUTE_TABLE_TTL, "7")));                //7 days
+    tableTTL.put(METRICS_AGGREGATE_HOURLY_TABLE_NAME, getDaysInSeconds(metricsConf.get(HOST_HOUR_TABLE_TTL, "30")));                 //30 days
+    tableTTL.put(METRICS_AGGREGATE_DAILY_TABLE_NAME, getDaysInSeconds(metricsConf.get(HOST_DAILY_TABLE_TTL, "365")));                //1 year
+    tableTTL.put(METRICS_CLUSTER_AGGREGATE_TABLE_NAME, getDaysInSeconds(metricsConf.get(CLUSTER_SECOND_TABLE_TTL, "7")));            //7 days
+    tableTTL.put(METRICS_CLUSTER_AGGREGATE_MINUTE_TABLE_NAME, getDaysInSeconds(metricsConf.get(CLUSTER_MINUTE_TABLE_TTL, "30")));    //30 days
+    tableTTL.put(METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME, getDaysInSeconds(metricsConf.get(CLUSTER_HOUR_TABLE_TTL, "365")));     //1 year
+    tableTTL.put(METRICS_CLUSTER_AGGREGATE_DAILY_TABLE_NAME, getDaysInSeconds(metricsConf.get(CLUSTER_DAILY_TABLE_TTL, "730")));     //2 years
   }
 
   private static TimelineMetric getLastTimelineMetricFromResultSet(ResultSet rs)
@@ -254,73 +246,6 @@ public class PhoenixHBaseAccessor {
     return dataSource.getHBaseAdmin();
   }
 
-  /**
-   * Set TTL on tables based on user settings
-   */
-  protected void alterMetricTableTTL() {
-    Connection conn = null;
-    Statement stmt = null;
-
-    try {
-      LOG.info("Initializing metrics schema...");
-      conn = getConnectionRetryingOnException();
-      stmt = conn.createStatement();
-
-      //alter TTL options to update tables
-      stmt.executeUpdate(String.format(ALTER_SQL,
-        METRICS_RECORD_TABLE_NAME,
-        precisionTtl));
-      stmt.executeUpdate(String.format(ALTER_SQL,
-        METRICS_AGGREGATE_MINUTE_TABLE_NAME,
-        hostMinTtl));
-      stmt.executeUpdate(String.format(ALTER_SQL,
-        METRICS_AGGREGATE_HOURLY_TABLE_NAME,
-        hostHourTtl));
-      stmt.executeUpdate(String.format(ALTER_SQL,
-        METRICS_AGGREGATE_DAILY_TABLE_NAME,
-        hostDailyTtl));
-      stmt.executeUpdate(String.format(ALTER_SQL,
-        METRICS_CLUSTER_AGGREGATE_TABLE_NAME,
-        clusterSecTtl));
-      stmt.executeUpdate(String.format(ALTER_SQL,
-        METRICS_CLUSTER_AGGREGATE_MINUTE_TABLE_NAME,
-        clusterMinTtl));
-      stmt.executeUpdate(String.format(ALTER_SQL,
-        METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME,
-        clusterHourTtl));
-      stmt.executeUpdate(String.format(ALTER_SQL,
-        METRICS_CLUSTER_AGGREGATE_DAILY_TABLE_NAME,
-        clusterDailyTtl));
-
-      conn.commit();
-
-
-    } catch (InterruptedException e) {
-      LOG.warn("Error updating TTL on tables.", e);
-    } catch (SQLException sql) {
-      if (sql.getErrorCode() == SQLExceptionCode.SET_UNSUPPORTED_PROP_ON_ALTER_TABLE.getErrorCode()) {
-        LOG.warn("Update TTL on tables is unsupported by the phoenix version. " + sql.getMessage());
-      } else {
-        LOG.warn("Error updating TTL on tables.", sql);
-      }
-    } finally {
-      if (stmt != null) {
-        try {
-          stmt.close();
-        } catch (SQLException e) {
-          // Ignore
-        }
-      }
-      if (conn != null) {
-        try {
-          conn.close();
-        } catch (SQLException e) {
-          // Ignore
-        }
-      }
-    }
-  }
-
   protected void initMetricSchema() {
     Connection conn = null;
     Statement stmt = null;
@@ -344,33 +269,47 @@ public class PhoenixHBaseAccessor {
 
       // Host level
       String precisionSql = String.format(CREATE_METRICS_TABLE_SQL,
-        encoding, precisionTtl, compression);
+        encoding, tableTTL.get(METRICS_RECORD_TABLE_NAME), compression);
       String splitPoints = metricsConf.get(PRECISION_TABLE_SPLIT_POINTS);
       if (!StringUtils.isEmpty(splitPoints)) {
         precisionSql += getSplitPointsStr(splitPoints);
       }
       stmt.executeUpdate(precisionSql);
       stmt.executeUpdate(String.format(CREATE_METRICS_AGGREGATE_TABLE_SQL,
-        METRICS_AGGREGATE_MINUTE_TABLE_NAME, encoding, hostMinTtl, compression));
+        METRICS_AGGREGATE_MINUTE_TABLE_NAME, encoding,
+        tableTTL.get(METRICS_AGGREGATE_MINUTE_TABLE_NAME),
+        compression));
       stmt.executeUpdate(String.format(CREATE_METRICS_AGGREGATE_TABLE_SQL,
-        METRICS_AGGREGATE_HOURLY_TABLE_NAME, encoding, hostHourTtl, compression));
+        METRICS_AGGREGATE_HOURLY_TABLE_NAME, encoding,
+        tableTTL.get(METRICS_AGGREGATE_HOURLY_TABLE_NAME),
+        compression));
       stmt.executeUpdate(String.format(CREATE_METRICS_AGGREGATE_TABLE_SQL,
-        METRICS_AGGREGATE_DAILY_TABLE_NAME, encoding, hostDailyTtl, compression));
+        METRICS_AGGREGATE_DAILY_TABLE_NAME, encoding,
+        tableTTL.get(METRICS_AGGREGATE_DAILY_TABLE_NAME),
+        compression));
 
       // Cluster level
       String aggregateSql = String.format(CREATE_METRICS_CLUSTER_AGGREGATE_TABLE_SQL,
-        METRICS_CLUSTER_AGGREGATE_TABLE_NAME, encoding, clusterMinTtl, compression);
+        METRICS_CLUSTER_AGGREGATE_TABLE_NAME, encoding,
+        tableTTL.get(METRICS_CLUSTER_AGGREGATE_TABLE_NAME),
+        compression);
       splitPoints = metricsConf.get(AGGREGATE_TABLE_SPLIT_POINTS);
       if (!StringUtils.isEmpty(splitPoints)) {
         aggregateSql += getSplitPointsStr(splitPoints);
       }
       stmt.executeUpdate(aggregateSql);
       stmt.executeUpdate(String.format(CREATE_METRICS_CLUSTER_AGGREGATE_GROUPED_TABLE_SQL,
-        METRICS_CLUSTER_AGGREGATE_MINUTE_TABLE_NAME, encoding, clusterHourTtl, compression));
+        METRICS_CLUSTER_AGGREGATE_MINUTE_TABLE_NAME, encoding,
+        tableTTL.get(METRICS_CLUSTER_AGGREGATE_MINUTE_TABLE_NAME),
+        compression));
       stmt.executeUpdate(String.format(CREATE_METRICS_CLUSTER_AGGREGATE_GROUPED_TABLE_SQL,
-        METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME, encoding, clusterHourTtl, compression));
+        METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME, encoding,
+        tableTTL.get(METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME),
+        compression));
       stmt.executeUpdate(String.format(CREATE_METRICS_CLUSTER_AGGREGATE_GROUPED_TABLE_SQL,
-        METRICS_CLUSTER_AGGREGATE_DAILY_TABLE_NAME, encoding, clusterDailyTtl, compression));
+        METRICS_CLUSTER_AGGREGATE_DAILY_TABLE_NAME, encoding,
+        tableTTL.get(METRICS_CLUSTER_AGGREGATE_DAILY_TABLE_NAME),
+        compression));
 
 
       conn.commit();
@@ -398,7 +337,7 @@ public class PhoenixHBaseAccessor {
     }
   }
 
-  protected void initPolicies() {
+  protected void initPoliciesAndTTL() {
     boolean enableNormalizer = hbaseConf.getBoolean("hbase.normalizer.enabled", true);
     boolean enableFifoCompaction = metricsConf.getBoolean("timeline.metrics.hbase.fifo.compaction.enabled", true);
 
@@ -451,6 +390,20 @@ public class PhoenixHBaseAccessor {
               " = " + 300 + " for " + tableName);
             modifyTable = true;
           }
+          // Change TTL setting to match user configuration
+          HColumnDescriptor[] columnFamilies = tableDescriptor.getColumnFamilies();
+          if (columnFamilies != null) {
+            for (HColumnDescriptor family : columnFamilies) {
+              String ttlValue = family.getValue("TTL");
+              if (StringUtils.isEmpty(ttlValue) ||
+                  !ttlValue.trim().equals(tableTTL.get(tableName))) {
+                family.setValue("TTL", tableTTL.get(tableName));
+                LOG.info("Setting TTL on table: " + tableName + " to : " +
+                  tableTTL.get(tableName) + " seconds.");
+                modifyTable = true;
+              }
+            }
+          }
 
           // Persist only if anything changed
           if (modifyTable) {
@@ -1392,8 +1345,8 @@ public class PhoenixHBaseAccessor {
     return metadataMap;
   }
 
-  private String getDaysInSeconds(String daysString) {
+  String getDaysInSeconds(String daysString) {
     double days = Double.valueOf(daysString.trim());
-    return String.valueOf((int)(days*86400));
+    return String.valueOf((int) (days * 86400));
   }
 }
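
The patch above replaces the Phoenix ALTER-SQL path with direct manipulation of the HBase table descriptor inside initPoliciesAndTTL(). A standalone sketch of that descriptor-based TTL alignment, using only the old-style HBaseAdmin API the patch itself imports (the table name and TTL value below are hypothetical, and depending on HBase configuration an online modifyTable may require online schema updates to be enabled):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class TtlAlignSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    String tableName = "METRIC_RECORD";   // hypothetical metrics table name
    String desiredTtl = "86400";          // one day, expressed in seconds
    HTableDescriptor descriptor = admin.getTableDescriptor(tableName.getBytes());
    boolean modifyTable = false;
    for (HColumnDescriptor family : descriptor.getColumnFamilies()) {
      String ttlValue = family.getValue("TTL");
      // Only touch the family when its TTL differs from the desired value.
      if (ttlValue == null || !ttlValue.trim().equals(desiredTtl)) {
        family.setValue("TTL", desiredTtl);
        modifyTable = true;
      }
    }
    if (modifyTable) {
      // Persist only if anything changed, mirroring the patch's approach.
      admin.modifyTable(tableName.getBytes(), descriptor);
    }
    admin.close();
  }
}
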

http://git-wip-us.apache.org/repos/asf/ambari/blob/0cc8382f/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java
index e3e037a..93ba770 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline
 
 import junit.framework.Assert;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
@@ -38,6 +39,7 @@ import org.junit.Before;
 import org.junit.Test;
 
 import java.io.IOException;
+import java.lang.reflect.Field;
 import java.sql.Connection;
 import java.sql.SQLException;
 import java.sql.Statement;
@@ -53,10 +55,10 @@ import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.ti
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.MetricTestHelper.createEmptyTimelineMetric;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.MetricTestHelper.createMetricHostAggregate;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.MetricTestHelper.prepareSingleTimelineMetric;
-import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor.DEFAULT_COMPACTION_POLICY_CLASS;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor.FIFO_COMPACTION_POLICY_CLASS;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor.HSTORE_COMPACTION_CLASS_KEY;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_AGGREGATE_MINUTE_TABLE_NAME;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_RECORD_TABLE_NAME;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.PHOENIX_TABLES;
 
 
@@ -335,9 +337,9 @@ public class ITPhoenixHBaseAccessor extends AbstractMiniHBaseClusterTest {
   }
 
   @Test
-  public void testInitPolicies() throws Exception {
+  public void testInitPoliciesAndTTL() throws Exception {
     HBaseAdmin hBaseAdmin = hdb.getHBaseAdmin();
-
+    String precisionTtl = "";
     // Verify policies are unset
     for (String tableName : PHOENIX_TABLES) {
       HTableDescriptor tableDescriptor = hBaseAdmin.getTableDescriptor(tableName.getBytes());
@@ -345,9 +347,22 @@ public class ITPhoenixHBaseAccessor extends AbstractMiniHBaseClusterTest {
       Assert.assertFalse("Normalizer disabled by default.", tableDescriptor.isNormalizationEnabled());
       Assert.assertNull("Default compaction policy is null.",
         tableDescriptor.getConfigurationValue(HSTORE_COMPACTION_CLASS_KEY));
+
+      for (HColumnDescriptor family : tableDescriptor.getColumnFamilies()) {
+        if (tableName.equals(METRICS_RECORD_TABLE_NAME)) {
+          precisionTtl = family.getValue("TTL");
+        }
+      }
+      Assert.assertEquals("Precision TTL value.", hdb.getDaysInSeconds("1"), precisionTtl);
     }
 
-    hdb.initPolicies();
+    Field f = PhoenixHBaseAccessor.class.getDeclaredField("tableTTL");
+    f.setAccessible(true);
+    Map<String, String> precisionValues = (Map<String, String>) f.get(hdb);
+    precisionValues.put(METRICS_RECORD_TABLE_NAME, hdb.getDaysInSeconds("2"));
+    f.set(hdb, precisionValues);
+
+    hdb.initPoliciesAndTTL();
 
     // Verify expected policies are set
     boolean normalizerEnabled = false;
@@ -364,11 +379,17 @@ public class ITPhoenixHBaseAccessor extends AbstractMiniHBaseClusterTest {
         if (!normalizerEnabled || compactionPolicy == null) {
           Thread.sleep(2000l);
         }
+        if (tableName.equals(METRICS_RECORD_TABLE_NAME)) {
+          for (HColumnDescriptor family : tableDescriptor.getColumnFamilies()) {
+            precisionTtl = family.getValue("TTL");
+          }
+        }
       }
     }
 
     Assert.assertTrue("Normalizer enabled.", normalizerEnabled);
     Assert.assertEquals("FIFO compaction policy is set.", FIFO_COMPACTION_POLICY_CLASS, compactionPolicy);
+    Assert.assertEquals("Precision TTL value not changed.", hdb.getDaysInSeconds("2"), precisionTtl);
 
     hBaseAdmin.close();
   }
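
The updated test reaches into the private tableTTL map through reflection in order to simulate a user-changed TTL before calling initPoliciesAndTTL(). The reflection pattern in isolation, with hypothetical class and field names:

import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.Map;

public class ReflectionOverrideSketch {
  static class Accessor {
    private final Map<String, String> tableTTL = new HashMap<>();
  }

  public static void main(String[] args) throws Exception {
    Accessor accessor = new Accessor();
    Field f = Accessor.class.getDeclaredField("tableTTL");
    f.setAccessible(true); // bypass private access, exactly as the test does
    @SuppressWarnings("unchecked")
    Map<String, String> ttl = (Map<String, String>) f.get(accessor);
    ttl.put("METRIC_RECORD", "172800"); // e.g. override to two days in seconds
    System.out.println(ttl);
  }
}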


[16/21] ambari git commit: AMBARI-15356 filtering on version 'not installed' hosts from the versions page does not work. (atkach)

Posted by jl...@apache.org.
AMBARI-15356 filtering on version 'not installed' hosts from the versions page does not work. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e1686f51
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e1686f51
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e1686f51

Branch: refs/heads/AMBARI-13364
Commit: e1686f51a47621dce195d805286377470f45c1c9
Parents: e1762a4
Author: Andrii Tkach <at...@hortonworks.com>
Authored: Thu Mar 10 12:52:01 2016 +0200
Committer: Andrii Tkach <at...@hortonworks.com>
Committed: Thu Mar 10 17:48:41 2016 +0200

----------------------------------------------------------------------
 ambari-web/app/controllers/main/host.js         | 16 +++++++++-----
 ambari-web/app/views/common/filter_view.js      | 13 ++++++-----
 .../stack_upgrade/upgrade_version_box_view.js   | 23 ++++++++++----------
 ambari-web/test/controllers/main/host_test.js   |  6 ++---
 .../upgrade_version_box_view_test.js            |  3 ++-
 5 files changed, 34 insertions(+), 27 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e1686f51/ambari-web/app/controllers/main/host.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/host.js b/ambari-web/app/controllers/main/host.js
index 5d7e268..efc0ba2 100644
--- a/ambari-web/app/controllers/main/host.js
+++ b/ambari-web/app/controllers/main/host.js
@@ -522,12 +522,13 @@ App.MainHostController = Em.ArrayController.extend(App.TableServerMixin, {
   /**
    * Filter hosts by stack version and state
    * @param {String} displayName
-   * @param {String} state
+   * @param {Array} states
    */
-  filterByStack: function (displayName, state) {
-    if (!displayName || !state) return;
+  filterByStack: function (displayName, states) {
+    if (Em.isNone(displayName) || Em.isNone(states) || !states.length) return;
     var colPropAssoc = this.get('colPropAssoc');
     var map = this.get('labelValueMap');
+    var displayStates = [];
 
     var versionFilter = {
       iColumn: 16,
@@ -536,14 +537,17 @@ App.MainHostController = Em.ArrayController.extend(App.TableServerMixin, {
     };
     var stateFilter = {
       iColumn: 17,
-      value: state.toUpperCase(),
+      value: states,
       type: 'string'
     };
     map["Stack Version"] = colPropAssoc[versionFilter.iColumn];
     map["Version State"] = colPropAssoc[stateFilter.iColumn];
-    map[App.HostStackVersion.formatStatus(stateFilter.value)] = stateFilter.value;
+    stateFilter.value.forEach(function(state) {
+      map[App.HostStackVersion.formatStatus(state)] = state;
+      displayStates.push(App.HostStackVersion.formatStatus(state));
+    });
     var versionFilterStr = '"Stack Version": "' + versionFilter.value + '"';
-    var stateFilterStr = '"Version State": "' + App.HostStackVersion.formatStatus(stateFilter.value) + '"';
+    var stateFilterStr = '"Version State": "' + displayStates.join(',') + '"';
     App.db.setFilterConditions(this.get('name'), [versionFilter, stateFilter]);
     App.db.setComboSearchQuery(this.get('name'), [versionFilterStr, stateFilterStr].join(' '));
   },

http://git-wip-us.apache.org/repos/asf/ambari/blob/e1686f51/ambari-web/app/views/common/filter_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/filter_view.js b/ambari-web/app/views/common/filter_view.js
index 21c8c0e..be1e1a7 100644
--- a/ambari-web/app/views/common/filter_view.js
+++ b/ambari-web/app/views/common/filter_view.js
@@ -448,8 +448,9 @@ module.exports = {
             case false:
             case '=':
               return compareValue == rowValue;
+            default:
+              return false;
           }
-          return false;
         };
       case 'date':
         return function (rowValue, rangeExp) {
@@ -472,7 +473,6 @@ module.exports = {
             default:
               return false;
           }
-          return false;
         };
       case 'number':
         return function (rowValue, rangeExp) {
@@ -498,12 +498,11 @@ module.exports = {
             case '<':
               return compareValue > rowValue;
             case '>':
-              return compareValue < rowValue
+              return compareValue < rowValue;
             case '=':
             default:
               return compareValue === rowValue;
           }
-          return false;
         };
       case 'sub-resource':
         return function (origin, compareValue) {
@@ -513,8 +512,10 @@ module.exports = {
 
           return origin.some(function (item) {
             for (var i = 0, l = compareValue.length; i < l; i++) {
-              if(item.get(compareValue[i].property) !== compareValue[i].value) {
-                return false;
+              if (Array.isArray(compareValue[i].value)) {
+                if (!compareValue[i].value.contains(item.get(compareValue[i].property))) return false;
+              } else {
+                if (item.get(compareValue[i].property) !== compareValue[i].value) return false;
               }
             }
             return true;
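
The filter change above lets a single filter entry carry either one expected value or a list of acceptable values, which is what the 'not_installed' bucket now needs (INSTALLING, INSTALL_FAILED, OUT_OF_SYNC). The same scalar-or-list matching rule, sketched in Java for consistency with the other examples here (the real change is ambari-web JavaScript; all names below are hypothetical):

import java.util.Arrays;
import java.util.Collection;

public class ScalarOrListMatchSketch {
  // Accept a row when the filter value is a collection containing the row's
  // value, or when it is a single value equal to it.
  static boolean matches(Object filterValue, Object rowValue) {
    if (filterValue instanceof Collection) {
      return ((Collection<?>) filterValue).contains(rowValue);
    }
    return filterValue.equals(rowValue);
  }

  public static void main(String[] args) {
    System.out.println(matches(Arrays.asList("INSTALLING", "OUT_OF_SYNC"), "OUT_OF_SYNC")); // true
    System.out.println(matches("CURRENT", "INSTALLED")); // false
  }
}
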

http://git-wip-us.apache.org/repos/asf/ambari/blob/e1686f51/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
index e0c0ae2..9063af3 100644
--- a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
+++ b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
@@ -81,17 +81,17 @@ App.UpgradeVersionBoxView = Em.View.extend({
    */
   versionStateMap: {
     'current': {
-      'id': 'current',
+      'value': ['CURRENT'],
       'property': 'currentHosts',
       'label': Em.I18n.t('admin.stackVersions.hosts.popup.header.current')
     },
     'installed': {
-      'id': 'installed',
+      'value': ['INSTALLED'],
       'property': 'installedHosts',
       'label': Em.I18n.t('admin.stackVersions.hosts.popup.header.installed')
     },
     'not_installed': {
-      'id': 'installing',
+      'value': ['INSTALLING', 'INSTALL_FAILED', 'OUT_OF_SYNC'],
       'property': 'notInstalledHosts',
       'label': Em.I18n.t('admin.stackVersions.hosts.popup.header.not_installed')
     }
@@ -400,7 +400,7 @@ App.UpgradeVersionBoxView = Em.View.extend({
           if ($('.version-box-popup .modal')) {
             $('.version-box-popup .modal .modal-footer .btn-success').click();
           }
-          self.filterHostsByStack(displayName, status.id);
+          self.filterHostsByStack(displayName, status.value);
         }
       });
     }
@@ -408,14 +408,15 @@ App.UpgradeVersionBoxView = Em.View.extend({
 
   /**
    * goes to the hosts page with content filtered by repo_version_name and repo_version_state
-   * @param displayName
-   * @param state
+   * @param {string} displayName
+   * @param {Array} states
    * @method filterHostsByStack
    */
-  filterHostsByStack: function (displayName, state) {
-    if (!displayName || !state) return;
-    App.router.get('mainHostController').filterByStack(displayName, state);
+  filterHostsByStack: function (displayName, states) {
+    if (Em.isNone(displayName) || Em.isNone(states) || !states.length) return;
+    App.router.get('mainHostController').filterByStack(displayName, states);
     App.router.get('mainHostController').set('showFilterConditionsFirstLoad', true);
+    App.router.get('mainHostController').set('filterChangeHappened', true);
     App.router.transitionTo('hosts.index');
   },
 
@@ -430,7 +431,7 @@ App.UpgradeVersionBoxView = Em.View.extend({
     var maintenanceHosts = this.get('maintenanceHosts');
     if (notInstalledHosts.length && notRequiredHosts.length) {
       notRequiredHosts.forEach(function(not_required) {
-        var index = notInstalledHosts.indexOf(not_required)
+        var index = notInstalledHosts.indexOf(not_required);
         if (index > -1) {
           notInstalledHosts.splice(index, 1);
         }
@@ -438,7 +439,7 @@ App.UpgradeVersionBoxView = Em.View.extend({
     }
     if (notInstalledHosts.length && maintenanceHosts.length) {
       maintenanceHosts.forEach(function(mm_host) {
-        var index = notInstalledHosts.indexOf(mm_host)
+        var index = notInstalledHosts.indexOf(mm_host);
         if (index > -1) {
           notInstalledHosts.splice(index, 1);
         }

http://git-wip-us.apache.org/repos/asf/ambari/blob/e1686f51/ambari-web/test/controllers/main/host_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/host_test.js b/ambari-web/test/controllers/main/host_test.js
index e2041f3..e4c1a45 100644
--- a/ambari-web/test/controllers/main/host_test.js
+++ b/ambari-web/test/controllers/main/host_test.js
@@ -387,7 +387,7 @@ describe('MainHostController', function () {
     });
 
     it("displayName is null", function() {
-      hostController.filterByStack(null, 'INSTALLED');
+      hostController.filterByStack(null, ['INSTALLED']);
       expect(App.db.setFilterConditions.called).to.be.false;
     });
 
@@ -398,7 +398,7 @@ describe('MainHostController', function () {
 
     it("stack and displayName exist", function() {
       hostController.set('name', 'ctrl1');
-      hostController.filterByStack('stack1', 'INSTALLED');
+      hostController.filterByStack('stack1', ['INSTALLED']);
       expect(App.db.setFilterConditions.calledWith('ctrl1', [
       {
         iColumn: 16,
@@ -407,7 +407,7 @@ describe('MainHostController', function () {
       },
       {
         iColumn: 17,
-        value: 'INSTALLED',
+        value: ['INSTALLED'],
         type: 'string'
       }])).to.be.true;
     });

http://git-wip-us.apache.org/repos/asf/ambari/blob/e1686f51/ambari-web/test/views/main/admin/stack_upgrade/upgrade_version_box_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/admin/stack_upgrade/upgrade_version_box_view_test.js b/ambari-web/test/views/main/admin/stack_upgrade/upgrade_version_box_view_test.js
index 5746d3a..0ff5ce9 100644
--- a/ambari-web/test/views/main/admin/stack_upgrade/upgrade_version_box_view_test.js
+++ b/ambari-web/test/views/main/admin/stack_upgrade/upgrade_version_box_view_test.js
@@ -296,7 +296,7 @@ describe('App.UpgradeVersionBoxView', function () {
       }));
       view.set('p1', ['host1']);
       var popup = view.showHosts({contexts: [
-        {id: 1, 'property': 'p1'}
+        {value: 1, 'property': 'p1'}
       ]});
       expect(App.ModalPopup.show.calledOnce).to.be.true;
       popup.onPrimary();
@@ -324,6 +324,7 @@ describe('App.UpgradeVersionBoxView', function () {
     it("version and state are valid", function () {
       view.filterHostsByStack('version', 'state');
       expect(mock.set.calledWith('showFilterConditionsFirstLoad', true)).to.be.true;
+      expect(mock.set.calledWith('filterChangeHappened', true)).to.be.true;
       expect(mock.filterByStack.calledWith('version', 'state')).to.be.true;
       expect(App.router.transitionTo.calledWith('hosts.index')).to.be.true;
     });


[05/21] ambari git commit: AMBARI-15363 - Postgres And c3p0 Queries Can Hang Ambari On Large Queries (jonathanhurley)

Posted by jl...@apache.org.
AMBARI-15363 - Postgres And c3p0 Queries Can Hang Ambari On Large Queries (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0887e8e3
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0887e8e3
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0887e8e3

Branch: refs/heads/AMBARI-13364
Commit: 0887e8e3d6b6030143a8f3b38475e72875f899f1
Parents: 5166908
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Mar 9 17:29:48 2016 -0500
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed Mar 9 23:40:14 2016 -0500

----------------------------------------------------------------------
 ambari-project/pom.xml                          |   2 +-
 ambari-server/pom.xml                           |  16 +-
 .../server/api/query/JpaPredicateVisitor.java   |  10 +-
 .../ambari/server/api/query/JpaSortBuilder.java |  31 +++-
 .../server/api/query/JpaSortBuilderTest.java    | 153 +++++++++++++++++++
 5 files changed, 205 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0887e8e3/ambari-project/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-project/pom.xml b/ambari-project/pom.xml
index ed94004..b3d9ca2 100644
--- a/ambari-project/pom.xml
+++ b/ambari-project/pom.xml
@@ -210,7 +210,7 @@
       <dependency>
         <groupId>org.eclipse.persistence</groupId>
         <artifactId>eclipselink</artifactId>
-        <version>2.5.2</version>
+        <version>2.6.2</version>
       </dependency>
       <dependency>
         <groupId>org.postgresql</groupId>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0887e8e3/ambari-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index f691fad..83424c2 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -327,7 +327,7 @@
           <dependency>
             <groupId>org.eclipse.persistence</groupId>
             <artifactId>eclipselink</artifactId>
-            <version>2.4.2</version>
+            <version>2.6.2</version>
           </dependency>
         </dependencies>
       </plugin>
@@ -399,7 +399,7 @@
                 </source>
               </sources>
             </mapping>
-			<mapping>
+      			<mapping>
               <directory>/usr</directory>
               <username>root</username>
               <groupname>root</groupname>
@@ -1194,6 +1194,12 @@
       <groupId>org.quartz-scheduler</groupId>
       <artifactId>quartz</artifactId>
       <version>2.2.1</version>
+      <exclusions>
+        <exclusion>
+          <groupId>c3p0</groupId>
+          <artifactId>c3p0</artifactId>
+        </exclusion>
+      </exclusions>      
     </dependency>
     <dependency>
       <groupId>org.quartz-scheduler</groupId>
@@ -1267,6 +1273,12 @@
       <artifactId>commons-cli</artifactId>
       <version>1.3.1</version>
     </dependency>
+    <dependency>
+      <groupId>com.mchange</groupId>
+      <artifactId>c3p0</artifactId>
+      <version>[0.9.5.2]</version>
+      <scope>compile</scope>
+    </dependency>
   </dependencies>
 
   <pluginRepositories>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0887e8e3/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java b/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java
index 3a8a631..bed0e5a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java
@@ -55,12 +55,17 @@ public abstract class JpaPredicateVisitor<T> implements PredicateVisitor {
   /**
    * The root that the {@code from} clause requests from.
    */
-  private Root<T> m_root;
+  final private Root<T> m_root;
 
   /**
    * The query to submit to JPA.
    */
-  private CriteriaQuery<T> m_query;
+  final private CriteriaQuery<T> m_query;
+
+  /**
+   * The entity class that the root of the query is built from.
+   */
+  final private Class<T> m_entityClass;
 
   /**
    * The last calculated predicate.
@@ -87,6 +92,7 @@ public abstract class JpaPredicateVisitor<T> implements PredicateVisitor {
   public JpaPredicateVisitor(EntityManager entityManager, Class<T> entityClass) {
     m_entityManager = entityManager;
     m_builder = m_entityManager.getCriteriaBuilder();
+    m_entityClass = entityClass;
     m_query = m_builder.createQuery(entityClass);
     m_root = m_query.from(entityClass);
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/0887e8e3/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaSortBuilder.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaSortBuilder.java b/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaSortBuilder.java
index 8021346..5161e83 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaSortBuilder.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaSortBuilder.java
@@ -19,16 +19,20 @@ package org.apache.ambari.server.api.query;
 
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.Iterator;
 import java.util.List;
+import java.util.Set;
 
 import javax.persistence.criteria.CriteriaBuilder;
 import javax.persistence.criteria.CriteriaQuery;
 import javax.persistence.criteria.Order;
 import javax.persistence.criteria.Path;
+import javax.persistence.criteria.Root;
 import javax.persistence.metamodel.SingularAttribute;
 
 import org.apache.ambari.server.controller.spi.SortRequest;
 import org.apache.ambari.server.controller.spi.SortRequestProperty;
+import org.apache.commons.lang.ObjectUtils;
 
 /**
 * The {@link JpaSortBuilder} class is used to convert an Ambari
@@ -84,9 +88,32 @@ public class JpaSortBuilder<T> {
       Path<?> path = null;
       for (SingularAttribute<?, ?> singularAttribute : singularAttributes) {
         if (null == path) {
+
           CriteriaQuery<T> query = visitor.getCriteriaQuery();
-          path = query.from(visitor.getEntityClass()).get(
-              singularAttribute.getName());
+          Set<Root<?>> roots = query.getRoots();
+
+          // if there are existing roots, use them to prevent more roots
+          // from being added, potentially causing a cartesian product
+          // where we don't want one
+          if (null != roots && !roots.isEmpty()) {
+            Iterator<Root<?>> iterator = roots.iterator();
+            while (iterator.hasNext()) {
+              Root<?> root = iterator.next();
+
+              Class<?> visitorEntityClass = visitor.getEntityClass();
+              if (ObjectUtils.equals(visitorEntityClass, root.getJavaType())
+                  || ObjectUtils.equals(visitorEntityClass, root.getModel().getJavaType())) {
+                path = root.get(singularAttribute.getName());
+                break;
+              }
+            }
+          }
+
+          // no roots exist already which match this entity class, create a new
+          // path
+          if (null == path) {
+            path = query.from(visitor.getEntityClass()).get(singularAttribute.getName());
+          }
         } else {
           path = path.get(singularAttribute.getName());
         }
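
The fix above reuses an existing criteria root whose Java type matches the sort entity instead of calling from() a second time; the second from() call is what introduced the duplicate root and the cartesian product described in the new test. A self-contained sketch of that root-reuse rule (the entity and attribute names below are hypothetical):

import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.criteria.CriteriaQuery;
import javax.persistence.criteria.Path;
import javax.persistence.criteria.Root;

public final class RootReuseSketch {

  @Entity
  static class MyEntity {
    @Id
    long id;
  }

  // Resolve a sort path against an existing root when one matches the entity
  // class; fall back to from(), which adds a new root, only when none does.
  static Path<?> sortPath(CriteriaQuery<MyEntity> query, String attribute) {
    for (Root<?> root : query.getRoots()) {
      if (MyEntity.class.equals(root.getJavaType())) {
        return root.get(attribute);
      }
    }
    return query.from(MyEntity.class).get(attribute);
  }
}
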

http://git-wip-us.apache.org/repos/asf/ambari/blob/0887e8e3/ambari-server/src/test/java/org/apache/ambari/server/api/query/JpaSortBuilderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/query/JpaSortBuilderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/query/JpaSortBuilderTest.java
new file mode 100644
index 0000000..b9bfc50
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/query/JpaSortBuilderTest.java
@@ -0,0 +1,153 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.api.query;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import javax.persistence.EntityManager;
+import javax.persistence.criteria.CriteriaQuery;
+import javax.persistence.criteria.Order;
+import javax.persistence.criteria.Root;
+import javax.persistence.metamodel.SingularAttribute;
+
+import org.apache.ambari.server.controller.internal.AlertHistoryResourceProvider;
+import org.apache.ambari.server.controller.internal.SortRequestImpl;
+import org.apache.ambari.server.controller.spi.Predicate;
+import org.apache.ambari.server.controller.spi.SortRequest;
+import org.apache.ambari.server.controller.spi.SortRequestProperty;
+import org.apache.ambari.server.controller.utilities.PredicateBuilder;
+import org.apache.ambari.server.controller.utilities.PredicateHelper;
+import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.entities.AlertHistoryEntity;
+import org.apache.ambari.server.orm.entities.AlertHistoryEntity_;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+
+import junit.framework.Assert;
+
+/**
+ * Tests the {@link JpaSortBuilder}.
+ */
+public class JpaSortBuilderTest {
+
+  private Injector m_injector;
+
+  @Before
+  public void before() {
+    m_injector = Guice.createInjector(new InMemoryDefaultTestModule());
+    m_injector.getInstance(GuiceJpaInitializer.class);
+    m_injector.injectMembers(this);
+  }
+
+  /**
+   * Tests that adding a sort does not create another {@link Root} in the
+   * {@link CriteriaQuery}. A duplicate root will cause a Cartesian product
+   * similar to:
+   *
+   * <pre>
+   * SELECT t0.alert_id,
+   *   t0.alert_instance,
+   *   t0.alert_label,
+   *   t0.alert_state,
+   *   t0.alert_text,
+   *   t0.alert_timestamp,
+   *   t0.cluster_id,
+   *   t0.component_name,
+   *   t0.host_name,
+   *   t0.service_name,
+   *   t0.alert_definition_id
+   * FROM   alert_history t0,
+   *   alert_history t2,
+   *   alert_definition t1
+   * WHERE  ( ( t1.definition_name = ? )
+   *     AND ( t1.definition_id = t2.alert_definition_id ) )
+   * ORDER  BY t0.alert_timestamp DESC
+   * </pre>
+   *
+   * where the root for {@code alert_history} is added twice.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testSortDoesNotAddExtraRootPaths() throws Exception {
+    // create a sort request against the entity directly
+    List<SortRequestProperty> sortRequestProperties = new ArrayList<>();
+
+    sortRequestProperties.add(
+        new SortRequestProperty(AlertHistoryResourceProvider.ALERT_HISTORY_TIMESTAMP,
+            org.apache.ambari.server.controller.spi.SortRequest.Order.ASC));
+
+    SortRequest sortRequest = new SortRequestImpl(sortRequestProperties);
+
+    // create a complex, cross-entity predicate
+    Predicate predicate = new PredicateBuilder().property(
+            AlertHistoryResourceProvider.ALERT_HISTORY_DEFINITION_NAME).equals("foo").toPredicate();
+
+    MockAlertHistoryPredicateVisitor visitor = new MockAlertHistoryPredicateVisitor();
+    PredicateHelper.visit(predicate, visitor);
+
+    JpaSortBuilder<AlertHistoryEntity> sortBuilder = new JpaSortBuilder<AlertHistoryEntity>();
+    List<Order> sortOrders = sortBuilder.buildSortOrders(sortRequest, visitor);
+
+    Assert.assertEquals(1, sortOrders.size());
+
+    // verify the CriteriaQuery has the correct roots
+    // it should have one for the main query predicate
+    CriteriaQuery<AlertHistoryEntity> query = visitor.getCriteriaQuery();
+    Set<Root<?>> roots = query.getRoots();
+    Assert.assertEquals(1, roots.size());
+  }
+
+  /**
+   * The {@link MockAlertHistoryPredicateVisitor} is used to convert an Ambari
+   * {@link Predicate} into a JPA {@link javax.persistence.criteria.Predicate}.
+   */
+  private final class MockAlertHistoryPredicateVisitor
+      extends JpaPredicateVisitor<AlertHistoryEntity> {
+
+    /**
+     * Constructor.
+     *
+     */
+    public MockAlertHistoryPredicateVisitor() {
+      super(m_injector.getInstance(EntityManager.class), AlertHistoryEntity.class);
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public Class<AlertHistoryEntity> getEntityClass() {
+      return AlertHistoryEntity.class;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public List<? extends SingularAttribute<?, ?>> getPredicateMapping(String propertyId) {
+      return AlertHistoryEntity_.getPredicateMapping().get(propertyId);
+    }
+  }
+}


[15/21] ambari git commit: AMBARI-15341: Use pxf-service restart method when Restart action is called via Ambari (bhuvnesh2703 via mithmatt)

Posted by jl...@apache.org.
AMBARI-15341: Use pxf-service restart method when Restart action is called via Ambari (bhuvnesh2703 via mithmatt)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e1762a42
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e1762a42
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e1762a42

Branch: refs/heads/AMBARI-13364
Commit: e1762a4281bbd78228c0ac162d8c19932de955e5
Parents: 3f3fdbd
Author: Matt <mm...@pivotal.io>
Authored: Thu Mar 10 04:11:31 2016 -0800
Committer: Matt <mm...@pivotal.io>
Committed: Thu Mar 10 04:11:31 2016 -0800

----------------------------------------------------------------------
 .../resources/common-services/PXF/3.0.0/package/scripts/pxf.py   | 4 ++++
 1 file changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e1762a42/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/pxf.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/pxf.py b/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/pxf.py
index 9a1eaf3..c3da47f 100644
--- a/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/pxf.py
+++ b/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/pxf.py
@@ -58,6 +58,10 @@ class Pxf(Script):
     self.__execute_service_command("stop")
 
 
+  def restart(self, env):
+    self.__execute_service_command("restart")
+
+
   def status(self, env):
     try:
       self.__execute_service_command("status")


[19/21] ambari git commit: AMBARI-15376: Fix typo in get_stack_version (jluniya)

Posted by jl...@apache.org.
AMBARI-15376: Fix typo in get_stack_version (jluniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7cdb16c1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7cdb16c1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7cdb16c1

Branch: refs/heads/AMBARI-13364
Commit: 7cdb16c1d8e575dc00b71877858abe9d95bdd871
Parents: ac6b0d1
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Thu Mar 10 12:31:02 2016 -0800
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Thu Mar 10 12:31:02 2016 -0800

----------------------------------------------------------------------
 .../resource_management/libraries/functions/get_stack_version.py   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7cdb16c1/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py b/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
index c00b541..f2e6567 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
@@ -81,7 +81,7 @@ def get_stack_version(package_name):
       'Unable to determine the current version because of a non-zero return code of {0}'.format(str(return_code)))
 
   stack_version = re.sub(package_name + ' - ', '', stack_output)
-  stack_version = stack_output.rstrip()
+  stack_version = stack_version.rstrip()
   match = re.match('[0-9]+.[0-9]+.[0-9]+.[0-9]+-[0-9]+', stack_version)
 
   if match is None:
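
The single-word fix above matters because the old line re-read stack_output and
threw away the re.sub() result. A standalone illustration, using a made-up
hdp-select-style output line:

    import re

    package_name = "hdp-select"
    stack_output = "hdp-select - 2.3.4.0-3485\n"  # hypothetical CLI output

    stack_version = re.sub(package_name + ' - ', '', stack_output)
    # Before the fix, stack_version = stack_output.rstrip() discarded the
    # substitution, leaving "hdp-select - 2.3.4.0-3485", which never matched.
    stack_version = stack_version.rstrip()

    match = re.match('[0-9]+.[0-9]+.[0-9]+.[0-9]+-[0-9]+', stack_version)
    assert match is not None  # "2.3.4.0-3485" matches the version pattern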


[06/21] ambari git commit: AMBARI-15127. Separate DB operation during upgrade and add them as pre-upgrade tasks (Fix recursive Flag) (gautam)

Posted by jl...@apache.org.
AMBARI-15127. Separate DB operation during upgrade and add them as pre-upgrade tasks (Fix recursive Flag) (gautam)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/26b33579
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/26b33579
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/26b33579

Branch: refs/heads/AMBARI-13364
Commit: 26b3357907362ba75b1e25360633a8634473e12d
Parents: 0887e8e
Author: Gautam Borad <ga...@apache.org>
Authored: Wed Mar 9 15:22:08 2016 +0000
Committer: Gautam Borad <ga...@apache.org>
Committed: Thu Mar 10 13:03:01 2016 +0530

----------------------------------------------------------------------
 .../RANGER/0.4.0/package/scripts/setup_ranger_xml.py             | 4 ++--
 .../common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py  | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/26b33579/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py
index 64e49dc..d6243a2 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py
@@ -267,7 +267,7 @@ def copy_jdbc_connector(stack_version=None):
 
   Directory(params.java_share_dir,
     mode=0755,
-    recursive=True,
+    create_parents=True,
     cd_access="a"
   )
 
@@ -291,7 +291,7 @@ def copy_jdbc_connector(stack_version=None):
 
     Directory(params.jdbc_libs_dir,
       cd_access="a",
-      recursive=True)
+      create_parents=True)
 
     Execute(as_sudo(['yes', '|', 'cp', params.libs_path_in_archive, params.jdbc_libs_dir], auto_escape=False),
             path=["/bin", "/usr/bin/"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/26b33579/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py
index c307f18..92fe529 100755
--- a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py
@@ -271,7 +271,7 @@ def copy_jdbc_connector(stack_version=None):
 
   Directory(params.java_share_dir,
     mode=0755,
-    recursive=True,
+    create_parents=True,
     cd_access="a"
   )
 
@@ -295,7 +295,7 @@ def copy_jdbc_connector(stack_version=None):
 
     Directory(params.jdbc_libs_dir,
       cd_access="a",
-      recursive=True)
+      create_parents=True)
 
     Execute(as_sudo(['yes', '|', 'cp', params.libs_path_in_archive, params.jdbc_libs_dir], auto_escape=False),
       path=["/bin", "/usr/bin/"])


[20/21] ambari git commit: AMBARI-15265. Install & Manage Zeppelin with Ambari - RAT failure fix (Renjith Kamath via smohanty)

Posted by jl...@apache.org.
AMBARI-15265. Install & Manage Zeppelin with Ambari - RAT failure fix (Renjith Kamath via smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a9d56217
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a9d56217
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a9d56217

Branch: refs/heads/AMBARI-13364
Commit: a9d562171596f7bb9734e41e9c3ab83295205be8
Parents: 7cdb16c
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Thu Mar 10 13:20:20 2016 -0800
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Thu Mar 10 13:20:20 2016 -0800

----------------------------------------------------------------------
 .../package/scripts/alert_check_zeppelin.py      | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a9d56217/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/alert_check_zeppelin.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/alert_check_zeppelin.py b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/alert_check_zeppelin.py
index 90c9569..3680222 100644
--- a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/alert_check_zeppelin.py
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/alert_check_zeppelin.py
@@ -1,3 +1,22 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
 import glob
 import sys
 


[13/21] ambari git commit: AMBARI-15367. Remove experimental flags for stackUpgrade, enhancedConfigs, and storeKDCCredentials (alexantonenko)

Posted by jl...@apache.org.
AMBARI-15367. Remove experimental flags for stackUpgrade, enhancedConfigs, and storeKDCCredentials (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e4544a57
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e4544a57
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e4544a57

Branch: refs/heads/AMBARI-13364
Commit: e4544a575a39c25a20bc91822f9befd0ed865d1a
Parents: 6626796
Author: Alex Antonenko <hi...@gmail.com>
Authored: Thu Mar 10 10:41:41 2016 +0200
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Thu Mar 10 13:13:14 2016 +0200

----------------------------------------------------------------------
 ambari-web/app/app.js                           |  3 +-
 ambari-web/app/config.js                        |  3 --
 .../controllers/global/cluster_controller.js    | 25 +-----------
 .../app/controllers/global/update_controller.js |  2 +-
 .../main/admin/kerberos/step2_controller.js     |  4 +-
 ambari-web/app/mappers/hosts_mapper.js          | 41 ++++++++------------
 .../common/modal_popups/invalid_KDC_popup.hbs   | 16 ++++----
 .../app/templates/main/admin/kerberos.hbs       |  6 +--
 ambari-web/app/templates/main/host.hbs          |  6 +--
 ambari-web/app/templates/main/host/summary.hbs  |  6 +--
 ambari-web/app/views/main/host.js               |  4 --
 ambari-web/app/views/main/host/menu.js          |  4 +-
 .../global/cluster_controller_test.js           | 22 +----------
 ambari-web/test/views/main/host/menu_test.js    | 17 +-------
 14 files changed, 40 insertions(+), 119 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e4544a57/ambari-web/app/app.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/app.js b/ambari-web/app/app.js
index 52f34ab..ab189f8 100644
--- a/ambari-web/app/app.js
+++ b/ambari-web/app/app.js
@@ -194,10 +194,9 @@ module.exports = Em.Application.create({
   /**
    * when working with enhanced configs we should rely on stack version
    * as version that is below 2.2 doesn't supports it
-   * even if flag <code>supports.enhancedConfigs<code> is true
    * @type {boolean}
    */
-  isClusterSupportsEnhancedConfigs: Em.computed.and('isHadoop22Stack', 'supports.enhancedConfigs'),
+  isClusterSupportsEnhancedConfigs: Em.computed.alias('isHadoop22Stack'),
 
   /**
    * If NameNode High Availability is enabled

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4544a57/ambari-web/app/config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/config.js b/ambari-web/app/config.js
index cba09d7..92ba8ae 100644
--- a/ambari-web/app/config.js
+++ b/ambari-web/app/config.js
@@ -65,7 +65,6 @@ App.enableExperimental = false;
 
 App.supports = {
   preUpgradeCheck: true,
-  stackUpgrade: true,
   displayOlderVersions: false,
   autoRollbackHA: false,
   alwaysEnableManagedMySQLForHive: false,
@@ -74,10 +73,8 @@ App.supports = {
   installGanglia: false,
   opsDuringRollingUpgrade: false,
   customizedWidgetLayout: false,
-  enhancedConfigs: true,
   showPageLoadTime: false,
   skipComponentStartAfterInstall: false,
-  storeKDCCredentials: true,
   preInstallChecks: false,
   hostComboSearchBox: true,
   serviceAutoStart: false,

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4544a57/ambari-web/app/controllers/global/cluster_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/global/cluster_controller.js b/ambari-web/app/controllers/global/cluster_controller.js
index 3c28e80..327bd45 100644
--- a/ambari-web/app/controllers/global/cluster_controller.js
+++ b/ambari-web/app/controllers/global/cluster_controller.js
@@ -202,9 +202,7 @@ App.ClusterController = Em.Controller.extend(App.ReloadPopupMixin, {
     });
 
 
-    if (App.get('supports.stackUpgrade')) {
-      self.restoreUpgradeState();
-    }
+    self.restoreUpgradeState();
 
     App.router.get('wizardWatcherController').getUser();
 
@@ -424,26 +422,7 @@ App.ClusterController = Em.Controller.extend(App.ReloadPopupMixin, {
    * @returns {$.ajax}
    */
   createKerberosAdminSession: function (credentialResource, ajaxOpt) {
-    if (App.get('supports.storeKDCCredentials')) {
-      return credentialUtils.createOrUpdateCredentials(App.get('clusterName'), credentialUtils.ALIAS.KDC_CREDENTIALS, credentialResource).then(function() {
-        if (ajaxOpt) {
-          $.ajax(ajaxOpt);
-        }
-      });
-    }
-
-    return App.ajax.send({
-      name: 'common.cluster.update',
-      sender: this,
-      data: {
-        clusterName: App.get('clusterName'),
-        data: [{
-          session_attributes: {
-            kerberos_admin: {principal: credentialResource.principal, password: credentialResource.key}
-          }
-        }]
-      }
-    }).success(function () {
+    return credentialUtils.createOrUpdateCredentials(App.get('clusterName'), credentialUtils.ALIAS.KDC_CREDENTIALS, credentialResource).then(function() {
       if (ajaxOpt) {
         $.ajax(ajaxOpt);
       }

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4544a57/ambari-web/app/controllers/global/update_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/global/update_controller.js b/ambari-web/app/controllers/global/update_controller.js
index de4523b..5fb69f3 100644
--- a/ambari-web/app/controllers/global/update_controller.js
+++ b/ambari-web/app/controllers/global/update_controller.js
@@ -260,7 +260,7 @@ App.UpdateController = Em.Controller.extend({
       }
     }
 
-    realUrl = realUrl.replace("<stackVersions>", App.get('supports.stackUpgrade') ? stackVersionInfo : "");
+    realUrl = realUrl.replace("<stackVersions>", stackVersionInfo);
     realUrl = realUrl.replace("<metrics>", lazyLoadMetrics ? "" : "metrics/disk,metrics/load/load_one,");
     realUrl = realUrl.replace('<hostDetailsParams>', hostDetailsParams);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4544a57/ambari-web/app/controllers/main/admin/kerberos/step2_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/kerberos/step2_controller.js b/ambari-web/app/controllers/main/admin/kerberos/step2_controller.js
index 9b411c6..36fb227 100644
--- a/ambari-web/app/controllers/main/admin/kerberos/step2_controller.js
+++ b/ambari-web/app/controllers/main/admin/kerberos/step2_controller.js
@@ -92,7 +92,7 @@ App.KerberosWizardStep2Controller = App.WizardStep7Controller.extend(App.KDCCred
     }));
 
     this.filterConfigs(this.get('configs'));
-    if (App.get('supports.storeKDCCredentials') && !this.get('wizardController.skipClientInstall')) {
+    if (!this.get('wizardController.skipClientInstall')) {
       this.initilizeKDCStoreProperties(this.get('configs'));
     }
     this.applyServicesConfigs(this.get('configs'));
@@ -249,7 +249,7 @@ App.KerberosWizardStep2Controller = App.WizardStep7Controller.extend(App.KDCCred
    */
   createKerberosAdminSession: function (configs) {
     configs = configs || this.get('stepConfigs')[0].get('configs');
-    if (App.get('supports.storeKDCCredentials') && !this.get('wizardController.skipClientInstall')) {
+    if (!this.get('wizardController.skipClientInstall')) {
       return this.createKDCCredentials(configs);
     }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4544a57/ambari-web/app/mappers/hosts_mapper.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mappers/hosts_mapper.js b/ambari-web/app/mappers/hosts_mapper.js
index d54c988..3eb99bf 100644
--- a/ambari-web/app/mappers/hosts_mapper.js
+++ b/ambari-web/app/mappers/hosts_mapper.js
@@ -92,7 +92,6 @@ App.hostsMapper = App.QuickDataMapper.create({
       var currentServiceComponentsMap = App.get('componentConfigMapper').buildServiceComponentMap(cacheServices);
       var newHostComponentsMap = {};
       var selectedHosts = App.db.getSelectedHosts('mainHostController');
-      var stackUpgradeSupport = App.get('supports.stackUpgrade');
       var clusterName = App.get('clusterName');
       var advancedHostComponents = [];
 
@@ -146,17 +145,15 @@ App.hostsMapper = App.QuickDataMapper.create({
           }
         }
 
-        if (stackUpgradeSupport) {
-          var currentVersion = item.stack_versions.findProperty('HostStackVersions.state', 'CURRENT');
-          var currentVersionNumber = currentVersion && currentVersion.repository_versions
-            ? Em.get(currentVersion.repository_versions[0], 'RepositoryVersions.repository_version') : '';
-          for (var j = 0; j < item.stack_versions.length; j++) {
-            var stackVersion = item.stack_versions[j];
-            stackVersion.host_name = item.Hosts.host_name;
-            stackVersion.is_visible = stringUtils.compareVersions(Em.get(stackVersion.repository_versions[0], 'RepositoryVersions.repository_version'), currentVersionNumber) >= 0
-              || App.get('supports.displayOlderVersions') || !currentVersionNumber;
-            stackVersions.push(this.parseIt(stackVersion, this.stackVersionConfig));
-          }
+        var currentVersion = item.stack_versions.findProperty('HostStackVersions.state', 'CURRENT');
+        var currentVersionNumber = currentVersion && currentVersion.repository_versions
+          ? Em.get(currentVersion.repository_versions[0], 'RepositoryVersions.repository_version') : '';
+        for (var j = 0; j < item.stack_versions.length; j++) {
+          var stackVersion = item.stack_versions[j];
+          stackVersion.host_name = item.Hosts.host_name;
+          stackVersion.is_visible = stringUtils.compareVersions(Em.get(stackVersion.repository_versions[0], 'RepositoryVersions.repository_version'), currentVersionNumber) >= 0
+            || App.get('supports.displayOlderVersions') || !currentVersionNumber;
+          stackVersions.push(this.parseIt(stackVersion, this.stackVersionConfig));
         }
 
         var alertsSummary = item.alerts_summary;
@@ -166,15 +163,13 @@ App.hostsMapper = App.QuickDataMapper.create({
         // There is no need to override existing index in host detail view since old model(already have indexes) will not be cleared.
         item.index = (existingHost && !json.itemTotal)? existingHost.get('index'): index;
 
-        if (stackUpgradeSupport) {
-          this.config = $.extend(this.config, {
-            stack_versions_key: 'stack_versions',
-            stack_versions_type: 'array',
-            stack_versions: {
-              item: 'HostStackVersions.id'
-            }
-          })
-        }
+        this.config = $.extend(this.config, {
+          stack_versions_key: 'stack_versions',
+          stack_versions_type: 'array',
+          stack_versions: {
+            item: 'HostStackVersions.id'
+          }
+        });
         var parsedItem = this.parseIt(item, this.config);
         parsedItem.is_requested = true;
         parsedItem.last_heart_beat_time = App.dateTimeWithTimeZone(parsedItem.last_heart_beat_time);
@@ -199,9 +194,7 @@ App.hostsMapper = App.QuickDataMapper.create({
       }
 
       App.store.commit();
-      if (stackUpgradeSupport) {
-        App.store.loadMany(App.HostStackVersion, stackVersions);
-      }
+      App.store.loadMany(App.HostStackVersion, stackVersions);
       App.store.loadMany(App.HostComponent, components);
       //"itemTotal" present only for Hosts page request
       if (!Em.isNone(json.itemTotal)) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4544a57/ambari-web/app/templates/common/modal_popups/invalid_KDC_popup.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/common/modal_popups/invalid_KDC_popup.hbs b/ambari-web/app/templates/common/modal_popups/invalid_KDC_popup.hbs
index 8c9142b..722e578 100644
--- a/ambari-web/app/templates/common/modal_popups/invalid_KDC_popup.hbs
+++ b/ambari-web/app/templates/common/modal_popups/invalid_KDC_popup.hbs
@@ -35,15 +35,13 @@
   <div class="control-group">
     <span class="control-label"></span>
     <div class="controls ">
-      {{#if App.supports.storeKDCCredentials}}
-        <label>
-          {{view Ember.Checkbox checkedBinding="view.parentView.storeCredentials" disabledBinding="view.parentView.checkboxDisabled" classNames="pull-left"}}
-          <span {{bindAttr class=":mls view.parentView.checkboxDisabled:muted"}}>
-            {{t admin.kerberos.credentials.store.label}}
-            <a class="icon-question-sign icon-blue" rel="tooltip" href="javascript:void(null);" data-toggle="tooltip" {{bindAttr data-original-title="view.parentView.hintMessage"}}><a/>
-          </span>
-        </label>
-      {{/if}}
+      <label>
+        {{view Ember.Checkbox checkedBinding="view.parentView.storeCredentials" disabledBinding="view.parentView.checkboxDisabled" classNames="pull-left"}}
+        <span {{bindAttr class=":mls view.parentView.checkboxDisabled:muted"}}>
+          {{t admin.kerberos.credentials.store.label}}
+          <a class="icon-question-sign icon-blue" rel="tooltip" href="javascript:void(null);" data-toggle="tooltip" {{bindAttr data-original-title="view.parentView.hintMessage"}}></a>
+        </span>
+      </label>
     </div>
   </div>
 </form>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4544a57/ambari-web/app/templates/main/admin/kerberos.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/kerberos.hbs b/ambari-web/app/templates/main/admin/kerberos.hbs
index c1d859f..1e3be1a 100644
--- a/ambari-web/app/templates/main/admin/kerberos.hbs
+++ b/ambari-web/app/templates/main/admin/kerberos.hbs
@@ -24,10 +24,8 @@
           {{#unless isManualKerberos}}
             <button class="btn btn-success" id="regenerate-keytabs" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action regenerateKeytabs target="controller"}}>
               <i class="icon-repeat"></i> {{t admin.kerberos.button.regenerateKeytabs}}</button>
-            {{#if App.supports.storeKDCCredentials}}
-              {{#if App.isCredentialStorePersistent}}
-                <button class="btn btn-primary" {{action showManageKDCCredentialsPopup target="controller"}}>{{t admin.kerberos.credentials.store.menu.label}}</button>
-              {{/if}}
+            {{#if App.isCredentialStorePersistent}}
+              <button class="btn btn-primary" {{action showManageKDCCredentialsPopup target="controller"}}>{{t admin.kerberos.credentials.store.menu.label}}</button>
             {{/if}}
           {{/unless}}
           <br/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4544a57/ambari-web/app/templates/main/host.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/host.hbs b/ambari-web/app/templates/main/host.hbs
index 233ff07..38f1b9a 100644
--- a/ambari-web/app/templates/main/host.hbs
+++ b/ambari-web/app/templates/main/host.hbs
@@ -40,7 +40,7 @@
         {{view view.parentView.memorySort}}
         {{view view.parentView.diskUsageSort}}
         {{view view.parentView.loadAvgSort}}
-        <th {{bindAttr class="App.supports.stackUpgrade::hidden App.stackVersionsAvailable::hidden :sort-view-11"}}>
+        <th {{bindAttr class="App.stackVersionsAvailable::hidden :sort-view-11"}}>
           {{t hosts.host.menu.stackVersions}}
         </th>
         <th class="sort-view-6">{{t common.components}}</th>
@@ -91,7 +91,7 @@
           </td>
 
           <td class="load-avg">{{host.loadAvg}}</td>
-          <td {{bindAttr class="App.supports.stackUpgrade::hidden App.stackVersionsAvailable::hidden view.hasSingleVersion:not-active-link :host-table-versions"}}>
+          <td {{bindAttr class="App.stackVersionsAvailable::hidden view.hasSingleVersion:not-active-link :host-table-versions"}}>
             <a href="#" class="black" {{action displayVersions target="view"}}>
               {{view.currentVersion}}
             </a>
@@ -106,7 +106,7 @@
     {{else}}
       <tr>
         <td class="first"></td>
-        <td {{bindAttr colspan="view.colspan"}}>
+        <td colspan="12">
           {{t hosts.table.noHosts}}
         </td>
       </tr>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4544a57/ambari-web/app/templates/main/host/summary.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/host/summary.hbs b/ambari-web/app/templates/main/host/summary.hbs
index 17a0b69..11b4861 100644
--- a/ambari-web/app/templates/main/host/summary.hbs
+++ b/ambari-web/app/templates/main/host/summary.hbs
@@ -159,10 +159,8 @@
                         <dt class="summary-agent-heart-label">{{t hosts.host.summary.agentHeartbeat}}:</dt>
                           <dd class="summary-agent-heart-value">&nbsp;{{view.timeSinceHeartBeat}}</dd>
 
-                        {{#if App.supports.stackUpgrade}}
-                          <dt class="summary-cur-ver-label">{{t hosts.host.summary.currentVersion}}:</dt>
-                            <dd class="summary-cur-ver-value">&nbsp;{{view.content.currentVersion}}</dd>
-                        {{/if}}
+                        <dt class="summary-cur-ver-label">{{t hosts.host.summary.currentVersion}}:</dt>
+                          <dd class="summary-cur-ver-value">&nbsp;{{view.content.currentVersion}}</dd>
                     </dl>
                 </div>
             </div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4544a57/ambari-web/app/views/main/host.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/host.js b/ambari-web/app/views/main/host.js
index e38685c..5f337da 100644
--- a/ambari-web/app/views/main/host.js
+++ b/ambari-web/app/views/main/host.js
@@ -44,10 +44,6 @@ App.MainHostView = App.TableView.extend(App.TableServerViewMixin, {
    */
   requestError: null,
 
-  colspan: function () {
-    return 11 + +App.get('supports.stackUpgrade');
-  }.property("App.supports.stackUpgrade"),
-
   /**
    * List of hosts in cluster
    * @type {Array}

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4544a57/ambari-web/app/views/main/host/menu.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/host/menu.js b/ambari-web/app/views/main/host/menu.js
index 5540014..d45d930 100644
--- a/ambari-web/app/views/main/host/menu.js
+++ b/ambari-web/app/views/main/host/menu.js
@@ -49,9 +49,7 @@ App.MainHostMenuView = Em.CollectionView.extend({
         name: 'versions',
         label: Em.I18n.t('hosts.host.menu.stackVersions'),
         routing: 'stackVersions',
-        hidden: function () {
-          return !App.get('supports.stackUpgrade') || !App.get('stackVersionsAvailable')
-        }.property('App.supports.stackUpgrade'),
+        hidden: !App.get('stackVersionsAvailable'),
         id: 'host-details-summary-version'
       }),
       Em.Object.create({

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4544a57/ambari-web/test/controllers/global/cluster_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/global/cluster_controller_test.js b/ambari-web/test/controllers/global/cluster_controller_test.js
index d2c6876..2bf4ab4 100644
--- a/ambari-web/test/controllers/global/cluster_controller_test.js
+++ b/ambari-web/test/controllers/global/cluster_controller_test.js
@@ -212,27 +212,7 @@ describe('App.clusterController', function () {
       App.get.restore();
     });
 
-    it("KDC Store supports disabled, credentials updated via kdc session call", function() {
-      this.stub.withArgs('supports.storeKDCCredentials').returns(false);
-      controller.createKerberosAdminSession({
-        principal: 'admin',
-        key: 'pass',
-        type: 'persistent'
-      }, {});
-      var args = testHelpers.findAjaxRequest('name', 'common.cluster.update');
-      expect(args[0]).to.exists;
-      expect(args[0].sender).to.be.eql(controller);
-      expect(args[0].data).to.be.eql({
-        clusterName: 'test',
-        data: [{
-          session_attributes: {
-            kerberos_admin: {principal: "admin", password: "pass"}
-          }
-        }]
-      });
-    });
-    it("KDC Store supports enabled, credentials updated via credentials storage call", function() {
-      this.stub.withArgs('supports.storeKDCCredentials').returns(true);
+    it("credentials updated via credentials storage call", function() {
       controller.createKerberosAdminSession({
         principal: 'admin',
         key: 'pass',

http://git-wip-us.apache.org/repos/asf/ambari/blob/e4544a57/ambari-web/test/views/main/host/menu_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/host/menu_test.js b/ambari-web/test/views/main/host/menu_test.js
index 520314d..f9636d8 100644
--- a/ambari-web/test/views/main/host/menu_test.js
+++ b/ambari-web/test/views/main/host/menu_test.js
@@ -38,32 +38,17 @@ describe('App.MainHostMenuView', function () {
     Em.A([
         {
           stackVersionsAvailable: true,
-          stackUpgrade: true,
           m: '`versions` is visible',
           e: false
         },
         {
-          stackVersionsAvailable: true,
-          stackUpgrade: false,
-          m: '`versions` is invisible (1)',
-          e: true
-        },
-        {
-          stackVersionsAvailable: false,
-          stackUpgrade: true,
-          m: '`versions` is invisible (2)',
-          e: true
-        },
-        {
           stackVersionsAvailable: false,
-          stackUpgrade: false,
-          m: '`versions` is invisible (3)',
+          m: '`versions` is invisible',
           e: true
         }
       ]).forEach(function (test) {
         it(test.m, function () {
           this.mock.withArgs('stackVersionsAvailable').returns(test.stackVersionsAvailable);
-          this.mock.withArgs('supports.stackUpgrade').returns(test.stackUpgrade);
           view.propertyDidChange('content');
           expect(view.get('content').findProperty('name', 'versions').get('hidden')).to.equal(test.e);
         });


[04/21] ambari git commit: AMBARI-15331. AMS HBase FIFO compaction policy and Normalizer settings are not handled correctly. (Shantanu Mundkur via swagle)

Posted by jl...@apache.org.
AMBARI-15331. AMS HBase FIFO compaction policy and Normalizer settings are not handled correctly. (Shantanu Mundkur via swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/51669089
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/51669089
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/51669089

Branch: refs/heads/AMBARI-13364
Commit: 5166908915fbef7b3e9cab2a5bfd17e44bfc1953
Parents: 98b2b23
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Wed Mar 9 19:25:25 2016 -0800
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Wed Mar 9 19:25:31 2016 -0800

----------------------------------------------------------------------
 .../conf/unix/ambari-metrics-collector                  | 12 ++++++------
 .../apache/ambari/server/upgrade/UpgradeCatalog220.java | 10 +++++-----
 .../AMBARI_METRICS/0.1.0/configuration/ams-env.xml      |  2 +-
 .../ambari/server/upgrade/UpgradeCatalog220Test.java    |  6 +++---
 4 files changed, 15 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/51669089/ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector b/ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector
index 64a7848..f75a8e5 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector
+++ b/ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector
@@ -36,12 +36,6 @@ METRIC_TABLES=(METRIC_AGGREGATE_DAILY METRIC_AGGREGATE_HOURLY METRIC_AGGREGATE_M
 METRIC_FIFO_COMPACTION_TABLES=(METRIC_AGGREGATE METRIC_RECORD METRIC_RECORD_MINUTE)
 METRIC_COLLECTOR=ambari-metrics-collector
 
-AMS_COLLECTOR_LOG_DIR=/var/log/ambari-metrics-collector
-
-AMS_HBASE_NORMALIZER_ENABLED=true
-AMS_HBASE_FIFO_COMPACTION_ENABLED=true
-AMS_HBASE_INIT_CHECK_ENABLED=true
-
 NORMALIZER_ENABLED_STUB_FILE=/var/run/ambari-metrics-collector/normalizer_enabled
 FIFO_ENABLED_STUB_FILE=/var/run/ambari-metrics-collector/fifo_enabled
 
@@ -376,6 +370,12 @@ else
   exit 1
 fi
 
+# set these env variables only if they were not set by ams-env.sh
+: ${AMS_COLLECTOR_LOG_DIR:=/var/log/ambari-metrics-collector}
+: ${AMS_HBASE_NORMALIZER_ENABLED:=true}
+: ${AMS_HBASE_FIFO_COMPACTION_ENABLED:=true}
+: ${AMS_HBASE_INIT_CHECK_ENABLED:=true}
+
 # set pid dir path
 if [[ -n "${AMS_COLLECTOR_PID_DIR}" ]]; then
   PIDFILE=${AMS_COLLECTOR_PID_DIR}/ambari-metrics-collector.pid
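
Switching from unconditional assignments to the : ${VAR:=default} form lets
values exported by ams-env.sh win, because the shell idiom assigns the default
only when the variable is unset or empty. The same precedence rule, expressed
in Python for clarity:

    import os

    # Prefer the value exported by ams-env.sh (or the environment) if it is
    # present and non-empty; otherwise fall back to the built-in default,
    # mirroring : ${AMS_HBASE_NORMALIZER_ENABLED:=true}
    normalizer_enabled = os.environ.get("AMS_HBASE_NORMALIZER_ENABLED") or "true"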

http://git-wip-us.apache.org/repos/asf/ambari/blob/51669089/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java
index ac6b3c5..40dcd2f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java
@@ -1216,16 +1216,16 @@ public class UpgradeCatalog220 extends AbstractUpgradeCatalog {
         "export AMS_COLLECTOR_OPTS=\"$AMS_COLLECTOR_OPTS $AMS_COLLECTOR_GC_OPTS\"\n";
     }
 
-    if (!content.contains("HBASE_NORMALIZATION_ENABLED")) {
+    if (!content.contains("AMS_HBASE_NORMALIZER_ENABLED")) {
       content += "\n" +
-        "# HBase compaction policy enabled\n" +
-        "export HBASE_NORMALIZATION_ENABLED={{ams_hbase_normalizer_enabled}}\n";
+        "# HBase normalizer enabled\n" +
+        "export AMS_HBASE_NORMALIZER_ENABLED={{ams_hbase_normalizer_enabled}}\n";
     }
 
-    if (!content.contains("HBASE_FIFO_COMPACTION_POLICY_ENABLED")) {
+    if (!content.contains("AMS_HBASE_FIFO_COMPACTION_ENABLED")) {
       content += "\n" +
         "# HBase compaction policy enabled\n" +
-        "export HBASE_FIFO_COMPACTION_POLICY_ENABLED={{ams_hbase_fifo_compaction_policy_enabled}}\n";
+        "export AMS_HBASE_FIFO_COMPACTION_ENABLED={{ams_hbase_fifo_compaction_enabled}}\n";
     }
 
     return content;
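
The guard pattern in this method is what keeps the upgrade idempotent: each
export block is appended only when its (now-corrected) variable name is absent
from the existing content. A small Python sketch of the same pattern:

    def ensure_export(content, marker, block):
        # Append the export block only once, keyed on the variable name,
        # so re-running the upgrade never duplicates it.
        if marker not in content:
            content += block
        return content

    content = ensure_export(
        "", "AMS_HBASE_NORMALIZER_ENABLED",
        "\n# HBase normalizer enabled\n"
        "export AMS_HBASE_NORMALIZER_ENABLED={{ams_hbase_normalizer_enabled}}\n")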

http://git-wip-us.apache.org/repos/asf/ambari/blob/51669089/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
index 78b8999..836e159 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
@@ -101,7 +101,7 @@ export AMS_HBASE_PID_DIR={{hbase_pid_dir}}
 # AMS Collector heapsize
 export AMS_COLLECTOR_HEAPSIZE={{metrics_collector_heapsize}}
 
-# HBase compaction policy enabled
+# HBase normalizer enabled
 export AMS_HBASE_NORMALIZER_ENABLED={{ams_hbase_normalizer_enabled}}
 
 # HBase compaction policy enabled

http://git-wip-us.apache.org/repos/asf/ambari/blob/51669089/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog220Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog220Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog220Test.java
index 8263001..99cabfa 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog220Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog220Test.java
@@ -836,11 +836,11 @@ public class UpgradeCatalog220Test {
       "-Xloggc:{{ams_collector_log_dir}}/collector-gc.log-`date +'%Y%m%d%H%M'`\"\n" +
       "export AMS_COLLECTOR_OPTS=\"$AMS_COLLECTOR_OPTS $AMS_COLLECTOR_GC_OPTS\"\n"+
       "\n" +
-      "# HBase compaction policy enabled\n" +
-      "export HBASE_NORMALIZATION_ENABLED={{ams_hbase_normalizer_enabled}}\n" +
+      "# HBase normalizer enabled\n" +
+      "export AMS_HBASE_NORMALIZER_ENABLED={{ams_hbase_normalizer_enabled}}\n" +
       "\n" +
       "# HBase compaction policy enabled\n" +
-      "export HBASE_FIFO_COMPACTION_POLICY_ENABLED={{ams_hbase_fifo_compaction_policy_enabled}}\n";
+      "export AMS_HBASE_FIFO_COMPACTION_ENABLED={{ams_hbase_fifo_compaction_enabled}}\n";
 
     String result = (String) updateAmsEnvContent.invoke(upgradeCatalog220, oldContent);
     Assert.assertEquals(expectedContent, result);


[21/21] ambari git commit: AMBARI-15378 Create multiple version state filters instead of one combined one on host page for version page (Joe Wang via rzang)

Posted by jl...@apache.org.
AMBARI-15378 Create multiple version state filters instead of one combined one on host page for version page (Joe Wang via rzang)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/171379ab
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/171379ab
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/171379ab

Branch: refs/heads/AMBARI-13364
Commit: 171379abd1bd525c5ff9693f7e8bc27bc182fec4
Parents: a9d5621
Author: Richard Zang <rz...@apache.org>
Authored: Thu Mar 10 15:08:35 2016 -0800
Committer: Richard Zang <rz...@apache.org>
Committed: Thu Mar 10 15:08:35 2016 -0800

----------------------------------------------------------------------
 ambari-web/app/controllers/main/host.js | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/171379ab/ambari-web/app/controllers/main/host.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/host.js b/ambari-web/app/controllers/main/host.js
index efc0ba2..ec745e7 100644
--- a/ambari-web/app/controllers/main/host.js
+++ b/ambari-web/app/controllers/main/host.js
@@ -528,7 +528,7 @@ App.MainHostController = Em.ArrayController.extend(App.TableServerMixin, {
     if (Em.isNone(displayName) || Em.isNone(states) || !states.length) return;
     var colPropAssoc = this.get('colPropAssoc');
     var map = this.get('labelValueMap');
-    var displayStates = [];
+    var stateFilterStrs = [];
 
     var versionFilter = {
       iColumn: 16,
@@ -544,12 +544,11 @@ App.MainHostController = Em.ArrayController.extend(App.TableServerMixin, {
     map["Version State"] = colPropAssoc[stateFilter.iColumn];
     stateFilter.value.forEach(function(state) {
       map[App.HostStackVersion.formatStatus(state)] = state;
-      displayStates.push(App.HostStackVersion.formatStatus(state));
+      stateFilterStrs.push('"Version State": "' + App.HostStackVersion.formatStatus(state) + '"');
     });
     var versionFilterStr = '"Stack Version": "' + versionFilter.value + '"';
-    var stateFilterStr = '"Version State": "' + displayStates.join(',') + '"';
     App.db.setFilterConditions(this.get('name'), [versionFilter, stateFilter]);
-    App.db.setComboSearchQuery(this.get('name'), [versionFilterStr, stateFilterStr].join(' '));
+    App.db.setComboSearchQuery(this.get('name'), [versionFilterStr, stateFilterStrs.join(' ')].join(' '));
   },
 
   goToHostAlerts: function (event) {
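
The net effect on the persisted combo-search string is that each state becomes
its own clause instead of one comma-joined value. A quick illustration in
Python (the version and display-formatted state names are hypothetical):

    version_filter = '"Stack Version": "HDP-2.3.4.0"'
    states = ["Current", "Installed"]  # as returned by formatStatus()

    state_filters = ['"Version State": "%s"' % s for s in states]
    query = ' '.join([version_filter, ' '.join(state_filters)])
    # '"Stack Version": "HDP-2.3.4.0" "Version State": "Current"
    #  "Version State": "Installed"'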


[08/21] ambari git commit: AMBARI-15351 Remove Service: Delete confirmation tweaks. (atkach)

Posted by jl...@apache.org.
AMBARI-15351 Remove Service: Delete confirmation tweaks. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f9d317bd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f9d317bd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f9d317bd

Branch: refs/heads/AMBARI-13364
Commit: f9d317bd29dd88e46038ebfab34682ea524f2d37
Parents: 0c44129
Author: Andrii Tkach <at...@hortonworks.com>
Authored: Thu Mar 10 12:34:48 2016 +0200
Committer: Andrii Tkach <at...@hortonworks.com>
Committed: Thu Mar 10 12:34:48 2016 +0200

----------------------------------------------------------------------
 ambari-web/app/controllers/main/service/item.js     | 16 ++++++++++------
 ambari-web/app/messages.js                          |  5 +++--
 .../test/controllers/main/service/item_test.js      |  2 +-
 3 files changed, 14 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f9d317bd/ambari-web/app/controllers/main/service/item.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/item.js b/ambari-web/app/controllers/main/service/item.js
index eda0485..f37891d 100644
--- a/ambari-web/app/controllers/main/service/item.js
+++ b/ambari-web/app/controllers/main/service/item.js
@@ -1167,11 +1167,14 @@ App.MainServiceItemController = Em.Controller.extend(App.SupportClientConfigsDow
    * @param {string} [servicesToDeleteFmt]
    */
   confirmDeleteService: function (serviceName, dependentServiceNames, servicesToDeleteFmt) {
-    var message = dependentServiceNames && dependentServiceNames.length
-        ? Em.I18n.t('services.service.confirmDelete.popup.body.dependent').format(App.format.role(serviceName), servicesToDeleteFmt)
-        : Em.I18n.t('services.service.confirmDelete.popup.body').format(App.format.role(serviceName)),
-        confirmKey = 'yes',
-        self = this;
+    var confirmKey = 'delete',
+        self = this,
+        message = Em.I18n.t('services.service.confirmDelete.popup.body').format(App.format.role(serviceName), confirmKey);
+
+    if (dependentServiceNames.length > 0) {
+      message = Em.I18n.t('services.service.confirmDelete.popup.body.dependent')
+                .format(App.format.role(serviceName), servicesToDeleteFmt, confirmKey);
+    }
 
     App.ModalPopup.show({
 
@@ -1213,9 +1216,10 @@ App.MainServiceItemController = Em.Controller.extend(App.SupportClientConfigsDow
        */
       bodyClass: Em.View.extend({
         confirmKey: confirmKey,
+        typeMessage: Em.I18n.t('services.service.confirmDelete.popup.body.type').format(confirmKey),
         template: Em.Handlebars.compile(message +
         '<div class="form-inline align-center"></br>' +
-        '<label><b>{{t common.enter}}&nbsp;{{view.confirmKey}}</b></label>&nbsp;' +
+        '<label><b>{{view.typeMessage}}</b></label>&nbsp;' +
         '{{view Ember.TextField valueBinding="view.parentView.confirmInput" class="input-small"}}</br>' +
         '</div>')
       }),

http://git-wip-us.apache.org/repos/asf/ambari/blob/f9d317bd/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 8e69dd0..6b8317b 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -1724,10 +1724,11 @@ Em.I18n.translations = {
   ' and configuration history will be lost.</b>',
   'services.service.delete.popup.warning.dependent': '<b>Note! {0} will be deleted too.</b>',
   'services.service.confirmDelete.popup.header': 'Confirm Delete',
-  'services.service.confirmDelete.popup.body': 'You must confirm delete of <b>{0}</b> by typing "yes"' +
+  'services.service.confirmDelete.popup.body': 'You must confirm delete of <b>{0}</b> by typing "{1}"' +
   ' in the confirmation box. <b>This operation is not reversible and all configuration history will be lost.</b>',
+  'services.service.confirmDelete.popup.body.type': 'Type "{0}" to confirm',
 
-  'services.service.confirmDelete.popup.body.dependent': 'You must confirm delete of <b>{0}</b> and <b>{1}</b> by typing "yes"' +
+  'services.service.confirmDelete.popup.body.dependent': 'You must confirm delete of <b>{0}</b> and <b>{1}</b> by typing "{2}"' +
   ' in the confirmation box. <b>This operation is not reversible and all configuration history will be lost.</b>',
   'services.service.summary.unknown':'unknown',
   'services.service.summary.notRunning':'Not Running',

http://git-wip-us.apache.org/repos/asf/ambari/blob/f9d317bd/ambari-web/test/controllers/main/service/item_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/service/item_test.js b/ambari-web/test/controllers/main/service/item_test.js
index f2fcd2d..dbb4463 100644
--- a/ambari-web/test/controllers/main/service/item_test.js
+++ b/ambari-web/test/controllers/main/service/item_test.js
@@ -1362,7 +1362,7 @@ describe('App.MainServiceItemController', function () {
     });
 
     it("App.ModalPopup.show should be called", function() {
-      mainServiceItemController.confirmDeleteService();
+      mainServiceItemController.confirmDeleteService('S1', [], '');
       expect(App.ModalPopup.show.calledOnce).to.be.true;
     });
   });


[14/21] ambari git commit: AMBARI-13216: Add a "Add" button to Repo management UI. (mithmatt)

Posted by jl...@apache.org.
AMBARI-13216: Add a "Add" button to Repo management UI. (mithmatt)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3f3fdbd4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3f3fdbd4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3f3fdbd4

Branch: refs/heads/AMBARI-13364
Commit: 3f3fdbd47a20a6401c7090a909d5235ccf996d83
Parents: e4544a5
Author: Matt <mm...@pivotal.io>
Authored: Thu Mar 10 03:52:33 2016 -0800
Committer: Matt <mm...@pivotal.io>
Committed: Thu Mar 10 03:52:33 2016 -0800

----------------------------------------------------------------------
 .../main/resources/ui/admin-web/app/index.html  |  1 +
 .../app/scripts/controllers/mainCtrl.js         |  8 +-
 .../stackVersions/StackVersionsCreateCtrl.js    | 11 ++-
 .../stackVersions/StackVersionsEditCtrl.js      |  9 +-
 .../app/scripts/services/AddRepositoryModal.js  | 98 +++++++++++++++++++
 .../resources/ui/admin-web/app/styles/main.css  |  5 +-
 .../app/views/modals/AddRepositoryModal.html    | 63 +++++++++++++
 .../views/stackVersions/stackVersionPage.html   |  8 +-
 .../test/unit/controllers/mainCtrl_test.js      |  1 +
 .../unit/services/AddRepositoryModal_test.js    | 99 ++++++++++++++++++++
 ambari-web/app/config.js                        |  3 +-
 11 files changed, 298 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/3f3fdbd4/ambari-admin/src/main/resources/ui/admin-web/app/index.html
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/index.html b/ambari-admin/src/main/resources/ui/admin-web/app/index.html
index e7cda02..6557b55 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/index.html
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/index.html
@@ -162,6 +162,7 @@
     <script src="scripts/services/GetDifference.js"></script>
     <script src="scripts/services/UnsavedDialog.js"></script>
     <script src="scripts/services/Stack.js"></script>
+    <script src="scripts/services/AddRepositoryModal.js"></script>
     <!-- endbuild -->
 </body>
 </html>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f3fdbd4/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/mainCtrl.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/mainCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/mainCtrl.js
index 17f5981..5d1d261 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/mainCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/mainCtrl.js
@@ -18,7 +18,7 @@
 'use strict';
 
 angular.module('ambariAdminConsole')
-.controller('MainCtrl',['$scope','$rootScope','$window','Auth', 'Alert', '$modal', 'Cluster', 'View', '$translate', function($scope, $rootScope, $window, Auth, Alert, $modal, Cluster, View, $translate) {
+.controller('MainCtrl',['$scope','$rootScope','$window','Auth', 'Alert', '$modal', 'Cluster', 'View', '$translate', '$http', 'Settings', function($scope, $rootScope, $window, Auth, Alert, $modal, Cluster, View, $translate, $http, Settings) {
   var $t = $translate.instant;
   $scope.signOut = function() {
     Auth.signout().finally(function() {
@@ -27,6 +27,12 @@ angular.module('ambariAdminConsole')
   };
 
   $scope.ambariVersion = null;
+  $rootScope.supports = {};
+
+  $http.get(Settings.baseUrl + "/persist/user-pref-" + Auth.getCurrentUser() + "-supports")
+      .then(function(data) {
+        $rootScope.supports = data.data ? data.data : {};
+      });
 
   $scope.about = function() {
    var ambariVersion = $scope.ambariVersion;

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f3fdbd4/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
index 46e4a11..544e282 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
@@ -18,7 +18,7 @@
 'use strict';
 
 angular.module('ambariAdminConsole')
-.controller('StackVersionsCreateCtrl', ['$scope', 'Stack', '$routeParams', '$location', 'Alert', '$translate', 'Cluster', function($scope, Stack, $routeParams, $location, Alert, $translate, Cluster) {
+.controller('StackVersionsCreateCtrl', ['$scope', 'Stack', '$routeParams', '$location', 'Alert', '$translate', 'Cluster', 'AddRepositoryModal', function($scope, Stack, $routeParams, $location, Alert, $translate, Cluster, AddRepositoryModal) {
   var $t = $translate.instant;
   $scope.constants = {
     os: $t('versions.os')
@@ -195,6 +195,13 @@ angular.module('ambariAdminConsole')
     }
   };
 
+  /**
+   * On click handler for adding a new repository
+   */
+  $scope.addRepository = function() {
+    AddRepositoryModal.show($scope.osList, $scope.upgradeStack.stack_name, $scope.upgradeStack.stack_version, $scope.id);
+  };
+
   $scope.isSaveButtonDisabled = function() {
     var enabled = false;
     $scope.osList.forEach(function(os) {
@@ -203,7 +210,7 @@ angular.module('ambariAdminConsole')
       }
     });
     return !enabled;
-  }
+  };
 
   $scope.defaulfOSRepos = {};
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f3fdbd4/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsEditCtrl.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsEditCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsEditCtrl.js
index d1da4c4..2c3f000 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsEditCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsEditCtrl.js
@@ -18,7 +18,7 @@
 'use strict';
 
 angular.module('ambariAdminConsole')
-.controller('StackVersionsEditCtrl', ['$scope', '$location', 'Cluster', 'Stack', '$routeParams', 'ConfirmationModal', 'Alert', '$translate', function($scope, $location, Cluster, Stack, $routeParams, ConfirmationModal, Alert, $translate) {
+.controller('StackVersionsEditCtrl', ['$scope', '$location', 'Cluster', 'Stack', '$routeParams', 'ConfirmationModal', 'Alert', '$translate', 'AddRepositoryModal', function($scope, $location, Cluster, Stack, $routeParams, ConfirmationModal, Alert, $translate, AddRepositoryModal) {
   var $t = $translate.instant;
   $scope.constants = {
     os: $t('versions.os')
@@ -234,6 +234,13 @@ angular.module('ambariAdminConsole')
     }
   };
 
+  /**
+   * On click handler for adding a new repository
+   */
+  $scope.addRepository = function() {
+    AddRepositoryModal.show($scope.osList, $scope.upgradeStack.stack_name, $scope.upgradeStack.stack_version, $scope.id);
+  };
+
   $scope.isSaveButtonDisabled = function() {
     var enabled = false;
     $scope.osList.forEach(function(os) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f3fdbd4/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/AddRepositoryModal.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/AddRepositoryModal.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/AddRepositoryModal.js
new file mode 100644
index 0000000..96a0155
--- /dev/null
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/AddRepositoryModal.js
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+'use strict';
+
+angular.module('ambariAdminConsole')
+  .factory('AddRepositoryModal', ['$modal', '$q', function($modal, $q) {
+    var modalObject = {};
+
+    modalObject.repoExists = function(existingRepos, repoId) {
+      for(var i = existingRepos.length - 1; i >= 0; --i) {
+        if (existingRepos[i].Repositories.repo_id === repoId) {
+          return true;
+        }
+      }
+      return false;
+    };
+
+    modalObject.getRepositoriesForOS = function (osList, selectedOS) {
+      // Get existing list of repositories for selectedOS
+      for (var i = osList.length - 1; i >= 0; --i) {
+        if (osList[i].OperatingSystems.os_type === selectedOS) {
+          osList[i].repositories = osList[i].repositories || [];
+          return osList[i].repositories;
+        }
+      }
+      return null;
+    };
+
+    modalObject.show = function (osList, stackName, stackVersion, repositoryVersionId) {
+      var deferred = $q.defer();
+      var modalInstance = $modal.open({
+        templateUrl: 'views/modals/AddRepositoryModal.html',
+        controller: ['$scope', '$modalInstance', function ($scope, $modalInstance) {
+          $scope.osTypes = osList.map(function (os) {
+            return os.OperatingSystems.os_type;
+          });
+          $scope.repo = {
+            selectedOS: $scope.osTypes[0]
+          };
+
+          $scope.add = function (newRepo) {
+            var repositoriesForOS = modalObject.getRepositoriesForOS(osList, newRepo.selectedOS);
+
+            // If repo with the same id exists for the selectedOS, show an alert and do not take any action
+            $scope.showAlert = modalObject.repoExists(repositoriesForOS, newRepo.id);
+            if ($scope.showAlert) {
+              return;
+            }
+
+            // If no duplicate repository is found on the selectedOS, add the new repository
+            repositoriesForOS.push({
+              Repositories: {
+                repo_id: newRepo.id,
+                repo_name: newRepo.name,
+                os_type: newRepo.selectedOS,
+                base_url: newRepo.baseUrl,
+                stack_name: stackName,
+                stack_version: stackVersion,
+                repository_version_id: repositoryVersionId
+              }
+            });
+
+            $modalInstance.close();
+            deferred.resolve();
+          };
+
+          $scope.cancel = function () {
+            $modalInstance.dismiss();
+            deferred.reject();
+          };
+        }]
+      });
+      modalInstance.result.then(function () {
+        // Gets triggered on close
+      }, function () {
+        // Gets triggered on dismiss
+        deferred.reject();
+      });
+      return deferred.promise;
+    };
+
+    return modalObject;
+  }]);
\ No newline at end of file
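
AddRepositoryModal.show() returns a promise that resolves after a repository is
added and rejects on cancel or dismiss, so callers may chain follow-up work. A
minimal usage sketch (the follow-up callback is hypothetical; the controllers
in this commit simply ignore the returned promise):

    // Hedged usage sketch for the service defined above.
    AddRepositoryModal.show($scope.osList, stackName, stackVersion, repoVersionId)
      .then(function () {
        // hypothetical follow-up: osList now contains the new repository,
        // so re-run any base-URL validation the page performs
        revalidateRepositories($scope.osList);
      });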

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f3fdbd4/ambari-admin/src/main/resources/ui/admin-web/app/styles/main.css
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/styles/main.css b/ambari-admin/src/main/resources/ui/admin-web/app/styles/main.css
index 9348564..c435914 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/styles/main.css
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/styles/main.css
@@ -1487,5 +1487,6 @@ thead.view-permission-header > tr > th {
   text-align: center;
 }
 
-
-
+.pull-up {
+  margin-top: -2px;
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f3fdbd4/ambari-admin/src/main/resources/ui/admin-web/app/views/modals/AddRepositoryModal.html
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/views/modals/AddRepositoryModal.html b/ambari-admin/src/main/resources/ui/admin-web/app/views/modals/AddRepositoryModal.html
new file mode 100644
index 0000000..a439bde
--- /dev/null
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/views/modals/AddRepositoryModal.html
@@ -0,0 +1,63 @@
+<!--
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+-->
+<div class="modal-header" xmlns="http://www.w3.org/1999/html">
+    <h3 class="modal-title">Add Repository</h3>
+</div>
+<form class="form-horizontal" name="addRepoForm" novalidate>
+    <div class="modal-body">
+        <div class="alert alert-warning hide-soft" ng-class="{'visible' : showAlert}" role="alert">
+            A repository with the same Repo ID already exists for {{repo.selectedOS}}!
+        </div>
+        <div class="form-group">
+            <div class="col-sm-3">
+                <label class="control-label">OS</label>
+            </div>
+            <div class="col-sm-4">
+                <select class="form-control" ng-options="os for os in osTypes" ng-model="repo.selectedOS"></select>
+            </div>
+        </div>
+        <div class="form-group">
+            <div class="col-sm-3">
+                <label class="control-label">Repo ID</label>
+            </div>
+            <div class="col-sm-9">
+                <input name="repoId" type="text" class="form-control" ng-model="repo.id" ng-required="true">
+            </div>
+        </div>
+        <div class="form-group">
+            <div class="col-sm-3">
+                <label class="control-label">Repo Name</label>
+            </div>
+            <div class="col-sm-9">
+                <input name="repoName" type="text" class="form-control" ng-model="repo.name" ng-required="true">
+            </div>
+        </div>
+        <div class="form-group">
+            <div class="col-sm-3">
+                <label class="control-label">Base URL</label>
+            </div>
+            <div class="col-sm-9">
+                <input name="repoUrl" type="text" class="form-control" ng-model="repo.baseUrl" ng-required="true">
+            </div>
+        </div>
+    </div>
+    <div class="modal-footer">
+        <button class="btn btn-default" ng-click="cancel()">Cancel</button>
+        <button class="btn btn-primary" ng-disabled="addRepoForm.$invalid" ng-click="add(repo)" >Add</button>
+    </div>
+</form>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f3fdbd4/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html b/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
index 839b47d..1eac23f 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
@@ -127,7 +127,13 @@
   </div>
   <div class="panel panel-default repos-panel">
     <div class="panel-heading">
-      <h3 class="panel-title">{{'versions.repos' | translate}}</h3>
+      <h3 class="panel-title">
+          {{'versions.repos' | translate}}
+          <button ng-show="supports.addingNewRepository" class="btn btn-primary pull-right btn-xs pull-up" ng-click="addRepository()">
+              <span class="glyphicon glyphicon-plus"></span>
+              {{'common.add' | translate:'{ term: "Repository" }'}}
+          </button>
+      </h3>
     </div>
     <div class="panel-body">
       <div class="alert alert-info" role="alert">{{'versions.alerts.baseURLs' | translate}}</div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f3fdbd4/ambari-admin/src/main/resources/ui/admin-web/test/unit/controllers/mainCtrl_test.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/test/unit/controllers/mainCtrl_test.js b/ambari-admin/src/main/resources/ui/admin-web/test/unit/controllers/mainCtrl_test.js
index 8d748d2..e12a61d 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/test/unit/controllers/mainCtrl_test.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/test/unit/controllers/mainCtrl_test.js
@@ -97,6 +97,7 @@ describe('#Auth', function () {
             }
           ]
         });
+      $httpBackend.whenGET(/\/persist\/user-pref-.*/).respond(200, {data: {data: {addingNewRepository: true}}});
       scope = $rootScope.$new();
       scope.$apply();
       ctrl = $controller('MainCtrl', {$scope: scope});

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f3fdbd4/ambari-admin/src/main/resources/ui/admin-web/test/unit/services/AddRepositoryModal_test.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/test/unit/services/AddRepositoryModal_test.js b/ambari-admin/src/main/resources/ui/admin-web/test/unit/services/AddRepositoryModal_test.js
new file mode 100644
index 0000000..73239b5
--- /dev/null
+++ b/ambari-admin/src/main/resources/ui/admin-web/test/unit/services/AddRepositoryModal_test.js
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+describe('AddRepositoryModal Service', function () {
+  var AddRepositoryModal, $modal;
+  
+  beforeEach(module('ambariAdminConsole', function($provide){
+  }));
+  
+  beforeEach(inject(function (_AddRepositoryModal_, _$modal_) {
+    AddRepositoryModal = _AddRepositoryModal_;
+    $modal = _$modal_;
+
+    spyOn($modal, 'open').andReturn({
+      result: {
+        then: function() {
+        }
+      }
+    });
+  }));
+
+  it('should open modal window', function () {
+    AddRepositoryModal.show();
+    expect($modal.open).toHaveBeenCalled();
+  });
+
+  it('should check if repo exists', function () {
+    var existingRepos = [
+      {
+        Repositories: {
+          repo_id: 'repo1'
+        }
+      }
+    ];
+    expect(AddRepositoryModal.repoExists(existingRepos, 'repo1')).toBe(true);
+    expect(AddRepositoryModal.repoExists(existingRepos, 'repo2')).toBe(false);
+  });
+
+  it('should get repositories for selected OS', function () {
+    var os1Repos = [
+      {
+        Repositories: {
+          os_type: 'os1',
+          repo_id: 'repo1'
+        }
+      }, {
+        Repositories: {
+          os_type: 'os1',
+          repo_id: 'repo2'
+        }
+      }
+    ];
+    var os2Repos = [
+      {
+        Repositories: {
+          os_type: 'os2',
+          repo_id: 'repo1'
+        }
+      },{
+        Repositories: {
+          os_type: 'os2',
+          repo_id: 'repo2'
+        }
+      }
+    ];
+
+    var osList = [
+      {
+        OperatingSystems: {
+          os_type: 'os1'
+        },
+        repositories: os1Repos
+      }, {
+        OperatingSystems: {
+          os_type: 'os2'
+        },
+        repositories: os2Repos
+      }
+    ];
+    expect(AddRepositoryModal.getRepositoriesForOS(osList, 'os1')).toEqual(os1Repos);
+    expect(AddRepositoryModal.getRepositoriesForOS(osList, 'os2')).toEqual(os2Repos);
+    expect(AddRepositoryModal.getRepositoriesForOS(osList, 'os3')).toBe(null);
+  });
+});

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f3fdbd4/ambari-web/app/config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/config.js b/ambari-web/app/config.js
index 92ba8ae..5d3b27a 100644
--- a/ambari-web/app/config.js
+++ b/ambari-web/app/config.js
@@ -79,7 +79,8 @@ App.supports = {
   hostComboSearchBox: true,
   serviceAutoStart: false,
   logSearch: false,
-  redhatSatellite: false
+  redhatSatellite: false,
+  addingNewRepository: false
 };
 
 if (App.enableExperimental) {
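
The flag ships disabled, so the new "Add Repository" button stays hidden by
default. A minimal sketch of flipping it for one ambari-web session (assumes a
browser console on a loaded ambari-web page; the admin console reads its copy
of the flag from the /persist preference shown earlier):

    // Hypothetical: enable the experimental feature at runtime.
    App.supports.addingNewRepository = true;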


[07/21] ambari git commit: AMBARI-15350. Typo in the dependent key (onechiporenko)

Posted by jl...@apache.org.
AMBARI-15350. Typo in the dependent key (onechiporenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0c44129a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0c44129a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0c44129a

Branch: refs/heads/AMBARI-13364
Commit: 0c44129ad18fc6c30e1f49ffa0580fd2d64c3351
Parents: 26b3357
Author: Oleg Nechiporenko <on...@apache.org>
Authored: Wed Mar 9 14:22:01 2016 +0200
Committer: Oleg Nechiporenko <on...@apache.org>
Committed: Thu Mar 10 10:06:52 2016 +0200

----------------------------------------------------------------------
 .../controllers/main/service/info/configs.js    |  2 +-
 .../widgets/create/wizard_controller_test.js    | 11 ++++-------
 ambari-web/test/controllers/main_test.js        | 20 +++++++-------------
 3 files changed, 12 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0c44129a/ambari-web/app/controllers/main/service/info/configs.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/info/configs.js b/ambari-web/app/controllers/main/service/info/configs.js
index a22bb48..85346d6 100644
--- a/ambari-web/app/controllers/main/service/info/configs.js
+++ b/ambari-web/app/controllers/main/service/info/configs.js
@@ -53,7 +53,7 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ConfigsLoader, A
     return this.get('groupsStore').filter(function(group) {
       return this.get('dependentServiceNames').contains(group.get('serviceName'));
     }, this);
-  }.property('content.serviceName', 'dependentServiceNames', 'groupsStore.length', 'groupStore.@each.name'),
+  }.property('content.serviceName', 'dependentServiceNames', 'groupsStore.length', 'groupsStore.@each.name'),
 
   allConfigs: [],
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/0c44129a/ambari-web/test/controllers/main/service/widgets/create/wizard_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/service/widgets/create/wizard_controller_test.js b/ambari-web/test/controllers/main/service/widgets/create/wizard_controller_test.js
index 4733716..92d3642 100644
--- a/ambari-web/test/controllers/main/service/widgets/create/wizard_controller_test.js
+++ b/ambari-web/test/controllers/main/service/widgets/create/wizard_controller_test.js
@@ -36,16 +36,13 @@ describe('App.WidgetWizardController', function () {
   describe("#substitueQueueMetrics", function () {
     beforeEach(function () {
       controller = App.WidgetWizardController.create();
-      sinon.stub(App.YARNService, 'find', function (k) {
-        if ('YARN' === k) {
-          return Em.Object.create({
-            'allQueueNames': ["root", "root/queue1", "root/queue1/queue2", "root/queue1/queue3"]
-          });
-        }
+      sinon.stub(App.YARNService, 'find', function () {
+        return Em.Object.create({
+          'allQueueNames': ["root", "root/queue1", "root/queue1/queue2", "root/queue1/queue3"]
+        });
       });
     });
     afterEach(function () {
-      controller = '';
       App.YARNService.find.restore();
     });
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/0c44129a/ambari-web/test/controllers/main_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main_test.js b/ambari-web/test/controllers/main_test.js
index ce5e021..c026604 100644
--- a/ambari-web/test/controllers/main_test.js
+++ b/ambari-web/test/controllers/main_test.js
@@ -144,19 +144,13 @@ describe('App.MainController', function () {
 
   describe('#updateTitle', function() {
     beforeEach(function () {
-      sinon.stub(App.router, 'get', function(message){
-        if (message === 'clusterController.clusterName') {
-          return 'c1';
-        } else if (message === 'clusterInstallCompleted') {
-          return true;
-        } else if (message === 'clusterController') {
-          return {
-            get: function() {
-              return true;
-            }
-          };
-        }
-      });
+      sinon.stub(App.router, 'get').withArgs('clusterController.clusterName').returns('c1')
+        .withArgs('clusterInstallCompleted').returns(true)
+        .withArgs('clusterController').returns({
+          get: function() {
+            return true;
+          }
+        });
     });
     afterEach(function () {
       App.router.get.restore();


[17/21] ambari git commit: AMBARI-15265. Install & Manage Zeppelin with Ambari (Renjith Kamath via smohanty)

Posted by jl...@apache.org.
AMBARI-15265. Install & Manage Zeppelin with Ambari (Renjith Kamath via smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/76627aa8
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/76627aa8
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/76627aa8

Branch: refs/heads/AMBARI-13364
Commit: 76627aa8e4af8818fff6e9cd56e67b3df5344b10
Parents: e1686f5
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Thu Mar 10 10:50:12 2016 -0800
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Thu Mar 10 10:50:12 2016 -0800

----------------------------------------------------------------------
 .../ZEPPELIN/0.6.0.2.5/alerts.json              |  18 ++
 .../0.6.0.2.5/configuration/zeppelin-config.xml | 179 +++++++++++++++
 .../0.6.0.2.5/configuration/zeppelin-env.xml    | 163 ++++++++++++++
 .../ZEPPELIN/0.6.0.2.5/kerberos.json            |  17 ++
 .../ZEPPELIN/0.6.0.2.5/metainfo.xml             |  64 ++++++
 .../package/scripts/alert_check_zeppelin.py     |  26 +++
 .../0.6.0.2.5/package/scripts/master.py         | 216 +++++++++++++++++++
 .../0.6.0.2.5/package/scripts/params.py         | 155 +++++++++++++
 .../0.6.0.2.5/package/scripts/setup_snapshot.sh |  92 ++++++++
 .../0.6.0.2.5/package/scripts/status_params.py  |  29 +++
 .../stacks/HDP/2.5/role_command_order.json      |   8 +
 .../HDP/2.5/services/ZEPPELIN/metainfo.xml      |  46 ++++
 contrib/views/pom.xml                           |   1 +
 contrib/views/zeppelin/pom.xml                  | 160 ++++++++++++++
 .../ambari/view/zeppelin/ZeppelinServlet.java   |  57 +++++
 .../src/main/resources/WEB-INF/index.jsp        |  56 +++++
 .../zeppelin/src/main/resources/WEB-INF/web.xml |  38 ++++
 .../views/zeppelin/src/main/resources/view.xml  |  48 +++++
 18 files changed, 1373 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/76627aa8/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/alerts.json b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/alerts.json
new file mode 100644
index 0000000..8e9b6e7
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/alerts.json
@@ -0,0 +1,18 @@
+{
+  "ZEPPELIN": {
+    "service": [],
+    "ZEPPELIN_MASTER": [
+      {
+        "name": "zeppelin_server_status",
+        "label": "Zeppelin Server Status",
+        "description": "This host-level alert is triggered if the Zeppelin server cannot be determined to be up and responding to client requests.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "SCRIPT",
+          "path": "ZEPPELIN/0.6.0.2.5/package/scripts/alert_check_zeppelin.py"
+        }
+      }
+    ]
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/76627aa8/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-config.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-config.xml b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-config.xml
new file mode 100644
index 0000000..aa799d3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-config.xml
@@ -0,0 +1,179 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+
+
+    <!-- contents of actual zeppelin-site.xml -->
+
+    <property>
+        <name>zeppelin.server.addr</name>
+        <value>0.0.0.0</value>
+        <description>Server address</description>
+    </property>
+
+    <property>
+        <name>zeppelin.server.port</name>
+        <value>9995</value>
+        <description>Server port. The subsequent port (e.g. 9996) should also be open, as it will
+            be used by the web socket
+        </description>
+    </property>
+
+    <property>
+        <name>zeppelin.notebook.dir</name>
+        <value>notebook</value>
+        <description>notebook persist</description>
+    </property>
+
+    <property>
+        <name>zeppelin.notebook.homescreen</name>
+        <value> </value>
+        <description>id of the notebook to be displayed on the homescreen, e.g. 2A94M5J1Z. An
+            empty value displays the default home screen
+        </description>
+    </property>
+
+    <property>
+        <name>zeppelin.notebook.homescreen.hide</name>
+        <value>false</value>
+        <description>hide the homescreen notebook from the list when this value is set to true</description>
+    </property>
+
+
+    <property>
+        <name>zeppelin.notebook.s3.user</name>
+        <value>user</value>
+        <description>user name for s3 folder structure. If S3 is used to store the notebooks, it is
+            necessary to use the following folder structure bucketname/username/notebook/
+        </description>
+    </property>
+
+    <property>
+        <name>zeppelin.notebook.s3.bucket</name>
+        <value>zeppelin</value>
+        <description>bucket name for notebook storage. If S3 is used to store the notebooks, it is
+            necessary to use the following folder structure bucketname/username/notebook/
+        </description>
+    </property>
+
+    <property>
+        <name>zeppelin.notebook.storage</name>
+        <value>org.apache.zeppelin.notebook.repo.VFSNotebookRepo</value>
+        <description>notebook persistence layer implementation. If S3 is used, set this to
+            org.apache.zeppelin.notebook.repo.S3NotebookRepo instead. If S3 is used to store the
+            notebooks, it is necessary to use the following folder structure
+            bucketname/username/notebook/
+        </description>
+    </property>
+
+    <property>
+        <name>zeppelin.interpreter.dir</name>
+        <value>interpreter</value>
+        <description>Interpreter implementation base directory</description>
+    </property>
+
+    <property>
+        <name>zeppelin.interpreters</name>
+        <value>org.apache.zeppelin.spark.SparkInterpreter,org.apache.zeppelin.spark.PySparkInterpreter,org.apache.zeppelin.spark.SparkSqlInterpreter,org.apache.zeppelin.spark.DepInterpreter,org.apache.zeppelin.markdown.Markdown,org.apache.zeppelin.angular.AngularInterpreter,org.apache.zeppelin.shell.ShellInterpreter,org.apache.zeppelin.hive.HiveInterpreter,org.apache.zeppelin.tajo.TajoInterpreter,org.apache.zeppelin.flink.FlinkInterpreter,org.apache.zeppelin.lens.LensInterpreter,org.apache.zeppelin.ignite.IgniteInterpreter,org.apache.zeppelin.ignite.IgniteSqlInterpreter,org.apache.zeppelin.cassandra.CassandraInterpreter,org.apache.zeppelin.geode.GeodeOqlInterpreter,org.apache.zeppelin.postgresql.PostgreSqlInterpreter,org.apache.zeppelin.phoenix.PhoenixInterpreter,org.apache.zeppelin.kylin.KylinInterpreter,org.apache.zeppelin.livy.LivySparkInterpreter,org.apache.zeppelin.livy.LivyPySparkInterpreter,org.apache.zeppelin.livy.LivySparkRInterpreter,org.apache.zeppelin.livy.LivySparkSQLInterpreter</value>
+        <description>Comma separated interpreter configurations. The first interpreter becomes
+            the default
+        </description>
+    </property>
+
+    <property>
+        <name>zeppelin.interpreter.connect.timeout</name>
+        <value>30000</value>
+        <description>Interpreter process connect timeout in msec.</description>
+    </property>
+
+    <property>
+        <name>zeppelin.ssl</name>
+        <value>false</value>
+        <description>Should SSL be used by the servers?</description>
+    </property>
+
+
+    <property>
+        <name>zeppelin.ssl.client.auth</name>
+        <value>false</value>
+        <description>Should client authentication be used for SSL connections?</description>
+    </property>
+
+    <property>
+        <name>zeppelin.ssl.keystore.path</name>
+        <value>conf/keystore</value>
+        <description>Path to keystore relative to Zeppelin home</description>
+    </property>
+
+    <property>
+        <name>zeppelin.ssl.keystore.type</name>
+        <value>JKS</value>
+        <description>The format of the given keystore (e.g. JKS or PKCS12)</description>
+    </property>
+
+    <property>
+        <name>zeppelin.ssl.keystore.password</name>
+        <value>change me</value>
+        <description>Keystore password. Can be obfuscated by the Jetty Password tool</description>
+    </property>
+
+
+    <property>
+        <name>zeppelin.ssl.key.manager.password</name>
+        <value>change me</value>
+        <description>Key Manager password. Defaults to keystore password. Can be obfuscated.
+        </description>
+    </property>
+
+
+    <property>
+        <name>zeppelin.ssl.truststore.path</name>
+        <value>conf/truststore</value>
+        <description>Path to truststore relative to Zeppelin home. Defaults to the keystore path
+        </description>
+    </property>
+
+    <property>
+        <name>zeppelin.ssl.truststore.type</name>
+        <value>JKS</value>
+        <description>The format of the given truststore (e.g. JKS or PKCS12). Defaults to the same
+            type as the keystore type
+        </description>
+    </property>
+
+
+    <property>
+        <name>zeppelin.ssl.truststore.password</name>
+        <value>change me</value>
+        <description>Truststore password. Can be obfuscated by the Jetty Password tool. Defaults to
+            the keystore password
+        </description>
+    </property>
+
+    <property>
+        <name>zeppelin.server.allowed.origins</name>
+        <value>*</value>
+        <description>Allowed sources for REST and WebSocket requests (i.e.
+            http://onehost:8080,http://otherhost.com). If you leave * you are vulnerable to
+            https://issues.apache.org/jira/browse/ZEPPELIN-173
+        </description>
+    </property>
+
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/76627aa8/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
new file mode 100644
index 0000000..d99dc2b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-env.xml
@@ -0,0 +1,163 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+
+ <property>
+    <name>zeppelin_pid_dir</name>
+    <value>/var/run/zeppelin-notebook</value>
+    <description>Dir containing process ID file</description>
+  </property>
+
+  <property>
+    <name>zeppelin_user</name>
+    <value>zeppelin</value>
+    <property-type>USER</property-type>
+    <description>User zeppelin daemon runs as</description>
+  </property>
+
+  <property>
+    <name>zeppelin_group</name>
+    <value>zeppelin</value>
+    <property-type>GROUP</property-type>
+    <description>zeppelin group</description>
+  </property>
+
+  <property>
+    <name>zeppelin_log_dir</name>
+    <value>/var/log/zeppelin</value>
+    <description>Zeppelin Log dir</description>
+  </property>
+
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for the zeppelin-env.sh file</description>
+    <value>
+# Spark master url. eg. spark://master_addr:7077. Leave empty if you want to use local mode
+export MASTER=yarn-client
+export SPARK_YARN_JAR={{spark_jar}}
+
+
+# Where log files are stored.  PWD by default.
+export ZEPPELIN_LOG_DIR={{zeppelin_log_dir}}
+
+# The pid files are stored. /tmp by default.
+export ZEPPELIN_PID_DIR={{zeppelin_pid_dir}}
+
+
+export JAVA_HOME={{java64_home}}
+
+# Additional jvm options. for example, export ZEPPELIN_JAVA_OPTS="-Dspark.executor.memory=8g -Dspark.cores.max=16"
+export ZEPPELIN_JAVA_OPTS="-Dhdp.version={{hdp_version}} -Dspark.executor.memory={{executor_mem}} -Dspark.executor.instances={{executor_instances}} -Dspark.yarn.queue={{spark_queue}}"
+
+
+# Zeppelin jvm mem options Default -Xmx1024m -XX:MaxPermSize=512m
+# export ZEPPELIN_MEM
+
+# zeppelin interpreter process jvm mem options. Default = ZEPPELIN_MEM
+# export ZEPPELIN_INTP_MEM
+
+# zeppelin interpreter process jvm options. Default = ZEPPELIN_JAVA_OPTS
+# export ZEPPELIN_INTP_JAVA_OPTS
+
+# Where notebook saved
+# export ZEPPELIN_NOTEBOOK_DIR
+
+# Id of notebook to be displayed in homescreen. ex) 2A94M5J1Z
+# export ZEPPELIN_NOTEBOOK_HOMESCREEN
+
+# hide homescreen notebook from list when this value set to "true". default "false"
+# export ZEPPELIN_NOTEBOOK_HOMESCREEN_HIDE
+
+# Bucket where notebook saved
+# export ZEPPELIN_NOTEBOOK_S3_BUCKET
+
+# User in bucket where notebook saved. For example bucket/user/notebook/2A94M5J1Z/note.json
+# export ZEPPELIN_NOTEBOOK_S3_USER
+
+# A string representing this instance of zeppelin. $USER by default
+# export ZEPPELIN_IDENT_STRING
+
+# The scheduling priority for daemons. Defaults to 0.
+# export ZEPPELIN_NICENESS
+
+
+#### Spark interpreter configuration ####
+
+## Use provided spark installation ##
+## defining SPARK_HOME makes Zeppelin run spark interpreter process using spark-submit
+##
+# (required) When it is defined, load it instead of Zeppelin embedded Spark libraries
+export SPARK_HOME={{spark_home}}
+
+# (optional) extra options to pass to spark submit. eg) "--driver-memory 512M --executor-memory 1G".
+# export SPARK_SUBMIT_OPTIONS
+
+## Use embedded spark binaries ##
+## without SPARK_HOME defined, Zeppelin still able to run spark interpreter process using embedded spark binaries.
+## however, it is not encouraged when you can define SPARK_HOME
+##
+# Options read in YARN client mode
+# yarn-site.xml is located in configuration directory in HADOOP_CONF_DIR.
+export HADOOP_CONF_DIR=/etc/hadoop/conf
+
+# Pyspark (supported with Spark 1.2.1 and above)
+# To configure pyspark, you need to set the spark distribution's path in the 'spark.home' property on the Interpreter settings screen in the Zeppelin GUI
+# path to the python command. must be the same path on the driver(Zeppelin) and all workers.
+# export PYSPARK_PYTHON
+
+export PYTHONPATH="${SPARK_HOME}/python:${SPARK_HOME}/python/lib/py4j-0.8.2.1-src.zip"
+export SPARK_YARN_USER_ENV="PYTHONPATH=${PYTHONPATH}"
+
+## Spark interpreter options ##
+##
+# Use HiveContext instead of SQLContext if set true. true by default.
+# export ZEPPELIN_SPARK_USEHIVECONTEXT
+
+# Execute multiple SQL concurrently if set true. false by default.
+# export ZEPPELIN_SPARK_CONCURRENTSQL
+
+# Max number of SparkSQL result to display. 1000 by default.
+# export ZEPPELIN_SPARK_MAXRESULT
+
+  </value>
+  </property>
+
+  <property>
+    <name>zeppelin.executor.mem</name>
+    <value>512m</value>
+    <description>Executor memory to use (e.g. 512m or 1g)</description>
+  </property>
+
+  <property>
+    <name>zeppelin.executor.instances</name>
+    <value>2</value>
+    <description>Number of executor instances to use (e.g. 2)</description>
+  </property>
+
+  <property>
+    <name>zeppelin.spark.jar.dir</name>
+    <value>/apps/zeppelin</value>
+    <description>Shared location where the zeppelin spark jar will be copied to. Should be accessible
+      by all cluster nodes
+    </description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/76627aa8/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/kerberos.json b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/kerberos.json
new file mode 100644
index 0000000..f69ae45
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/kerberos.json
@@ -0,0 +1,17 @@
+{
+  "services": [
+    {
+      "name": "ZEPPELIN",
+      "identities": [
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "ZEPPELIN_MASTER"
+        }
+      ]
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/76627aa8/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/metainfo.xml b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/metainfo.xml
new file mode 100644
index 0000000..866d746
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/metainfo.xml
@@ -0,0 +1,64 @@
+<?xml version="1.0"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZEPPELIN</name>
+      <displayName>Zeppelin Notebook</displayName>
+      <comment>A web-based notebook that enables interactive data analytics. It enables you to
+        make beautiful data-driven, interactive and collaborative documents with SQL, Scala
+        and more.
+      </comment>
+      <version>0.6.0.2.5</version>
+      <components>
+        <component>
+          <name>ZEPPELIN_MASTER</name>
+          <displayName>Zeppelin Notebook</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/master.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>10000</timeout>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>zeppelin</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <requiredServices>
+        <service>SPARK</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>zeppelin-config</config-type>
+      </configuration-dependencies>
+      <restartRequiredAfterChange>false</restartRequiredAfterChange>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/76627aa8/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/alert_check_zeppelin.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/alert_check_zeppelin.py b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/alert_check_zeppelin.py
new file mode 100644
index 0000000..90c9569
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/alert_check_zeppelin.py
@@ -0,0 +1,26 @@
+import glob
+import sys
+from resource_management.core.exceptions import ComponentIsNotRunning
+# Script and check_process_status are used below; import them as master.py does
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.script.script import Script
+reload(sys)
+sys.setdefaultencoding('utf8')
+config = Script.get_config()
+
+zeppelin_pid_dir = config['configurations']['zeppelin-env']['zeppelin_pid_dir']
+
+RESULT_CODE_OK = 'OK'
+RESULT_CODE_CRITICAL = 'CRITICAL'
+RESULT_CODE_UNKNOWN = 'UNKNOWN'
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  try:
+    pid_file = glob.glob(zeppelin_pid_dir + '/zeppelin-*.pid')[0]
+    check_process_status(pid_file)
+  except ComponentIsNotRunning as ex:
+    return (RESULT_CODE_CRITICAL, [str(ex)])
+  except:
+    return (RESULT_CODE_CRITICAL, ["Zeppelin is not running"])
+
+  return (RESULT_CODE_OK, ["Successful connection to Zeppelin"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/76627aa8/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
new file mode 100644
index 0000000..c24b3fa
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
@@ -0,0 +1,216 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import glob
+import grp
+import os
+import pwd
+import sys
+from resource_management.core.resources import Directory
+from resource_management.core.resources.system import Execute, File
+from resource_management.core.source import InlineTemplate
+from resource_management.libraries import XmlConfig
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.script.script import Script
+
+class Master(Script):
+  def install(self, env):
+    import params
+    env.set_params(params)
+
+    Execute('chmod +x ' + params.service_packagedir + "/scripts/setup_snapshot.sh")
+
+    # Create user and group if they don't exist
+    self.create_linux_user(params.zeppelin_user, params.zeppelin_group)
+    self.install_packages(env)
+
+    Execute('chown -R ' + params.zeppelin_user + ':' + params.zeppelin_group + ' ' + params.zeppelin_dir)
+
+    # create the log, pid, zeppelin dirs
+    Directory([params.zeppelin_pid_dir, params.zeppelin_log_dir, params.zeppelin_dir],
+              owner=params.zeppelin_user,
+              group=params.zeppelin_group,
+              cd_access="a",
+              mode=0755
+              )
+
+    Execute('echo spark_version:' + params.spark_version + ' detected for spark_home: '
+            + params.spark_home + ' >> ' + params.zeppelin_log_file, user=params.zeppelin_user)
+
+    # update the configs specified by user
+    self.configure(env)
+
+    # run setup_snapshot.sh
+    Execute(format("{service_packagedir}/scripts/setup_snapshot.sh {zeppelin_dir} "
+                   "{hive_metastore_host} {hive_metastore_port} {hive_server_port} "
+                   "{zeppelin_host} {zeppelin_port} {setup_view} {service_packagedir} "
+                   "{java64_home} >> {zeppelin_log_file}"),
+            user=params.zeppelin_user)
+
+  def create_linux_user(self, user, group):
+    try:
+      pwd.getpwnam(user)
+    except KeyError:
+      Execute('adduser ' + user)
+    try:
+      grp.getgrnam(group)
+    except KeyError:
+      Execute('groupadd ' + group)
+
+  def create_zeppelin_dir(self, params):
+    params.HdfsResource(format("/user/{zeppelin_user}"),
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.zeppelin_user,
+                        recursive_chown=True,
+                        recursive_chmod=True
+                        )
+    params.HdfsResource(format("/user/{zeppelin_user}/test"),
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.zeppelin_user,
+                        recursive_chown=True,
+                        recursive_chmod=True
+                        )
+    params.HdfsResource(format("/apps/zeppelin"),
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.zeppelin_user,
+                        recursive_chown=True,
+                        recursive_chmod=True
+                        )
+
+    spark_deps_full_path = glob.glob(params.zeppelin_dir + '/interpreter/spark/dep/zeppelin-spark-dependencies-*.jar')[0]
+    spark_dep_file_name = os.path.basename(spark_deps_full_path)
+
+    params.HdfsResource(params.spark_jar_dir + "/" + spark_dep_file_name,
+                        type="file",
+                        action="create_on_execute",
+                        source=spark_deps_full_path,
+                        group=params.zeppelin_group,
+                        owner=params.zeppelin_user,
+                        mode=0444,
+                        replace_existing_files=True,
+                        )
+
+    params.HdfsResource(None, action="execute")
+
+  def configure(self, env):
+    import params
+    import status_params
+    env.set_params(params)
+    env.set_params(status_params)
+
+    # write out zeppelin-site.xml
+    XmlConfig("zeppelin-site.xml",
+              conf_dir=params.conf_dir,
+              configurations=params.config['configurations']['zeppelin-config'],
+              owner=params.zeppelin_user,
+              group=params.zeppelin_group
+              )
+    # write out zeppelin-env.sh
+    env_content = InlineTemplate(params.zeppelin_env_content)
+    File(format("{params.conf_dir}/zeppelin-env.sh"), content=env_content,
+         owner=params.zeppelin_user, group=params.zeppelin_group)  # , mode=0777)
+
+  def stop(self, env):
+    import params
+    Execute(params.zeppelin_dir + '/bin/zeppelin-daemon.sh stop >> ' + params.zeppelin_log_file,
+            user=params.zeppelin_user)
+
+  def start(self, env):
+    import params
+    import status_params
+    self.configure(env)
+
+    if glob.glob(
+            params.zeppelin_dir + '/interpreter/spark/dep/zeppelin-spark-dependencies-*.jar') and os.path.exists(
+      glob.glob(params.zeppelin_dir + '/interpreter/spark/dep/zeppelin-spark-dependencies-*.jar')[0]):
+      self.create_zeppelin_dir(params)
+
+    Execute(params.zeppelin_dir + '/bin/zeppelin-daemon.sh start >> '
+            + params.zeppelin_log_file, user=params.zeppelin_user)
+    pidfile = glob.glob(status_params.zeppelin_pid_dir
+                        + '/zeppelin-' + params.zeppelin_user + '*.pid')[0]
+    Execute('echo pid file is: ' + pidfile, user=params.zeppelin_user)
+    contents = open(pidfile).read()
+    Execute('echo pid is ' + contents, user=params.zeppelin_user)
+
+    # if first_setup:
+    import time
+    time.sleep(20)
+    self.update_zeppelin_interpreter()
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+
+    pid_file = glob.glob(status_params.zeppelin_pid_dir + '/zeppelin-'
+                         + status_params.zeppelin_user + '*.pid')[0]
+    check_process_status(pid_file)
+
+  def update_zeppelin_interpreter(self):
+    import params
+    import json, urllib2
+    zeppelin_int_url = 'http://' + params.zeppelin_host + ':' + str(
+      params.zeppelin_port) + '/api/interpreter/setting/'
+
+    # fetch current interpreter settings for spark, hive, phoenix
+    data = json.load(urllib2.urlopen(zeppelin_int_url))
+    print data
+    for body in data['body']:
+      if body['group'] == 'spark':
+        sparkbody = body
+      elif body['group'] == 'hive':
+        hivebody = body
+      elif body['group'] == 'phoenix':
+        phoenixbody = body
+
+    # if hive installed, update hive settings and post to hive interpreter
+    if (params.hive_server_host):
+      hivebody['properties']['hive.hiveserver2.url'] = 'jdbc:hive2://' \
+                                                       + params.hive_server_host \
+                                                       + ':' + params.hive_server_port
+      self.post_request(zeppelin_int_url + hivebody['id'], hivebody)
+
+    # if hbase installed, update hbase settings and post to phoenix interpreter
+    if (params.zookeeper_znode_parent and params.hbase_zookeeper_quorum):
+      phoenixbody['properties'][
+        'phoenix.jdbc.url'] = "jdbc:phoenix:" + params.hbase_zookeeper_quorum + ':' \
+                              + params.zookeeper_znode_parent
+      self.post_request(zeppelin_int_url + phoenixbody['id'], phoenixbody)
+
+  def post_request(self, url, body):
+    import json, urllib2
+    encoded_body = json.dumps(body)
+    req = urllib2.Request(str(url), encoded_body)
+    req.get_method = lambda: 'PUT'
+    try:
+      response = urllib2.urlopen(req, encoded_body).read()
+    except urllib2.HTTPError, error:
+      print 'Exception: ' + error.read()
+      return  # 'response' is undefined after an HTTPError; bail out
+    jsonresp = json.loads(response.decode('utf-8'))
+    print jsonresp['status']
+
+
+if __name__ == "__main__":
+  Master().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/76627aa8/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/params.py
new file mode 100644
index 0000000..615f63e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/params.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import functools
+import os
+import re
+
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.get_stack_version import get_stack_version
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.script.script import Script
+
+
+def get_port_from_url(address):
+  if address is not None:
+    return address.split(':')[-1]
+  else:
+    return address
+
+
+# server configurations
+config = Script.get_config()
+
+# e.g. /var/lib/ambari-agent/cache/stacks/HDP/2.2/services/zeppelin-stack/package
+service_packagedir = os.path.realpath(__file__).split('/scripts')[0]
+
+zeppelin_dirname = 'zeppelin-server/lib'
+
+install_dir = '/usr/hdp/current'
+executor_mem = config['configurations']['zeppelin-env']['zeppelin.executor.mem']
+executor_instances = config['configurations']['zeppelin-env'][
+  'zeppelin.executor.instances']
+
+spark_jar_dir = config['configurations']['zeppelin-env']['zeppelin.spark.jar.dir']
+spark_jar = format("{spark_jar_dir}/zeppelin-spark-0.5.5-SNAPSHOT.jar")
+setup_view = True
+temp_file = config['configurations']['zeppelin-env']['zeppelin.temp.file']
+spark_home = "/usr/hdp/current/spark-client/"
+
+try:
+  fline = open(spark_home + "/RELEASE").readline().rstrip()
+  spark_version = re.search('Spark (\d\.\d).+', fline).group(1)
+except:
+  pass
+
+# params from zeppelin-config
+zeppelin_port = str(config['configurations']['zeppelin-config']['zeppelin.server.port'])
+
+# params from zeppelin-env
+zeppelin_user = config['configurations']['zeppelin-env']['zeppelin_user']
+zeppelin_group = config['configurations']['zeppelin-env']['zeppelin_group']
+zeppelin_log_dir = config['configurations']['zeppelin-env']['zeppelin_log_dir']
+zeppelin_pid_dir = config['configurations']['zeppelin-env']['zeppelin_pid_dir']
+zeppelin_log_file = os.path.join(zeppelin_log_dir, 'zeppelin-setup.log')
+zeppelin_hdfs_user_dir = format("/user/{zeppelin_user}")
+
+zeppelin_dir = os.path.join(*[install_dir, zeppelin_dirname])
+conf_dir = os.path.join(*[install_dir, zeppelin_dirname, 'conf'])
+notebook_dir = os.path.join(*[install_dir, zeppelin_dirname, 'notebook'])
+
+# zeppelin-env.sh
+zeppelin_env_content = config['configurations']['zeppelin-env']['content']
+
+# detect configs
+master_configs = config['clusterHostInfo']
+java64_home = config['hostLevelParams']['java_home']
+ambari_host = str(master_configs['ambari_server_host'][0])
+zeppelin_host = str(master_configs['zeppelin_master_hosts'][0])
+
+# detect HS2 details, if installed
+
+if 'hive_server_host' in master_configs and len(master_configs['hive_server_host']) != 0:
+  hive_server_host = str(master_configs['hive_server_host'][0])
+  hive_metastore_host = str(master_configs['hive_metastore_host'][0])
+  hive_metastore_port = str(
+    get_port_from_url(config['configurations']['hive-site']['hive.metastore.uris']))
+  hive_server_port = str(config['configurations']['hive-site']['hive.server2.thrift.http.port'])
+else:
+  hive_server_host = None
+  hive_metastore_host = '0.0.0.0'
+  hive_metastore_port = None
+  hive_server_port = None
+
+# detect hbase details if installed
+if 'hbase_master_hosts' in master_configs and 'hbase-site' in config['configurations']:
+  zookeeper_znode_parent = config['configurations']['hbase-site']['zookeeper.znode.parent']
+  hbase_zookeeper_quorum = config['configurations']['hbase-site']['hbase.zookeeper.quorum']
+else:
+  zookeeper_znode_parent = None
+  hbase_zookeeper_quorum = None
+
+# detect spark queue
+if 'spark.yarn.queue' in config['configurations']['spark-defaults']:
+  spark_queue = config['configurations']['spark-defaults']['spark.yarn.queue']
+else:
+  spark_queue = 'default'
+
+# e.g. 2.3
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+
+# e.g. 2.3.0.0
+hdp_stack_version = format_stack_version(stack_version_unformatted)
+
+# e.g. 2.3.0.0-2130
+full_version = default("/commandParams/version", None)
+hdp_version = full_version
+
+spark_client_version = get_stack_version('spark-client')
+
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+# create partial functions with common arguments for every HdfsResource call
+# to create hdfs directory we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file="/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled=security_enabled,
+  keytab=hdfs_user_keytab,
+  kinit_path_local=kinit_path_local,
+  hadoop_bin_dir=hadoop_bin_dir,
+  hadoop_conf_dir=hadoop_conf_dir,
+  principal_name=hdfs_principal_name,
+  hdfs_site=hdfs_site,
+  default_fs=default_fs
+)

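For reference, the partial above means call sites in the service scripts only
supply per-path arguments; the security and cluster plumbing is pre-bound. A
minimal sketch of such a call site (hypothetical excerpt, assuming the usual
resource_management environment and the params module above):

  import params

  # Queue creation of the Zeppelin HDFS user directory; the partial already
  # carries keytab, principal, hdfs-site and default_fs.
  params.HdfsResource(params.zeppelin_hdfs_user_dir,
                      type="directory",
                      action="create_on_execute",
                      owner=params.zeppelin_user,
                      mode=0775)
  # Flush all queued HDFS operations in one batch.
  params.HdfsResource(None, action="execute")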
http://git-wip-us.apache.org/repos/asf/ambari/blob/76627aa8/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/setup_snapshot.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/setup_snapshot.sh b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/setup_snapshot.sh
new file mode 100644
index 0000000..d81369f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/setup_snapshot.sh
@@ -0,0 +1,92 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+set -e
+#e.g. /opt/incubator-zeppelin
+export INSTALL_DIR=$1
+
+#e.g. sandbox.hortonworks.com
+export HIVE_METASTORE_HOST=$2
+
+#e.g. 9083
+export HIVE_METASTORE_PORT=$3
+
+#e.g. 10000
+export HIVE_SERVER_PORT=$4
+
+export ZEPPELIN_HOST=$5
+
+export ZEPPELIN_PORT=$6
+
+#if true, will setup Ambari view and import notebooks
+export SETUP_VIEW=$7
+
+export PACKAGE_DIR=$8
+export java64_home=$9
+
+SETUP_VIEW=${SETUP_VIEW,,}
+echo "SETUP_VIEW is $SETUP_VIEW"
+
+SetupZeppelin () {
+
+  echo "Setting up zeppelin at $INSTALL_DIR"
+  cd "$INSTALL_DIR"
+
+  rm -rf notebook/*
+
+  if [ "$HIVE_METASTORE_HOST" != "0.0.0.0" ]
+  then
+    echo "Hive metastore detected: $HIVE_METASTORE_HOST. Setting up conf/hive-site.xml"
+    echo "<configuration>" > conf/hive-site.xml
+    echo "<property>" >> conf/hive-site.xml
+    echo "   <name>hive.metastore.uris</name>" >> conf/hive-site.xml
+    echo "   <value>thrift://$HIVE_METASTORE_HOST:$HIVE_METASTORE_PORT</value>" >> conf/hive-site.xml
+    echo "</property>" >> conf/hive-site.xml
+    echo "<property>" >> conf/hive-site.xml
+    echo "   <name>hive.server2.thrift.http.port</name>" >> conf/hive-site.xml
+    echo "   <value>$HIVE_SERVER_PORT</value>" >> conf/hive-site.xml
+    echo "</property>" >> conf/hive-site.xml
+    echo "</configuration>" >> conf/hive-site.xml
+  else
+    echo "HIVE_METASTORE_HOST is $HIVE_METASTORE_HOST: Skipping hive-site.xml setup as Hive does not seem to be installed"
+  fi
+
+  if [[ $SETUP_VIEW == "true" ]]
+  then
+    echo "Importing notebooks"
+    mkdir -p notebook
+    cd notebook
+    wget https://github.com/hortonworks-gallery/zeppelin-notebooks/archive/master.zip -O notebooks.zip
+    unzip notebooks.zip
+
+    if [ -d "zeppelin-notebooks-master" ]; then
+      mv zeppelin-notebooks-master/* .
+      rm -rf zeppelin-notebooks-master
+    fi
+
+    cd ..
+  else
+    echo "Skipping import of sample notebooks"
+  fi
+
+}
+
+SetupZeppelin
+echo "Setup complete"

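The script takes nine positional arguments in the order exported above. A
sketch of how the Zeppelin master script might drive it from Python, using
only names defined in params.py (the exact call site is illustrative):

  from resource_management.core.resources.system import Execute
  import params

  cmd = ('{0}/scripts/setup_snapshot.sh {1} {2} {3} {4} {5} {6} {7} {8} {9} '
         '>> {10}').format(
      params.service_packagedir, params.zeppelin_dir,
      params.hive_metastore_host, params.hive_metastore_port,
      params.hive_server_port, params.zeppelin_host, params.zeppelin_port,
      str(params.setup_view).lower(), params.service_packagedir,
      params.java64_home, params.zeppelin_log_file)
  Execute(cmd, user=params.zeppelin_user)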
http://git-wip-us.apache.org/repos/asf/ambari/blob/76627aa8/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/status_params.py
new file mode 100644
index 0000000..35360c6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/status_params.py
@@ -0,0 +1,29 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.script import Script
+
+config = Script.get_config()
+
+zeppelin_pid_dir = config['configurations']['zeppelin-env']['zeppelin_pid_dir']
+zeppelin_user = config['configurations']['zeppelin-env']['zeppelin_user']
+zeppelin_group = config['configurations']['zeppelin-env']['zeppelin_group']
+zeppelin_log_dir = config['configurations']['zeppelin-env']['zeppelin_log_dir']

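These values exist so the status command can run without loading the full
params module. A hypothetical status handler built on them (the pid file
name is illustrative; only check_process_status is real resource_management
API):

  from resource_management.libraries.functions.check_process_status import check_process_status
  import status_params

  def status(env):
      # Zeppelin drops a pid file under zeppelin_pid_dir; this raises
      # ComponentIsNotRunning if the process behind it is gone.
      pid_file = '{0}/zeppelin-{1}.pid'.format(
          status_params.zeppelin_pid_dir, status_params.zeppelin_user)
      check_process_status(pid_file)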
http://git-wip-us.apache.org/repos/asf/ambari/blob/76627aa8/ambari-server/src/main/resources/stacks/HDP/2.5/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.5/role_command_order.json
new file mode 100644
index 0000000..a9e3782
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/role_command_order.json
@@ -0,0 +1,8 @@
+{
+  "_comment" : "Record format:",
+  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
+  "general_deps" : {
+    "_comment" : "dependencies for all cases",
+    "ZEPPELIN_MASTER-START" : ["NAMENODE-START"]
+  }
+}
\ No newline at end of file

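The record format comment above is terse: each key is a blocked role-command
pair and the value lists the commands that must finish first. A small sketch
of reading it (Python's json module accepts the duplicate "_comment" keys;
the last one wins):

  import json

  with open('role_command_order.json') as f:
      rco = json.load(f)

  # ZEPPELIN_MASTER-START is blocked until NAMENODE-START completes.
  print rco['general_deps']['ZEPPELIN_MASTER-START']  # ['NAMENODE-START']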
http://git-wip-us.apache.org/repos/asf/ambari/blob/76627aa8/ambari-server/src/main/resources/stacks/HDP/2.5/services/ZEPPELIN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/ZEPPELIN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/ZEPPELIN/metainfo.xml
new file mode 100644
index 0000000..92e9401
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/ZEPPELIN/metainfo.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>ZEPPELIN</name>
+            <version>0.6.0.2.5</version>
+            <extends>common-services/ZEPPELIN/0.6.0.2.5</extends>
+            <osSpecifics>
+                <osSpecific>
+                    <osFamily>redhat7,amazon2015,redhat6,suse11</osFamily>
+                    <packages>
+                        <package>
+                            <name>zeppelin_${stack_version}</name>
+                        </package>
+                    </packages>
+                </osSpecific>
+                <osSpecific>
+                    <osFamily>debian7,ubuntu12,ubuntu14</osFamily>
+                    <packages>
+                        <package>
+                            <name>zeppelin-${stack_version}</name>
+                        </package>
+                    </packages>
+                </osSpecific>
+            </osSpecifics>
+        </service>
+    </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/76627aa8/contrib/views/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/views/pom.xml b/contrib/views/pom.xml
index eb1972c..18141c6 100644
--- a/contrib/views/pom.xml
+++ b/contrib/views/pom.xml
@@ -42,6 +42,7 @@
     <module>capacity-scheduler</module>
     <module>tez</module>
     <module>storm</module>
+    <module>zeppelin</module>
   </modules>
   <build>
     <pluginManagement>

http://git-wip-us.apache.org/repos/asf/ambari/blob/76627aa8/contrib/views/zeppelin/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/views/zeppelin/pom.xml b/contrib/views/zeppelin/pom.xml
new file mode 100644
index 0000000..3d0161c
--- /dev/null
+++ b/contrib/views/zeppelin/pom.xml
@@ -0,0 +1,160 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.ambari.contrib.views</groupId>
+  <artifactId>zeppelin-view</artifactId>
+  <version>1.0.0.0</version>
+  <name>Zeppelin</name>
+
+  <parent>
+    <groupId>org.apache.ambari.contrib.views</groupId>
+    <artifactId>ambari-contrib-views</artifactId>
+    <version>2.0.0.0-SNAPSHOT</version>
+  </parent>
+
+  <dependencies>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <version>4.8.1</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.easymock</groupId>
+      <artifactId>easymock</artifactId>
+      <version>3.1</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.ambari</groupId>
+      <artifactId>ambari-views</artifactId>
+      <version>[1.7.0.0,)</version>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-server</artifactId>
+      <version>1.8</version>
+    </dependency>
+    <dependency>
+      <groupId>javax.servlet</groupId>
+      <artifactId>servlet-api</artifactId>
+      <version>2.5</version>
+      <scope>provided</scope>
+    </dependency>
+
+
+    <dependency>
+      <groupId>commons-configuration</groupId>
+      <artifactId>commons-configuration</artifactId>
+      <version>1.6</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-csv</artifactId>
+      <version>1.0</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-collections4</artifactId>
+      <version>4.0</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+      <version>1.7.5</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.ambari.contrib.views</groupId>
+      <artifactId>ambari-views-utils</artifactId>
+      <version>2.0.0.0-SNAPSHOT</version>
+    </dependency>
+
+  </dependencies>
+
+  <properties>
+    <ambari.dir>${project.parent.parent.parent.basedir}</ambari.dir>
+    <hive-version>1.0.0</hive-version>
+    <ambari.version>1.3.0.0-SNAPSHOT</ambari.version>
+  </properties>
+  <build>
+    <plugins>
+
+      <!-- Building frontend -->
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>3.1</version>
+        <configuration>
+          <source>1.7</source>
+          <target>1.7</target>
+        </configuration>
+      </plugin>
+      <plugin>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <executions>
+          <execution>
+            <phase>generate-resources</phase>
+            <goals>
+              <goal>copy-dependencies</goal>
+            </goals>
+            <configuration>
+              <outputDirectory>${project.build.directory}/lib</outputDirectory>
+              <includeScope>runtime</includeScope>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.vafer</groupId>
+        <artifactId>jdeb</artifactId>
+        <version>1.0.1</version>
+        <executions>
+          <execution>
+            <phase>none</phase>
+            <goals>
+              <goal>jdeb</goal>
+            </goals>
+          </execution>
+        </executions>
+        <configuration>
+          <submodules>false</submodules>
+        </configuration>
+      </plugin>
+    </plugins>
+    <resources>
+      <resource>
+        <directory>src/main/resources</directory>
+        <filtering>true</filtering>
+        <includes>
+          <include>WEB-INF/web.xml</include>
+          <include>META-INF/**/*</include>
+          <include>view.xml</include>
+          <include>WEB-INF/index.jsp</include>
+        </includes>
+      </resource>
+      <resource>
+        <targetPath>WEB-INF/lib</targetPath>
+        <filtering>false</filtering>
+        <directory>target/lib</directory>
+      </resource>
+    </resources>
+  </build>
+</project>

http://git-wip-us.apache.org/repos/asf/ambari/blob/76627aa8/contrib/views/zeppelin/src/main/java/org/apache/ambari/view/zeppelin/ZeppelinServlet.java
----------------------------------------------------------------------
diff --git a/contrib/views/zeppelin/src/main/java/org/apache/ambari/view/zeppelin/ZeppelinServlet.java b/contrib/views/zeppelin/src/main/java/org/apache/ambari/view/zeppelin/ZeppelinServlet.java
new file mode 100644
index 0000000..f497599
--- /dev/null
+++ b/contrib/views/zeppelin/src/main/java/org/apache/ambari/view/zeppelin/ZeppelinServlet.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.view.zeppelin;
+
+import org.apache.ambari.view.ViewContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.servlet.ServletConfig;
+import javax.servlet.ServletContext;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+
+
+public class ZeppelinServlet extends HttpServlet {
+    private ViewContext viewContext;
+    private static final Logger LOG = LoggerFactory.getLogger(ZeppelinServlet.class);
+
+    @Override
+    public void init(ServletConfig config) throws ServletException {
+        super.init(config);
+
+        ServletContext context = config.getServletContext();
+        viewContext = (ViewContext) context.getAttribute(ViewContext.CONTEXT_ATTRIBUTE);
+    }
+
+    @Override
+    protected void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException {
+        response.setContentType("text/html");
+        response.setStatus(HttpServletResponse.SC_OK);
+
+        String port = viewContext.getProperties().get("zeppelin.server.port");
+        String publicName = viewContext.getProperties().get("zeppelin.host.publicname");
+        request.setAttribute("port", port);
+        // Also expose the public host name; the JSP reads it as ${publicname}.
+        request.setAttribute("publicname", publicName);
+
+        request.getRequestDispatcher("WEB-INF/index.jsp").forward(request, response);
+    }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/76627aa8/contrib/views/zeppelin/src/main/resources/WEB-INF/index.jsp
----------------------------------------------------------------------
diff --git a/contrib/views/zeppelin/src/main/resources/WEB-INF/index.jsp b/contrib/views/zeppelin/src/main/resources/WEB-INF/index.jsp
new file mode 100644
index 0000000..5c57343
--- /dev/null
+++ b/contrib/views/zeppelin/src/main/resources/WEB-INF/index.jsp
@@ -0,0 +1,56 @@
+<!DOCTYPE html>
+<%--
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+--%>
+<html lang="en">
+<head>
+    <meta charset="utf-8"/>
+</head>
+<body>
+
+<iframe id='zeppelinIFrame' width="100%" seamless="seamless" style="border: 0px;"></iframe>
+<script>
+    var $ = jQuery = parent.jQuery;
+    var iframe = document.querySelector('#zeppelinIFrame');
+    var port = "${port}";
+    var publicName = "${publicname}";
+
+
+    $.getJSON('/api/v1/clusters', function (data) {
+        $.getJSON('/api/v1/clusters/' +
+                data['items'][0]['Clusters']['cluster_name'] +
+                '/hosts?fields=Hosts%2Fpublic_host_name%2Chost_components%2FHostRoles%2Fcomponent_name',
+                function (data) {
+                    for (var i in data['items']) {
+                        for (var j in data['items'][i]['host_components']) {
+                            if (data['items'][i]['host_components'][j]['HostRoles']['component_name'] == 'ZEPPELIN_MASTER') {
+                                var url = '//' + data['items'][i]['host_components'][j]['HostRoles']['host_name'] + ':' + port;
+                                iframe.src = url;
+                                iframe.height = window.innerHeight;
+                                return;
+                            }
+                        }
+                    }
+                });
+    });
+
+    $(window).resize(function () {
+        iframe.height = window.innerHeight;
+    });
+</script>
+</body>
+</html>

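The page first asks Ambari for the cluster name, then walks the host list
looking for the ZEPPELIN_MASTER component. The same lookup, sketched in
Python with the requests library against a placeholder server and
credentials:

  import requests

  base = 'http://ambari.example.com:8080/api/v1'  # placeholder
  auth = ('admin', 'admin')                       # placeholder

  cluster = requests.get(base + '/clusters', auth=auth) \
      .json()['items'][0]['Clusters']['cluster_name']
  hosts = requests.get(
      base + '/clusters/' + cluster +
      '/hosts?fields=Hosts/public_host_name,host_components/HostRoles/component_name',
      auth=auth).json()

  for host in hosts['items']:
      for hc in host['host_components']:
          if hc['HostRoles']['component_name'] == 'ZEPPELIN_MASTER':
              print host['Hosts']['public_host_name']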
http://git-wip-us.apache.org/repos/asf/ambari/blob/76627aa8/contrib/views/zeppelin/src/main/resources/WEB-INF/web.xml
----------------------------------------------------------------------
diff --git a/contrib/views/zeppelin/src/main/resources/WEB-INF/web.xml b/contrib/views/zeppelin/src/main/resources/WEB-INF/web.xml
new file mode 100644
index 0000000..cb0bcd9
--- /dev/null
+++ b/contrib/views/zeppelin/src/main/resources/WEB-INF/web.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0" encoding="ISO-8859-1" ?>
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<web-app xmlns="http://java.sun.com/xml/ns/j2ee"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://java.sun.com/xml/ns/j2ee http://java.sun.com/xml/ns/j2ee/web-app_2_4.xsd"
+         version="2.4">
+
+  <display-name>Zeppelin view servlet</display-name>
+  <description>
+    This is the Zeppelin view servlet application.
+  </description>
+  <servlet>
+    <servlet-name>ZeppelinServlet</servlet-name>
+    <servlet-class>org.apache.ambari.view.zeppelin.ZeppelinServlet</servlet-class>
+  </servlet>
+
+  <servlet-mapping>
+    <servlet-name>ZeppelinServlet</servlet-name>
+    <url-pattern>/</url-pattern>
+  </servlet-mapping>
+</web-app>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/76627aa8/contrib/views/zeppelin/src/main/resources/view.xml
----------------------------------------------------------------------
diff --git a/contrib/views/zeppelin/src/main/resources/view.xml b/contrib/views/zeppelin/src/main/resources/view.xml
new file mode 100644
index 0000000..3c5c5cf
--- /dev/null
+++ b/contrib/views/zeppelin/src/main/resources/view.xml
@@ -0,0 +1,48 @@
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<view>
+  <name>ZEPPELIN</name>
+  <label>Zeppelin View</label>
+  <version>1.0.0</version>
+  <build>${env.BUILD_NUMBER}</build>
+  <description>Ambari view for Apache Zeppelin</description>
+
+  <parameter>
+    <name>zeppelin.server.port</name>
+    <description>Zeppelin Http port (example: 9995).</description>
+    <label>Zeppelin Http port</label>
+    <cluster-config>zeppelin-config/zeppelin.server.port</cluster-config>
+    <required>true</required>
+  </parameter>
+  <parameter>
+    <name>zeppelin.host.publicname</name>
+    <description>Zeppelin host name</description>
+    <label>Zeppelin host name</label>
+    <cluster-config>zeppelin-ambari-config/zeppelin.host.publicname</cluster-config>
+    <required>true</required>
+  </parameter>
+
+  <auto-instance>
+    <name>AUTO_ZEPPELIN_INSTANCE</name>
+    <label>Zeppelin View</label>
+    <description>This view instance is auto created when the Zeppelin service is added to a cluster.</description>
+    <stack-id>HDP-2.*</stack-id>
+    <services>
+      <service>ZEPPELIN</service>
+    </services>
+  </auto-instance>
+</view>
\ No newline at end of file


[10/21] ambari git commit: AMBARI-15228. Ambari overwrites permissions on HDFS directories (aonishuk)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
index 6160b6a..2b54a82 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
@@ -36,6 +36,7 @@
     "clusterName": "c1", 
     "hostname": "c6402.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "stack_name": "HDP", 
@@ -176,6 +177,7 @@
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
 .audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
 ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
 ange=WARN"
         }, 
         "hadoop-env": {
+            "hdfs_tmp_dir": "/tmp",
             "namenode_opt_maxnewsize": "128m", 
             "hadoop_root_logger": "INFO,RFA", 
             "hdfs_log_dir_prefix": "/var/log/hadoop", 
@@ -195,7 +197,8 @@
             "namenode_opt_permsize": "128m"
         }, 
         "cluster-env": {
-            "security_enabled": "false", 
+            "managed_hdfs_resource_property_names": "",
+            "security_enabled": "false",
             "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab", 
             "ignore_groupsusers_create": "false", 
             "override_uid": "true", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
index 576288c..c2281f9 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
@@ -36,6 +36,7 @@
     "clusterName": "c1", 
     "hostname": "c6402.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "stack_name": "HDP", 
@@ -176,6 +177,7 @@
             "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
 .audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
 ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
 ange=WARN"
         }, 
         "hadoop-env": {
+            "hdfs_tmp_dir": "/tmp",
             "namenode_opt_maxnewsize": "128m", 
             "hadoop_root_logger": "INFO,RFA", 
             "hdfs_log_dir_prefix": "/var/log/hadoop", 
@@ -195,7 +197,8 @@
             "namenode_opt_permsize": "128m"
         }, 
         "cluster-env": {
-            "security_enabled": "false", 
+            "managed_hdfs_resource_property_names": "",
+            "security_enabled": "false",
             "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab", 
             "ignore_groupsusers_create": "false", 
             "override_uid": "true", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json
index 5ae0ff2..d44b002 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json
@@ -14,6 +14,7 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://10.0.0.15:8080/resources/", 
@@ -145,6 +146,7 @@
             "ipc.client.connection.maxidletime": "30000"
         }, 
         "hadoop-env": {
+            "hdfs_tmp_dir": "/tmp",
             "dtnode_heapsize": "1024m", 
             "namenode_opt_maxnewsize": "256m", 
             "hdfs_log_dir_prefix": "/var/log/hadoop", 
@@ -160,6 +162,7 @@
             "namenode_opt_permsize": "128m"
         }, 
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "kerberos_domain": "EXAMPLE.COM",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json b/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
index 9e21d9b..9d61915 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
@@ -3,6 +3,7 @@
     "clusterName": "c1",
     "hostname": "c6401.ambari.apache.org",
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
@@ -470,6 +471,7 @@
             "hive_database": "New MySQL Database"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "hdfs_user_principal" : "",
             "hdfs_user_keytab" : "",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/ranger-namenode-start.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ranger-namenode-start.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ranger-namenode-start.json
index 8a16d0c..b163a61 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ranger-namenode-start.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ranger-namenode-start.json
@@ -36,6 +36,7 @@
     "clusterName": "c1",
     "hostname": "c6401.ambari.apache.org",
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://hw10897.ix:8080/resources/",
@@ -251,6 +252,7 @@
             "HTTP_SERVICE_PORT": "6080"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "kerberos_domain": "EXAMPLE.COM",
@@ -330,4 +332,4 @@
             "c6402.ambari.apache.org"
         ]
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json
index 34ffede..d9e3a9e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json
@@ -3,6 +3,7 @@
     "clusterName": "pacan", 
     "hostname": "c6402.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
@@ -70,6 +71,7 @@
             "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "smokeuser": "ambari-qa",
@@ -422,4 +424,4 @@
             "c6402.ambari.apache.org"
         ]
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_secured.json
index a868eeb..597fe94 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_secured.json
@@ -3,6 +3,7 @@
     "clusterName": "pacan", 
     "hostname": "c6402.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
@@ -70,6 +71,7 @@
             "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "true",
             "ignore_groupsusers_create": "false",
             "smokeuser": "ambari-qa",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
index d83ffc8..c772a0e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
@@ -3,6 +3,7 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
@@ -553,6 +554,7 @@
             "is_supported_yarn_ranger": "false"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "true",
             "ignore_groupsusers_create": "false",
             "smokeuser": "ambari-qa",
@@ -563,6 +565,7 @@
             "kinit_path_local": "/usr/bin"
         },
         "hadoop-env": {
+            "hdfs_tmp_dir": "/tmp",
             "namenode_opt_maxnewsize": "200m",
             "hdfs_log_dir_prefix": "/var/log/hadoop",
             "namenode_heapsize": "1024m", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
index 6b7dabc..9c5dae3 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
@@ -3,6 +3,7 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
@@ -502,6 +503,7 @@
             "min_user_id": "1000"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "true",
             "ignore_groupsusers_create": "false",
             "smokeuser": "ambari-qa",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/secured_no_jce_name.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured_no_jce_name.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_no_jce_name.json
index 5913b3e..3eac379 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured_no_jce_name.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_no_jce_name.json
@@ -3,7 +3,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
         "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.0.6/configs/zk-service_check_2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/zk-service_check_2.2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/zk-service_check_2.2.json
index f3defee..33bcfb9 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/zk-service_check_2.2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/zk-service_check_2.2.json
@@ -11,7 +11,8 @@
     "clusterName": "c1", 
     "hostname": "c6403.ambari.apache.org", 
     "hostLevelParams": {
-        "jdk_location": "http://192.168.64.1:8080/resources/", 
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
+        "jdk_location": "http://192.168.64.1:8080/resources/",
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.7.0_45",
         "java_version": "8",
@@ -52,6 +53,7 @@
             "zk_user": "zookeeper"
         }, 
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "kerberos_domain": "EXAMPLE.COM",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
index f6efb8c..8c61b0b 100644
--- a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
+++ b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
@@ -31,6 +31,7 @@ class TestFalconServer(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "FALCON/0.5.0.2.1/package"
   STACK_VERSION = "2.1"
   UPGRADE_STACK_VERSION = "2.2"
+  DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
 
   def test_start_default(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/falcon_server.py",
@@ -132,6 +133,7 @@ class TestFalconServer(RMFTestCase):
                               create_parents = True
                               )
     self.assertResourceCalled('HdfsResource', '/apps/falcon',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -149,6 +151,7 @@ class TestFalconServer(RMFTestCase):
         create_parents = True,
     )
     self.assertResourceCalled('HdfsResource', '/apps/data-mirroring',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -167,6 +170,7 @@ class TestFalconServer(RMFTestCase):
     )
 
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -315,6 +319,7 @@ class TestFalconServer(RMFTestCase):
         create_parents = True,
     )
     self.assertResourceCalled('HdfsResource', '/apps/falcon',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
@@ -335,6 +340,7 @@ class TestFalconServer(RMFTestCase):
         create_parents = True,
     )
     self.assertResourceCalled('HdfsResource', '/apps/data-mirroring',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
@@ -355,6 +361,7 @@ class TestFalconServer(RMFTestCase):
         mode = 0770,
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
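
The Python test diffs in this patch all share one pattern: the RMFTestCase subclass declares a shared DEFAULT_IMMUTABLE_PATHS constant, and every expected HdfsResource call gains an immutable_paths keyword asserting that list. A simplified sketch of the shared-constant idea (the helper below is illustrative; the real assertions go through RMFTestCase.assertResourceCalled):

    DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon',
                               '/mr-history/done', '/app-logs', '/tmp']

    def expected_hdfs_resource_kwargs(**overrides):
        # Base keyword arguments shared by every expected HdfsResource call;
        # individual tests override security_enabled, keytab, and so on.
        kwargs = {'immutable_paths': DEFAULT_IMMUTABLE_PATHS, 'security_enabled': False}
        kwargs.update(overrides)
        return kwargs

    secured = expected_hdfs_resource_kwargs(security_enabled=True)
    assert secured['immutable_paths'] == DEFAULT_IMMUTABLE_PATHS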

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.1/TEZ/test_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/TEZ/test_service_check.py b/ambari-server/src/test/python/stacks/2.1/TEZ/test_service_check.py
index 1d127d2..b1c7d3f 100644
--- a/ambari-server/src/test/python/stacks/2.1/TEZ/test_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.1/TEZ/test_service_check.py
@@ -24,6 +24,7 @@ from stacks.utils.RMFTestCase import *
 class TestTezServiceCheck(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "TEZ/0.4.0.2.1/package"
   STACK_VERSION = "2.1"
+  DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
 
   def test_service_check(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
@@ -39,6 +40,7 @@ class TestTezServiceCheck(RMFTestCase):
     )
 
     self.assertResourceCalled('HdfsResource', '/tmp/tezsmokeoutput',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -51,6 +53,7 @@ class TestTezServiceCheck(RMFTestCase):
     )
 
     self.assertResourceCalled('HdfsResource', '/tmp/tezsmokeinput',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -63,6 +66,7 @@ class TestTezServiceCheck(RMFTestCase):
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
     )
     self.assertResourceCalled('HdfsResource', '/tmp/tezsmokeinput/sample-tez-test',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -76,6 +80,7 @@ class TestTezServiceCheck(RMFTestCase):
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
@@ -116,6 +121,7 @@ class TestTezServiceCheck(RMFTestCase):
                               mode = 0755,
                               )
     self.assertResourceCalled('HdfsResource', '/tmp/tezsmokeoutput',
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
                               security_enabled = True,
                               hadoop_bin_dir = '/usr/bin',
                               keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -130,6 +136,7 @@ class TestTezServiceCheck(RMFTestCase):
                               type = 'directory',
                               )
     self.assertResourceCalled('HdfsResource', '/tmp/tezsmokeinput',
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
                               security_enabled = True,
                               hadoop_bin_dir = '/usr/bin',
                               keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -145,6 +152,7 @@ class TestTezServiceCheck(RMFTestCase):
                               action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                               )
     self.assertResourceCalled('HdfsResource', '/tmp/tezsmokeinput/sample-tez-test',
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
                               security_enabled = True,
                               hadoop_bin_dir = '/usr/bin',
                               keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -161,6 +169,7 @@ class TestTezServiceCheck(RMFTestCase):
                               action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                               )
     self.assertResourceCalled('HdfsResource', None,
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
                               security_enabled = True,
                               hadoop_bin_dir = '/usr/bin',
                               keytab = '/etc/security/keytabs/hdfs.headless.keytab',

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py b/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
index 132924a..d05c9fc 100644
--- a/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
+++ b/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
@@ -32,6 +32,7 @@ origin_exists = os.path.exists
 class TestAppTimelineServer(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "YARN/2.1.0.2.0/package"
   STACK_VERSION = "2.0.6"
+  DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
 
   def test_configure_default(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/application_timeline_server.py",
@@ -188,6 +189,7 @@ class TestAppTimelineServer(RMFTestCase):
                               cd_access='a'
                               )
     self.assertResourceCalled('HdfsResource', None,
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
                               security_enabled = False,
                               hadoop_bin_dir = '/usr/bin',
                               keytab = UnknownConfigurationMock(),

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.1/configs/client-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/client-upgrade.json b/ambari-server/src/test/python/stacks/2.1/configs/client-upgrade.json
index df13b43..170edd8 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/client-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/client-upgrade.json
@@ -47,6 +47,7 @@
     "clusterName": "c1", 
     "hostname": "c6403.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://10.0.0.15:8080/resources/", 
@@ -540,6 +541,7 @@
             "zk_user": "zookeeper"
         }, 
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false", 
             "kerberos_domain": "EXAMPLE.COM",
@@ -672,4 +674,4 @@
             "c6402.ambari.apache.org"
         ]
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json b/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json
index bfde096..92a7516 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json
@@ -31,6 +31,7 @@
     "clusterName": "pacan",
     "hostname": "c6402.ambari.apache.org",
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred",
         "java_home": "/usr/jdk64/jdk1.7.0_45",
@@ -316,6 +317,7 @@
             "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
 j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
 GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "smokeuser": "ambari-qa",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.1/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/default.json b/ambari-server/src/test/python/stacks/2.1/configs/default.json
index f02c485..de5eee9 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/default.json
@@ -3,6 +3,7 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
@@ -660,6 +661,7 @@
             "storm_logs_supported": "false"
         }, 
         "falcon-env": {
+            "falcon_apps_hdfs_dir": "/apps/falcon",
             "falcon_port": "15000", 
             "falcon_pid_dir": "/var/run/falcon", 
             "falcon_log_dir": "/var/log/falcon", 
@@ -694,6 +696,7 @@
             "sqoop_user": "sqoop"
         },
       "cluster-env": {
+        "managed_hdfs_resource_property_names": "",
         "security_enabled": "false",
         "ignore_groupsusers_create": "false",
         "smokeuser": "ambari-qa",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json b/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
index 888666a..26b0559 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
@@ -23,6 +23,7 @@
     "clusterName": "c1", 
     "hostname": "c6402.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "stack_name": "HDP", 
@@ -300,7 +301,8 @@
             "hive.security.authorization.enabled": "false"
         }, 
         "cluster-env": {
-            "security_enabled": "false", 
+            "managed_hdfs_resource_property_names": "",
+            "security_enabled": "false",
             "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab", 
             "ignore_groupsusers_create": "false", 
             "override_uid": "true", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json b/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json
index f9e518d..9c6e128 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json
@@ -31,7 +31,8 @@
     "clusterName": "pacan", 
     "hostname": "c6402.ambari.apache.org", 
     "hostLevelParams": {
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.7.0_45",
         "java_version": "8",
@@ -331,7 +332,8 @@
             "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
 j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
 GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
         }, 
         "cluster-env": {
-            "security_enabled": "true", 
+            "managed_hdfs_resource_property_names": "",
+            "security_enabled": "true",
             "ignore_groupsusers_create": "false", 
             "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab", 
             "kerberos_domain": "EXAMPLE.COM", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.1/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/secured.json b/ambari-server/src/test/python/stacks/2.1/configs/secured.json
index 1b0a80c..6ddab70 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/secured.json
@@ -3,6 +3,7 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
@@ -554,6 +555,7 @@
             "is_supported_yarn_ranger": "false"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "true",
             "ignore_groupsusers_create": "false",
             "smokeuser": "ambari-qa",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.2/PIG/test_pig_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/PIG/test_pig_service_check.py b/ambari-server/src/test/python/stacks/2.2/PIG/test_pig_service_check.py
index 20224ab..906c627 100644
--- a/ambari-server/src/test/python/stacks/2.2/PIG/test_pig_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.2/PIG/test_pig_service_check.py
@@ -24,6 +24,7 @@ from stacks.utils.RMFTestCase import *
 class TestPigServiceCheck(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "PIG/0.12.0.2.0/package"
   STACK_VERSION = "2.2"
+  DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
 
   @patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
   def test_service_check_secure(self, copy_to_hdfs_mock):
@@ -38,6 +39,7 @@ class TestPigServiceCheck(RMFTestCase):
     )
     
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/pigsmoke.out',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -53,6 +55,7 @@ class TestPigServiceCheck(RMFTestCase):
         action = ['delete_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/passwd',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -69,6 +72,7 @@ class TestPigServiceCheck(RMFTestCase):
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -101,6 +105,7 @@ class TestPigServiceCheck(RMFTestCase):
         conf_dir = '/usr/hdp/current/hadoop-client/conf',
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/pigsmoke.out',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -116,6 +121,7 @@ class TestPigServiceCheck(RMFTestCase):
         action = ['delete_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/passwd',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
@@ -134,6 +140,7 @@ class TestPigServiceCheck(RMFTestCase):
 
     copy_to_hdfs_mock.assert_called_with("tez", "hadoop", "hdfs", host_sys_prepped=False)
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py b/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
index 4c3a3d7..cac8bf7 100644
--- a/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
+++ b/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
@@ -28,6 +28,7 @@ from only_for_platform import not_for_platform, PLATFORM_WINDOWS
 class TestJobHistoryServer(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "SPARK/1.2.0.2.2/package"
   STACK_VERSION = "2.2"
+  DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
 
   @patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
   def test_configure_default(self, copy_to_hdfs_mock):
@@ -54,6 +55,7 @@ class TestJobHistoryServer(RMFTestCase):
     )
     self.assert_configure_default()
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
@@ -117,6 +119,7 @@ class TestJobHistoryServer(RMFTestCase):
     )
 
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         action=['execute'],
         hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         default_fs= UnknownConfigurationMock(),
@@ -169,6 +172,7 @@ class TestJobHistoryServer(RMFTestCase):
         mode = 0775
     )
     self.assertResourceCalled('HdfsResource', '/user/spark',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
@@ -185,6 +189,7 @@ class TestJobHistoryServer(RMFTestCase):
         mode = 0775,
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
@@ -240,6 +245,7 @@ class TestJobHistoryServer(RMFTestCase):
         mode = 0775
     )
     self.assertResourceCalled('HdfsResource', '/user/spark',
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
@@ -256,6 +262,7 @@ class TestJobHistoryServer(RMFTestCase):
         mode = 0775,
     )
     self.assertResourceCalled('HdfsResource', None,
+        immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.2/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/default.json b/ambari-server/src/test/python/stacks/2.2/configs/default.json
index 319eb05..7583e27 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/default.json
@@ -3,6 +3,7 @@
     "clusterName": "c1",
     "hostname": "c6401.ambari.apache.org",
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
@@ -183,6 +184,7 @@
             "yarn.resourcemanager.scheduler.address": "c6401.ambari.apache.org:8030"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "smokeuser": "ambari-qa",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.2/configs/default_custom_path_config.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/default_custom_path_config.json b/ambari-server/src/test/python/stacks/2.2/configs/default_custom_path_config.json
index b6a528d..8c168ca 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/default_custom_path_config.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/default_custom_path_config.json
@@ -3,6 +3,7 @@
     "clusterName": "c1",
     "hostname": "c6401.ambari.apache.org",
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred",
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
@@ -180,6 +181,7 @@
             "yarn.resourcemanager.scheduler.address": "c6401.ambari.apache.org:8030"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "smokeuser": "ambari-qa",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.2/configs/falcon-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/falcon-upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/falcon-upgrade.json
index 0a5f6e9..7499cdf 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/falcon-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/falcon-upgrade.json
@@ -30,7 +30,8 @@
     "clusterName": "c1", 
     "hostname": "c6402.ambari.apache.org", 
     "hostLevelParams": {
-        "jdk_location": "http://hw10897.ix:8080/resources/", 
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
+        "jdk_location": "http://hw10897.ix:8080/resources/",
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.7.0_45",
         "java_version": "8",
@@ -167,6 +168,7 @@
             "dfs.namenode.name.dir": "/hadoop/hdfs/namenode"
         }, 
         "falcon-env": {
+            "falcon_apps_hdfs_dir": "/apps/falcon",
             "falcon_port": "15000", 
             "falcon_pid_dir": "/var/run/falcon", 
             "falcon_log_dir": "/var/log/falcon", 
@@ -220,6 +222,7 @@
             "namenode_opt_permsize": "128m"
         }, 
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "kerberos_domain": "EXAMPLE.COM",
@@ -315,4 +318,4 @@
             "c6402.ambari.apache.org"
         ]
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
index d70ab3f..9122a69 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
@@ -34,6 +34,7 @@
     "clusterName": "c1",
     "hostname": "c6402.ambari.apache.org",
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://hw10897.ix:8080/resources/",
@@ -374,6 +375,7 @@
             "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "kerberos_domain": "EXAMPLE.COM",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade-hdfs-secure.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade-hdfs-secure.json b/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade-hdfs-secure.json
index 9d2fd4a..a43f15a 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade-hdfs-secure.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade-hdfs-secure.json
@@ -64,6 +64,7 @@
     "clusterName": "c1", 
     "hostname": "c6406.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6406.ambari.apache.org:8080/resources/", 
@@ -1028,6 +1029,7 @@
             "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n# ***** Set root logger level to DEBUG and its only appender to A.\nlog4j.logger.org.apache.pig=info, A\n\n# ***** A is set to be a ConsoleAppender.\
 nlog4j.appender.A=org.apache.log4j.ConsoleAppender\n# ***** A uses PatternLayout.\nlog4j.appender.A.layout=org.apache.log4j.PatternLayout\nlog4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n"
         }, 
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "true",
             "ignore_groupsusers_create": "false",
             "kerberos_domain": "EXAMPLE.COM",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade.json
index ab32b44..59cd868 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade.json
@@ -64,6 +64,7 @@
     "clusterName": "c1", 
     "hostname": "c6406.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6406.ambari.apache.org:8080/resources/", 
@@ -1028,6 +1029,7 @@
             "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n# ***** Set root logger level to DEBUG and its only appender to A.\nlog4j.logger.org.apache.pig=info, A\n\n# ***** A is set to be a ConsoleAppender.\
 nlog4j.appender.A=org.apache.log4j.ConsoleAppender\n# ***** A uses PatternLayout.\nlog4j.appender.A.layout=org.apache.log4j.PatternLayout\nlog4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n"
         }, 
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "kerberos_domain": "EXAMPLE.COM",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
index 77b2662..63b729e 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/knox_upgrade.json
@@ -33,7 +33,8 @@
     "clusterName": "c1", 
     "hostname": "c6402.ambari.apache.org", 
     "hostLevelParams": {
-        "stack_name": "HDP", 
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
+        "stack_name": "HDP",
         "group_list": "[\"hadoop\",\"users\",\"knox\"]", 
         "host_sys_prepped": "false", 
         "ambari_db_rca_username": "mapred", 
@@ -132,7 +133,8 @@
             "java.security.krb5.conf": "/etc/knox/conf/krb5.conf"
         }, 
         "cluster-env": {
-            "security_enabled": "false", 
+            "managed_hdfs_resource_property_names": "",
+            "security_enabled": "false",
             "ignore_groupsusers_create": "false", 
             "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab", 
             "kerberos_domain": "EXAMPLE.COM", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.2/configs/oozie-downgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/oozie-downgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/oozie-downgrade.json
index b9beed1..7e5346c 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/oozie-downgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/oozie-downgrade.json
@@ -31,6 +31,7 @@
     "clusterName": "c1", 
     "hostname": "c6402.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://hw10897.ix:8080/resources/", 
@@ -174,6 +175,7 @@
             "oozie.system.id": "oozie-${user.name}"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "kerberos_domain": "EXAMPLE.COM",
@@ -272,4 +274,4 @@
             "c6402.ambari.apache.org"
         ]
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.2/configs/oozie-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/oozie-upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/oozie-upgrade.json
index 1c5f4bc..92c09c3 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/oozie-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/oozie-upgrade.json
@@ -31,6 +31,7 @@
     "clusterName": "c1", 
     "hostname": "c6402.ambari.apache.org", 
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
@@ -174,6 +175,7 @@
             "oozie.system.id": "oozie-${user.name}"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "kerberos_domain": "EXAMPLE.COM",
@@ -272,4 +274,4 @@
             "c6402.ambari.apache.org"
         ]
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json b/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json
index 2cc66a6..f60fa8f 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json
@@ -36,7 +36,8 @@
     "clusterName": "c1", 
     "hostname": "c6403.ambari.apache.org", 
     "hostLevelParams": {
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.7.0_67",
         "java_version": "8",
@@ -521,6 +522,7 @@
             "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n# ***** Set root logger level to DEBUG and its only appender to A.\nlog4j.logger.org.apache.pig=info, A\n\n# ***** A is set to be a ConsoleAppender.\
 nlog4j.appender.A=org.apache.log4j.ConsoleAppender\n# ***** A uses PatternLayout.\nlog4j.appender.A.layout=org.apache.log4j.PatternLayout\nlog4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n"
         }, 
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "true",
             "ignore_groupsusers_create": "false",
             "kerberos_domain": "EXAMPLE.COM",
@@ -647,4 +649,4 @@
             "c6402.ambari.apache.org"
         ]
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-default.json b/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-default.json
index 2dbf161..eb2cba8 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-default.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-default.json
@@ -3,6 +3,7 @@
     "clusterName": "c1",
     "hostname": "c6401.ambari.apache.org",
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred",
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
@@ -148,6 +149,7 @@
             "yarn.resourcemanager.scheduler.address": "c6401.ambari.apache.org:8030"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "smokeuser": "ambari-qa",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-secured.json b/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-secured.json
index d7027d3..c3a9b71 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-secured.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-secured.json
@@ -3,6 +3,7 @@
     "clusterName": "c1",
     "hostname": "c6401.ambari.apache.org",
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred",
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
@@ -129,6 +130,7 @@
             "content": " "
         }, 
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "true",
             "ignore_groupsusers_create": "false",
             "smokeuser": "ambari-qa",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-upgrade.json
index 9fd16d3..481b6d4 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-upgrade.json
@@ -56,7 +56,8 @@
     "clusterName": "c1", 
     "hostname": "c6408.ambari.apache.org", 
     "hostLevelParams": {
-        "jdk_location": "http://c6407.ambari.apache.org:8080/resources/", 
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
+        "jdk_location": "http://c6407.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.7.0_67",
         "java_version": "8",
@@ -802,6 +803,7 @@
             "zookeeper_principal_name": "zookeeper/_HOST@EXAMPLE.COM"
         }, 
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "true",
             "ignore_groupsusers_create": "false",
             "kerberos_domain": "EXAMPLE.COM",
@@ -1009,4 +1011,4 @@
             "c6409.ambari.apache.org"
         ]
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.2/configs/ranger-usersync-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/ranger-usersync-upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/ranger-usersync-upgrade.json
index a354d61..200ad23 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/ranger-usersync-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/ranger-usersync-upgrade.json
@@ -56,7 +56,8 @@
     "clusterName": "c1", 
     "hostname": "c6408.ambari.apache.org", 
     "hostLevelParams": {
-        "jdk_location": "http://c6407.ambari.apache.org:8080/resources/", 
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
+        "jdk_location": "http://c6407.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.7.0_67",
         "java_version": "8",
@@ -801,6 +802,7 @@
             "zookeeper_principal_name": "zookeeper/_HOST@EXAMPLE.COM"
         }, 
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "true",
             "ignore_groupsusers_create": "false",
             "kerberos_domain": "EXAMPLE.COM",
@@ -1008,4 +1010,4 @@
             "c6409.ambari.apache.org"
         ]
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.2/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/secured.json b/ambari-server/src/test/python/stacks/2.2/configs/secured.json
index dbe7252..9b17aa3 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/secured.json
@@ -3,6 +3,7 @@
     "clusterName": "c1",
     "hostname": "c6401.ambari.apache.org",
     "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
         "agent_stack_retry_count": "5",
         "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
@@ -169,6 +170,7 @@
             "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "true",
             "ignore_groupsusers_create": "false",
             "smokeuser": "ambari-qa",

http://git-wip-us.apache.org/repos/asf/ambari/blob/66267961/ambari-server/src/test/python/stacks/2.2/configs/spark-job-history-server.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/spark-job-history-server.json b/ambari-server/src/test/python/stacks/2.2/configs/spark-job-history-server.json
index ea617fa..a187d59 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/spark-job-history-server.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/spark-job-history-server.json
@@ -26,7 +26,8 @@
     "clusterName": "c1", 
     "hostname": "c6408.ambari.apache.org", 
     "hostLevelParams": {
-        "jdk_location": "http://c6408.ambari.apache.org:8080/resources/", 
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
+        "jdk_location": "http://c6408.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.7.0_67",
         "java_version": "8",
@@ -97,6 +98,7 @@
             "content": "\n# syntax: [instance].sink|source.[name].[options]=[value]\n\n# This file configures Spark's internal metrics system. The metrics system is\n# divided into instances which correspond to internal components.\n# Each instance can be configured to report its metrics to one or more sinks.\n# Accepted values for [instance] are \"master\", \"worker\", \"executor\", \"driver\",\n# and \"applications\". A wild card \"*\" can be used as an instance name, in\n# which case all instances will inherit the supplied property.\n#\n# Within an instance, a \"source\" specifies a particular set of grouped metrics.\n# there are two kinds of sources:\n# 1. Spark internal sources, like MasterSource, WorkerSource, etc, which will\n# collect a Spark component's internal state. Each instance is paired with a\n# Spark source that is added automatically.\n# 2. Common sources, like JvmSource, which will collect low level state.\n# These can be added through configuration options and ar
 e then loaded\n# using reflection.\n#\n# A \"sink\" specifies where metrics are delivered to. Each instance can be\n# assigned one or more sinks.\n#\n# The sink|source field specifies whether the property relates to a sink or\n# source.\n#\n# The [name] field specifies the name of source or sink.\n#\n# The [options] field is the specific property of this source or sink. The\n# source or sink is responsible for parsing this property.\n#\n# Notes:\n# 1. To add a new sink, set the \"class\" option to a fully qualified class\n# name (see examples below).\n# 2. Some sinks involve a polling period. The minimum allowed polling period\n# is 1 second.\n# 3. Wild card properties can be overridden by more specific properties.\n# For example, master.sink.console.period takes precedence over\n# *.sink.console.period.\n# 4. A metrics specific configuration\n# \"spark.metrics.conf=${SPARK_HOME}/conf/metrics.properties\" should be\n# added to Java properties using -Dspark.metrics.conf=xxx if you wa
 nt to\n# customize metrics system. You can also put the file in ${SPARK_HOME}/conf\n# and it will be loaded automatically.\n# 5. MetricsServlet is added by default as a sink in master, worker and client\n# driver, you can send http request \"/metrics/json\" to get a snapshot of all the\n# registered metrics in json format. For master, requests \"/metrics/master/json\" and\n# \"/metrics/applications/json\" can be sent seperately to get metrics snapshot of\n# instance master and applications. MetricsServlet may not be configured by self.\n#\n\n## List of available sinks and their properties.\n\n# org.apache.spark.metrics.sink.ConsoleSink\n# Name: Default: Description:\n# period 10 Poll period\n# unit seconds Units of poll period\n\n# org.apache.spark.metrics.sink.CSVSink\n# Name: Default: Description:\n# period 10 Poll period\n# unit seconds Units of poll period\n# directory /tmp Where to store CSV files\n\n# org.apache.spark.metrics.sink.GangliaSink\n# Name: Default: Description:\n# 
 host NONE Hostname or multicast group of Ganglia server\n# port NONE Port of Ganglia server(s)\n# period 10 Poll period\n# unit seconds Units of poll period\n# ttl 1 TTL of messages sent by Ganglia\n# mode multicast Ganglia network mode ('unicast' or 'multicast')\n\n# org.apache.spark.metrics.sink.JmxSink\n\n# org.apache.spark.metrics.sink.MetricsServlet\n# Name: Default: Description:\n# path VARIES* Path prefix from the web server root\n# sample false Whether to show entire set of samples for histograms ('false' or 'true')\n#\n# * Default path is /metrics/json for all instances except the master. The master has two paths:\n# /metrics/aplications/json # App information\n# /metrics/master/json # Master information\n\n# org.apache.spark.metrics.sink.GraphiteSink\n# Name: Default: Description:\n# host NONE Hostname of Graphite server\n# port NONE Port of Graphite server\n# period 10 Poll period\n# unit seconds Units of poll period\n# prefix EMPTY STRING Prefix to prepend to metric name
 \n\n## Examples\n# Enable JmxSink for all instances by class name\n#*.sink.jmx.class=org.apache.spark.metrics.sink.JmxSink\n\n# Enable ConsoleSink for all instances by class name\n#*.sink.console.class=org.apache.spark.metrics.sink.ConsoleSink\n\n# Polling period for ConsoleSink\n#*.sink.console.period=10\n\n#*.sink.console.unit=seconds\n\n# Master instance overlap polling period\n#master.sink.console.period=15\n\n#master.sink.console.unit=seconds\n\n# Enable CsvSink for all instances\n#*.sink.csv.class=org.apache.spark.metrics.sink.CsvSink\n\n# Polling period for CsvSink\n#*.sink.csv.period=1\n\n#*.sink.csv.unit=minutes\n\n# Polling directory for CsvSink\n#*.sink.csv.directory=/tmp/\n\n# Worker instance overlap polling period\n#worker.sink.csv.period=10\n\n#worker.sink.csv.unit=minutes\n\n# Enable jvm source for instance master, worker, driver and executor\n#master.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#worker.source.jvm.class=org.apache.spark.metrics.source
 .JvmSource\n\n#driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource"
         },
         "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
             "kerberos_domain": "EXAMPLE.COM",
@@ -150,4 +152,4 @@
             "c6408.ambari.apache.org"
         ]
     }
-}
\ No newline at end of file
+}