Posted to commits@ambari.apache.org by ao...@apache.org on 2014/07/16 14:59:03 UTC

[11/11] git commit: AMBARI-6488. Move global to env in stack definitions (aonishuk)

AMBARI-6488. Move global to env in stack definitions (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b0ae1fdd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b0ae1fdd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b0ae1fdd

Branch: refs/heads/trunk
Commit: b0ae1fdde4cb5f58f64df23f4ffa4e35b25c3601
Parents: 2636029
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Wed Jul 16 15:58:23 2014 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Wed Jul 16 15:58:23 2014 +0300

----------------------------------------------------------------------
 .../libraries/functions/default.py              |   5 -
 .../ambari/server/agent/HeartbeatMonitor.java   |  59 ++--
 .../internal/ClusterResourceProvider.java       |   3 +-
 .../custom_actions/ambari_hdfs_rebalancer.py    |   2 +-
 .../1.3.2/hooks/after-INSTALL/scripts/params.py |  29 +-
 .../scripts/shared_initialization.py            |   2 +-
 .../after-INSTALL/templates/hadoop-env.sh.j2    | 142 --------
 .../hooks/before-INSTALL/scripts/params.py      |  61 ++--
 .../scripts/shared_initialization.py            |   6 -
 .../1.3.2/hooks/before-START/scripts/params.py  |  34 +-
 .../GANGLIA/configuration/ganglia-env.xml       |  70 ++++
 .../services/GANGLIA/configuration/global.xml   |  70 ----
 .../HDP/1.3.2/services/GANGLIA/metainfo.xml     |   2 +-
 .../services/GANGLIA/package/scripts/params.py  |  20 +-
 .../GANGLIA/package/scripts/status_params.py    |   2 +-
 .../services/HBASE/configuration/global.xml     |  50 ---
 .../services/HBASE/configuration/hbase-env.xml  | 122 +++++++
 .../HDP/1.3.2/services/HBASE/metainfo.xml       |   2 +-
 .../services/HBASE/package/scripts/hbase.py     |   5 +-
 .../services/HBASE/package/scripts/params.py    |  35 +-
 .../HBASE/package/scripts/status_params.py      |   4 +-
 .../HBASE/package/templates/hbase-env.sh.j2     | 101 ------
 .../services/HDFS/configuration/global.xml      |  87 -----
 .../services/HDFS/configuration/hadoop-env.xml  | 211 ++++++++++++
 .../stacks/HDP/1.3.2/services/HDFS/metainfo.xml |   2 +-
 .../services/HDFS/package/scripts/params.py     |  37 +-
 .../HDFS/package/scripts/status_params.py       |   4 +-
 .../services/HIVE/configuration/global.xml      | 100 ------
 .../services/HIVE/configuration/hive-env.xml    | 139 ++++++++
 .../stacks/HDP/1.3.2/services/HIVE/metainfo.xml |   4 +-
 .../1.3.2/services/HIVE/package/scripts/hive.py |   2 +-
 .../services/HIVE/package/scripts/params.py     |  37 +-
 .../HIVE/package/scripts/status_params.py       |   4 +-
 .../HIVE/package/templates/hive-env.sh.j2       |  78 -----
 .../services/MAPREDUCE/configuration/global.xml | 150 ---------
 .../MAPREDUCE/configuration/mapred-env.xml      | 150 +++++++++
 .../HDP/1.3.2/services/MAPREDUCE/metainfo.xml   |   2 +-
 .../MAPREDUCE/package/scripts/params.py         |  18 +-
 .../MAPREDUCE/package/scripts/status_params.py  |   4 +-
 .../services/NAGIOS/configuration/global.xml    |  51 ---
 .../NAGIOS/configuration/nagios-env.xml         |  51 +++
 .../HDP/1.3.2/services/NAGIOS/metainfo.xml      |   2 +-
 .../services/NAGIOS/package/scripts/params.py   |  20 +-
 .../services/OOZIE/configuration/global.xml     |  60 ----
 .../services/OOZIE/configuration/oozie-env.xml  | 120 +++++++
 .../HDP/1.3.2/services/OOZIE/metainfo.xml       |   2 +-
 .../services/OOZIE/package/scripts/oozie.py     |   7 +-
 .../services/OOZIE/package/scripts/params.py    |  27 +-
 .../OOZIE/package/scripts/status_params.py      |   2 +-
 .../OOZIE/package/templates/oozie-env.sh.j2     |  88 -----
 .../services/PIG/configuration/pig-env.xml      |  34 ++
 .../stacks/HDP/1.3.2/services/PIG/metainfo.xml  |   2 +-
 .../services/PIG/package/scripts/params.py      |  11 +-
 .../1.3.2/services/PIG/package/scripts/pig.py   |   9 +-
 .../PIG/package/templates/pig-env.sh.j2         |  36 --
 .../services/SQOOP/configuration/sqoop-env.xml  |  49 +++
 .../HDP/1.3.2/services/SQOOP/metainfo.xml       |   3 +
 .../services/SQOOP/package/scripts/params.py    |  11 +-
 .../services/SQOOP/package/scripts/sqoop.py     |   6 +-
 .../SQOOP/package/templates/sqoop-env.sh.j2     |  36 --
 .../WEBHCAT/configuration/webhcat-env.xml       |  54 +++
 .../WEBHCAT/configuration/webhcat-site.xml      |  30 ++
 .../HDP/1.3.2/services/WEBHCAT/metainfo.xml     |   1 +
 .../services/WEBHCAT/package/scripts/params.py  |  22 +-
 .../WEBHCAT/package/scripts/status_params.py    |   2 +-
 .../services/WEBHCAT/package/scripts/webhcat.py |   2 +-
 .../WEBHCAT/package/templates/webhcat-env.sh.j2 |  63 ----
 .../services/ZOOKEEPER/configuration/global.xml |  65 ----
 .../ZOOKEEPER/configuration/zookeeper-env.xml   |  83 +++++
 .../HDP/1.3.2/services/ZOOKEEPER/metainfo.xml   |   2 +-
 .../ZOOKEEPER/package/scripts/params.py         |  27 +-
 .../ZOOKEEPER/package/scripts/status_params.py  |   2 +-
 .../ZOOKEEPER/package/scripts/zookeeper.py      |   8 +-
 .../package/templates/zookeeper-env.sh.j2       |  44 ---
 .../2.0.6/hooks/after-INSTALL/scripts/params.py |  29 +-
 .../scripts/shared_initialization.py            |   2 +-
 .../after-INSTALL/templates/hadoop-env.sh.j2    | 146 --------
 .../hooks/before-INSTALL/scripts/params.py      |  40 +--
 .../2.0.6/hooks/before-START/scripts/params.py  |  38 +--
 .../services/FLUME/configuration/flume-env.xml  |  34 ++
 .../services/FLUME/configuration/global.xml     |  34 --
 .../HDP/2.0.6/services/FLUME/metainfo.xml       |   2 +-
 .../services/FLUME/package/scripts/params.py    |   4 +-
 .../GANGLIA/configuration/ganglia-env.xml       |  70 ++++
 .../services/GANGLIA/configuration/global.xml   |  70 ----
 .../HDP/2.0.6/services/GANGLIA/metainfo.xml     |   2 +-
 .../services/GANGLIA/package/scripts/params.py  |  22 +-
 .../GANGLIA/package/scripts/status_params.py    |   2 +-
 .../services/HBASE/configuration/global.xml     |  50 ---
 .../services/HBASE/configuration/hbase-env.xml  | 122 +++++++
 .../HDP/2.0.6/services/HBASE/metainfo.xml       |   2 +-
 .../services/HBASE/package/scripts/hbase.py     |   7 +-
 .../services/HBASE/package/scripts/params.py    |  35 +-
 .../HBASE/package/scripts/status_params.py      |   4 +-
 .../HBASE/package/templates/hbase-env.sh.j2     | 100 ------
 .../services/HDFS/configuration/global.xml      |  87 -----
 .../services/HDFS/configuration/hadoop-env.xml  | 213 ++++++++++++
 .../stacks/HDP/2.0.6/services/HDFS/metainfo.xml |   2 +-
 .../services/HDFS/package/scripts/namenode.py   |   1 +
 .../services/HDFS/package/scripts/params.py     |  34 +-
 .../HDFS/package/scripts/status_params.py       |   4 +-
 .../services/HIVE/configuration/global.xml      |  90 -----
 .../services/HIVE/configuration/hive-env.xml    | 131 +++++++
 .../stacks/HDP/2.0.6/services/HIVE/metainfo.xml |   2 +-
 .../2.0.6/services/HIVE/package/scripts/hive.py |   2 +-
 .../services/HIVE/package/scripts/params.py     |  39 +--
 .../HIVE/package/scripts/status_params.py       |   4 +-
 .../HIVE/package/templates/hive-env.sh.j2       |  79 -----
 .../services/NAGIOS/configuration/global.xml    |  51 ---
 .../NAGIOS/configuration/nagios-env.xml         |  51 +++
 .../HDP/2.0.6/services/NAGIOS/metainfo.xml      |   2 +-
 .../services/NAGIOS/package/scripts/params.py   |  22 +-
 .../services/OOZIE/configuration/global.xml     |  60 ----
 .../services/OOZIE/configuration/oozie-env.xml  | 128 +++++++
 .../HDP/2.0.6/services/OOZIE/metainfo.xml       |   2 +-
 .../services/OOZIE/package/scripts/oozie.py     |   5 +-
 .../services/OOZIE/package/scripts/params.py    |  26 +-
 .../OOZIE/package/scripts/status_params.py      |   2 +-
 .../OOZIE/package/templates/oozie-env.sh.j2     |  95 ------
 .../services/PIG/configuration/pig-env.xml      |  38 +++
 .../stacks/HDP/2.0.6/services/PIG/metainfo.xml  |   2 +-
 .../services/PIG/package/scripts/params.py      |  11 +-
 .../2.0.6/services/PIG/package/scripts/pig.py   |   5 +-
 .../PIG/package/templates/pig-env.sh.j2         |  39 ---
 .../services/SQOOP/configuration/sqoop-env.xml  |  49 +++
 .../HDP/2.0.6/services/SQOOP/metainfo.xml       |   3 +
 .../services/SQOOP/package/scripts/params.py    |  11 +-
 .../services/SQOOP/package/scripts/sqoop.py     |   7 +-
 .../SQOOP/package/templates/sqoop-env.sh.j2     |  54 ---
 .../WEBHCAT/configuration/webhcat-env.xml       |  54 +++
 .../HDP/2.0.6/services/WEBHCAT/metainfo.xml     |   1 +
 .../services/WEBHCAT/package/scripts/params.py  |  22 +-
 .../WEBHCAT/package/scripts/status_params.py    |   2 +-
 .../services/WEBHCAT/package/scripts/webhcat.py |   2 +-
 .../WEBHCAT/package/templates/webhcat-env.sh.j2 |  62 ----
 .../YARN/configuration-mapred/global.xml        |  44 ---
 .../YARN/configuration-mapred/mapred-env.xml    |  64 ++++
 .../services/YARN/configuration/global.xml      |  55 ---
 .../services/YARN/configuration/yarn-env.xml    | 173 ++++++++++
 .../stacks/HDP/2.0.6/services/YARN/metainfo.xml |   4 +-
 .../services/YARN/package/scripts/params.py     |  34 +-
 .../YARN/package/scripts/status_params.py       |   8 +-
 .../2.0.6/services/YARN/package/scripts/yarn.py |   4 +-
 .../YARN/package/templates/mapred-env.sh.j2     |  27 --
 .../YARN/package/templates/yarn-env.sh.j2       | 146 --------
 .../services/ZOOKEEPER/configuration/global.xml |  65 ----
 .../ZOOKEEPER/configuration/zookeeper-env.xml   |  83 +++++
 .../HDP/2.0.6/services/ZOOKEEPER/metainfo.xml   |   2 +-
 .../ZOOKEEPER/package/scripts/params.py         |  27 +-
 .../ZOOKEEPER/package/scripts/status_params.py  |   2 +-
 .../ZOOKEEPER/package/scripts/zookeeper.py      |   8 +-
 .../package/templates/zookeeper-env.sh.j2       |  44 ---
 .../FALCON/configuration/falcon-env.xml         | 109 ++++++
 .../services/FALCON/configuration/global.xml    |  63 ----
 .../stacks/HDP/2.1/services/FALCON/metainfo.xml |   2 +-
 .../services/FALCON/package/scripts/falcon.py   |   2 +-
 .../services/FALCON/package/scripts/params.py   |  31 +-
 .../FALCON/package/scripts/status_params.py     |   2 +-
 .../FALCON/package/templates/falcon-env.sh.j2   |  73 ----
 .../HDP/2.1/services/HIVE/metainfo.xml.orig     |  51 +++
 .../HDP/2.1/services/HIVE/metainfo.xml.rej      |  13 +
 .../stacks/HDP/2.1/services/OOZIE/metainfo.xml  |   2 +-
 .../stacks/HDP/2.1/services/SQOOP/metainfo.xml  |   4 +
 .../2.1/services/STORM/configuration/global.xml |  39 ---
 .../services/STORM/configuration/storm-env.xml  |  55 +++
 .../stacks/HDP/2.1/services/STORM/metainfo.xml  |   2 +-
 .../services/STORM/package/scripts/params.py    |  13 +-
 .../STORM/package/scripts/status_params.py      |   2 +-
 .../2.1/services/STORM/package/scripts/storm.py |   7 +-
 .../STORM/package/templates/storm-env.sh.j2     |  45 ---
 .../2.1/services/TEZ/configuration/global.xml   |  29 --
 .../2.1/services/TEZ/configuration/tez-env.xml  |  45 +++
 .../stacks/HDP/2.1/services/TEZ/metainfo.xml    |   2 +-
 .../2.1/services/TEZ/package/scripts/params.py  |   5 +-
 .../HDP/2.1/services/TEZ/package/scripts/tez.py |   7 +-
 .../TEZ/package/templates/tez-env.sh.j2         |  23 --
 .../HDP/2.1/services/WEBHCAT/metainfo.xml       |   1 +
 .../2.1/services/YARN/configuration/global.xml  |  60 ----
 .../services/YARN/configuration/yarn-env.xml    | 178 ++++++++++
 .../stacks/HDP/2.1/services/YARN/metainfo.xml   |   2 +-
 .../server/agent/TestHeartbeatMonitor.java      |   4 +-
 .../internal/ClusterResourceProviderTest.java   |  26 +-
 .../stacks/1.3.2/HBASE/test_hbase_client.py     |   8 +-
 .../stacks/1.3.2/HBASE/test_hbase_master.py     |   8 +-
 .../1.3.2/HBASE/test_hbase_regionserver.py      |   8 +-
 .../stacks/1.3.2/HIVE/test_hive_client.py       |   4 +-
 .../stacks/1.3.2/HIVE/test_hive_metastore.py    |   4 +-
 .../stacks/1.3.2/HIVE/test_hive_server.py       |   4 +-
 .../stacks/1.3.2/OOZIE/test_oozie_client.py     |   6 +-
 .../stacks/1.3.2/OOZIE/test_oozie_server.py     |  12 +-
 .../python/stacks/1.3.2/PIG/test_pig_client.py  |   8 +-
 .../python/stacks/1.3.2/SQOOP/test_sqoop.py     |   4 +-
 .../stacks/1.3.2/WEBHCAT/test_webhcat_server.py |   4 +-
 .../1.3.2/ZOOKEEPER/test_zookeeper_client.py    |   4 +-
 .../1.3.2/ZOOKEEPER/test_zookeeper_server.py    |   4 +-
 .../1.3.2/configs/default.hbasedecom.json       | 187 ++++++----
 .../python/stacks/1.3.2/configs/default.json    | 190 +++++++----
 .../1.3.2/configs/default.non_gmetad_host.json  | 187 ++++++----
 .../python/stacks/1.3.2/configs/secured.json    | 154 +++++++++
 .../hooks/after-INSTALL/test_after_install.py   |   2 +-
 .../hooks/before-START/test_before_start.py     |   4 +-
 .../stacks/2.0.6/HBASE/test_hbase_client.py     |  14 +-
 .../stacks/2.0.6/HBASE/test_hbase_master.py     |   8 +-
 .../2.0.6/HBASE/test_hbase_regionserver.py      |   8 +-
 .../stacks/2.0.6/HIVE/test_hive_client.py       |   4 +-
 .../stacks/2.0.6/HIVE/test_hive_metastore.py    |   4 +-
 .../stacks/2.0.6/HIVE/test_hive_server.py       |   4 +-
 .../stacks/2.0.6/OOZIE/test_oozie_client.py     |  10 +-
 .../stacks/2.0.6/OOZIE/test_oozie_server.py     |   6 +-
 .../python/stacks/2.0.6/PIG/test_pig_client.py  |  10 +-
 .../python/stacks/2.0.6/SQOOP/test_sqoop.py     |   5 +-
 .../stacks/2.0.6/WEBHCAT/test_webhcat_server.py |   4 +-
 .../stacks/2.0.6/YARN/test_historyserver.py     |   8 +-
 .../stacks/2.0.6/YARN/test_mapreduce2_client.py |   8 +-
 .../stacks/2.0.6/YARN/test_nodemanager.py       |   8 +-
 .../stacks/2.0.6/YARN/test_resourcemanager.py   |   8 +-
 .../stacks/2.0.6/YARN/test_yarn_client.py       |  12 +-
 .../2.0.6/ZOOKEEPER/test_zookeeper_client.py    |  16 +-
 .../2.0.6/ZOOKEEPER/test_zookeeper_server.py    |  16 +-
 .../2.0.6/configs/default.hbasedecom.json       | 207 +++++++-----
 .../python/stacks/2.0.6/configs/default.json    | 213 +++++++-----
 .../2.0.6/configs/default.non_gmetad_host.json  | 202 ++++++-----
 .../python/stacks/2.0.6/configs/ha_default.json | 129 +++++++
 .../python/stacks/2.0.6/configs/ha_secured.json | 283 ++++++++++------
 .../python/stacks/2.0.6/configs/secured.json    | 337 ++++++++++---------
 .../hooks/after-INSTALL/test_after_install.py   |   4 +-
 .../stacks/2.1/FALCON/test_falcon_client.py     |   2 +-
 .../stacks/2.1/FALCON/test_falcon_server.py     |   2 +-
 .../stacks/2.1/HIVE/test_hive_metastore.py      |  20 +-
 .../stacks/2.1/STORM/test_storm_drpc_server.py  |   6 +-
 .../stacks/2.1/STORM/test_storm_nimbus.py       |   6 +-
 .../stacks/2.1/STORM/test_storm_nimbus_prod.py  |   8 +-
 .../2.1/STORM/test_storm_rest_api_service.py    |   6 +-
 .../stacks/2.1/STORM/test_storm_supervisor.py   |   6 +-
 .../2.1/STORM/test_storm_supervisor_prod.py     |   6 +-
 .../stacks/2.1/STORM/test_storm_ui_server.py    |   6 +-
 .../python/stacks/2.1/TEZ/test_tez_client.py    |   5 +-
 .../stacks/2.1/YARN/test_apptimelineserver.py   |   4 +-
 .../test/python/stacks/2.1/configs/default.json | 123 ++++++-
 .../test/python/stacks/2.1/configs/secured.json | 168 +++++++++
 240 files changed, 5229 insertions(+), 4352 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-agent/src/main/python/resource_management/libraries/functions/default.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/resource_management/libraries/functions/default.py b/ambari-agent/src/main/python/resource_management/libraries/functions/default.py
index 459cb5d..733c03a 100644
--- a/ambari-agent/src/main/python/resource_management/libraries/functions/default.py
+++ b/ambari-agent/src/main/python/resource_management/libraries/functions/default.py
@@ -25,13 +25,8 @@ from resource_management.libraries.script import Script
 from resource_management.libraries.script.config_dictionary import UnknownConfiguration
 from resource_management.core.logger import Logger
 
-default_subdict='/configurations/global'
-
 def default(name, default_value):
   subdicts = filter(None, name.split('/'))
-  
-  if not name.startswith('/'):
-    subdicts = filter(None, default_subdict.split('/')) + subdicts
 
   curr_dict = Script.get_config()
   for x in subdicts:
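
With the implicit '/configurations/global' prefix removed, default() now resolves
only absolute paths such as '/configurations/mapred-env/jtnode_heapsize'. A minimal
sketch of the new lookup contract, with a hypothetical dict standing in for
Script.get_config():

    # Hypothetical stand-in for Script.get_config(); the real function walks
    # the same nested dictionaries, sourced from the command JSON.
    config = {'configurations': {'mapred-env': {'jtnode_heapsize': '2048m'}}}

    def default(name, default_value):
        curr_dict = config
        for key in filter(None, name.split('/')):
            if not isinstance(curr_dict, dict) or key not in curr_dict:
                return default_value
            curr_dict = curr_dict[key]
        return curr_dict

    default('/configurations/mapred-env/jtnode_heapsize', '1024m')  # '2048m'
    default('/configurations/mapred-env/missing', '1024m')          # '1024m'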

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
index cdf178c..8b8b252 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
@@ -240,37 +240,44 @@ public class HeartbeatMonitor implements Runnable {
 
     Map<String, Map<String, String>> configurations = new TreeMap<String, Map<String, String>>();
 
-    // get the cluster config for type 'global'
+    // get the cluster config for type '*-env'
     // apply config group overrides
 
-    Config clusterConfig = cluster.getDesiredConfigByType(GLOBAL);
-    if (clusterConfig != null) {
-      // cluster config for 'global'
-      Map<String, String> props = new HashMap<String, String>(clusterConfig.getProperties());
-
-      // Apply global properties for this host from all config groups
-      Map<String, Map<String, String>> allConfigTags = configHelper
-              .getEffectiveDesiredTags(cluster, hostname);
-
-      Map<String, Map<String, String>> configTags = new HashMap<String,
-              Map<String, String>>();
-
-      for (Map.Entry<String, Map<String, String>> entry : allConfigTags.entrySet()) {
-        if (entry.getKey().equals(GLOBAL)) {
-          configTags.put(GLOBAL, entry.getValue());
+    //Config clusterConfig = cluster.getDesiredConfigByType(GLOBAL);
+    Collection<Config> clusterConfigs = cluster.getAllConfigs();
+    
+    for(Config clusterConfig: clusterConfigs) {
+      if(!clusterConfig.getType().endsWith("-env"))
+        continue;
+    
+      if (clusterConfig != null) {
+        // cluster config for 'global'
+        Map<String, String> props = new HashMap<String, String>(clusterConfig.getProperties());
+  
+        // Apply global properties for this host from all config groups
+        Map<String, Map<String, String>> allConfigTags = configHelper
+                .getEffectiveDesiredTags(cluster, hostname);
+  
+        Map<String, Map<String, String>> configTags = new HashMap<String,
+                Map<String, String>>();
+  
+        for (Map.Entry<String, Map<String, String>> entry : allConfigTags.entrySet()) {
+          if (entry.getKey().equals(clusterConfig.getType())) {
+            configTags.put(clusterConfig.getType(), entry.getValue());
+          }
         }
-      }
-
-      Map<String, Map<String, String>> properties = configHelper
-              .getEffectiveConfigProperties(cluster, configTags);
-
-      if (!properties.isEmpty()) {
-        for (Map<String, String> propertyMap : properties.values()) {
-          props.putAll(propertyMap);
+  
+        Map<String, Map<String, String>> properties = configHelper
+                .getEffectiveConfigProperties(cluster, configTags);
+  
+        if (!properties.isEmpty()) {
+          for (Map<String, String> propertyMap : properties.values()) {
+            props.putAll(propertyMap);
+          }
         }
+  
+        configurations.put(clusterConfig.getType(), props);
       }
-
-      configurations.put(GLOBAL, props);
     }
 
     StatusCommand statusCmd = new StatusCommand();
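
HeartbeatMonitor now assembles the status command's configurations from every
config type whose name ends in '-env' instead of the single 'global' type. The
selection logic, sketched in Python with hypothetical cluster data:

    cluster_configs = {
        'hadoop-env': {'hdfs_user': 'hdfs'},
        'hbase-env':  {'hbase_user': 'hbase'},
        'core-site':  {'fs.defaultFS': 'hdfs://nn:8020'},
    }
    # Only *-env types are forwarded; core-site is skipped in this pass.
    env_configs = {t: p for t, p in cluster_configs.items() if t.endswith('-env')}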

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java
index 59c18f9..464f116 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java
@@ -602,7 +602,6 @@ public class ClusterResourceProvider extends BaseBlueprintProcessor {
       clusterProperties.put(CLUSTER_DESIRED_CONFIGS_PROPERTY_ID +
           "/properties/" + entry.getKey(), entry.getValue());
     }
-
     getManagementController().updateClusters(
         Collections.singleton(getRequest(clusterProperties)), null);
   }
@@ -861,7 +860,7 @@ public class ClusterResourceProvider extends BaseBlueprintProcessor {
     propertyUpdaters.put("hive.metastore.uris", new SingleHostPropertyUpdater("HIVE_SERVER"));
     propertyUpdaters.put("hive_ambari_host", new SingleHostPropertyUpdater("HIVE_SERVER"));
     propertyUpdaters.put("javax.jdo.option.ConnectionURL",
-        new DBPropertyUpdater("MYSQL_SERVER", "global", "hive_database"));
+        new DBPropertyUpdater("MYSQL_SERVER", "hive-env", "hive_database"));
 
     // OOZIE_SERVER
     propertyUpdaters.put("oozie.base.url", new SingleHostPropertyUpdater("OOZIE_SERVER"));

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/custom_actions/ambari_hdfs_rebalancer.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/custom_actions/ambari_hdfs_rebalancer.py b/ambari-server/src/main/resources/custom_actions/ambari_hdfs_rebalancer.py
index 3a62713..fd1e5d1 100644
--- a/ambari-server/src/main/resources/custom_actions/ambari_hdfs_rebalancer.py
+++ b/ambari-server/src/main/resources/custom_actions/ambari_hdfs_rebalancer.py
@@ -37,7 +37,7 @@ class HdfsRebalance(Script):
 
     if security_enabled:
       kinit_path_local = functions.get_kinit_path(
-        [default('kinit_path_local', None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+        ["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
       principal = config['commandParams']['principal']
       keytab = config['commandParams']['keytab']
       Execute(format("{kinit_path_local}  -kt {keytab} {principal}"))
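
With the 'kinit_path_local' global gone, the rebalancer searches only the fixed
candidate directories. A rough behavioral stand-in for functions.get_kinit_path
under that assumption (not the library source):

    import os

    def get_kinit_path(search_dirs):
        # Return the first existing kinit binary from the candidates.
        for d in search_dirs:
            candidate = os.path.join(d, 'kinit')
            if os.path.isfile(candidate):
                return candidate
        return 'kinit'  # assumption: fall back to a plain PATH lookup

    get_kinit_path(['/usr/bin', '/usr/kerberos/bin', '/usr/sbin'])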

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/after-INSTALL/scripts/params.py
index dab3fb9..a4fb172 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/after-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/after-INSTALL/scripts/params.py
@@ -31,31 +31,32 @@ java_home = config['hostLevelParams']['java_home']
 #hadoop params
 hadoop_conf_dir = "/etc/hadoop/conf"
 hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
-hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
 
 #hadoop-env.sh
 if System.get_instance().os_family == "suse":
   jsvc_path = "/usr/lib/bigtop-utils"
 else:
   jsvc_path = "/usr/libexec/bigtop-utils"
-hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['global']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['global']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['global']['namenode_opt_maxnewsize']
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
 
-jtnode_opt_newsize = default("jtnode_opt_newsize","200m")
-jtnode_opt_maxnewsize = default("jtnode_opt_maxnewsize","200m")
-jtnode_heapsize =  default("jtnode_heapsize","1024m")
-ttnode_heapsize = default("ttnode_heapsize","1024m")
+jtnode_opt_newsize = default("/configurations/mapred-env/jtnode_opt_newsize","200m")
+jtnode_opt_maxnewsize = default("/configurations/mapred-env/jtnode_opt_maxnewsize","200m")
+jtnode_heapsize =  default("/configurations/mapred-env/jtnode_heapsize","1024m")
+ttnode_heapsize = default("/configurations/mapred-env/ttnode_heapsize","1024m")
 
-dtnode_heapsize = config['configurations']['global']['dtnode_heapsize']
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
 
-mapred_pid_dir_prefix = default("mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_pid_dir_prefix = default("/configurations/hadoop-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
 mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
 hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
 
 
 #users and groups
-hdfs_user = config['configurations']['global']['hdfs_user']
-user_group = config['configurations']['global']['user_group']
\ No newline at end of file
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+user_group = config['configurations']['hadoop-env']['user_group']
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/after-INSTALL/scripts/shared_initialization.py
index b3c0189..d37f1e3 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/after-INSTALL/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/after-INSTALL/scripts/shared_initialization.py
@@ -38,7 +38,7 @@ def setup_hadoop_env():
   
   File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'),
        owner=tc_owner,
-       content=Template('hadoop-env.sh.j2')
+       content=InlineTemplate(params.hadoop_env_sh_template)
   )
 
 def setup_config():
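
hadoop-env.sh is now rendered from the 'content' property of the hadoop-env
config type via InlineTemplate, rather than from a .j2 file shipped inside the
stack; the template text travels through configuration. Roughly, using jinja2
directly (InlineTemplate additionally resolves names from the params scope):

    from jinja2 import Template

    # Hypothetical one-line excerpt of the 'content' property:
    hadoop_env_sh_template = 'export JAVA_HOME={{java_home}}'
    print(Template(hadoop_env_sh_template).render(java_home='/usr/jdk64/jdk1.6'))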

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/after-INSTALL/templates/hadoop-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/after-INSTALL/templates/hadoop-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/after-INSTALL/templates/hadoop-env.sh.j2
deleted file mode 100644
index cf69efa..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/after-INSTALL/templates/hadoop-env.sh.j2
+++ /dev/null
@@ -1,142 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Set Hadoop-specific environment variables here.
-
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
-
-# The java implementation to use.  Required.
-export JAVA_HOME={{java_home}}
-export HADOOP_HOME_WARN_SUPPRESS=1
-
-# Hadoop Configuration Directory
-#TODO: if env var set that can cause problems
-export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
-
-# Hadoop home directory
-export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
-
-# this is different for HDP1 #
-# Path to jsvc required by secure HDP 2.0 datanode
-# export JSVC_HOME={{jsvc_path}}
-
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
-
-export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
-
-# Extra Java runtime options.  Empty by default.
-export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
-
-# History server logs
-export HADOOP_MAPRED_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
-
-# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
-export HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA -Dmapred.log.dir=$HADOOP_MAPRED_LOG_DIR ${HADOOP_JOBTRACKER_OPTS}"
-
-HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
-HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
-HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
-
-export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
-
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-# On secure datanodes, user to run the datanode as after dropping privileges
-export HADOOP_SECURE_DN_USER={{hdfs_user}}
-
-# Extra ssh options.  Empty by default.
-export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
-
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
-
-# Where log files are stored in the secure data environment.
-export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
-
-# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
-# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
-
-# host:path where hadoop code should be rsync'd from.  Unset by default.
-# export HADOOP_MASTER=master:/home/$USER/src/hadoop
-
-# Seconds to sleep between slave commands.  Unset by default.  This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HADOOP_SLAVE_SLEEP=0.1
-
-# The directory where pid files are stored. /tmp by default.
-export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
-export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
-
-# History server pid
-export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
-
-YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
-
-# A string representing this instance of hadoop. $USER by default.
-export HADOOP_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes.  See 'man nice'.
-
-# export HADOOP_NICENESS=10
-
-# Use libraries from standard classpath
-JAVA_JDBC_LIBS=""
-#Add libraries required by mysql connector
-for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
-do
-  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-done
-#Add libraries required by oracle connector
-for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
-do
-  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-done
-#Add libraries required by nodemanager
-MAPREDUCE_LIBS={{mapreduce_libs_path}}
-export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
-
-# Setting path to hdfs command line
-export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
-
-#Mostly required for hadoop 2.0
-export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/params.py
index e7a0d47..0ae4960 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/params.py
@@ -47,43 +47,42 @@ if System.get_instance().os_family == "suse":
   jsvc_path = "/usr/lib/bigtop-utils"
 else:
   jsvc_path = "/usr/libexec/bigtop-utils"
-hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['global']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['global']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['global']['namenode_opt_maxnewsize']
-
-jtnode_opt_newsize = default("jtnode_opt_newsize","200m")
-jtnode_opt_maxnewsize = default("jtnode_opt_maxnewsize","200m")
-jtnode_heapsize =  default("jtnode_heapsize","1024m")
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+
+jtnode_opt_newsize = default("/configurations/mapred-env/jtnode_opt_newsize","200m")
+jtnode_opt_maxnewsize = default("/configurations/mapred-env/jtnode_opt_maxnewsize","200m")
+jtnode_heapsize =  default("/configurations/mapred-env/jtnode_heapsize","1024m")
 ttnode_heapsize = "1024m"
 
-dtnode_heapsize = config['configurations']['global']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/hadoop-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
 mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
 hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-mapred_log_dir_prefix = default("mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+mapred_log_dir_prefix = "/var/log/hadoop-mapreduce"
 
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
-hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
 
 #users and groups
-yarn_user = config['configurations']['global']['yarn_user']
-hbase_user = config['configurations']['global']['hbase_user']
-nagios_user = config['configurations']['global']['nagios_user']
-oozie_user = config['configurations']['global']['oozie_user']
-webhcat_user = config['configurations']['global']['hcat_user']
-hcat_user = config['configurations']['global']['hcat_user']
-hive_user = config['configurations']['global']['hive_user']
-smoke_user =  config['configurations']['global']['smokeuser']
-mapred_user = config['configurations']['global']['mapred_user']
-hdfs_user = config['configurations']['global']['hdfs_user']
-zk_user = config['configurations']['global']['zk_user']
-gmetad_user = config['configurations']['global']["gmetad_user"]
-gmond_user = config['configurations']['global']["gmond_user"]
-
-user_group = config['configurations']['global']['user_group']
-proxyuser_group =  config['configurations']['global']['proxyuser_group']
-nagios_group = config['configurations']['global']['nagios_group']
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+nagios_user = config['configurations']['nagios-env']['nagios_user']
+oozie_user = config['configurations']['oozie-env']['oozie_user']
+webhcat_user = config['configurations']['hive-env']['hcat_user']
+hcat_user = config['configurations']['hive-env']['hcat_user']
+hive_user = config['configurations']['hive-env']['hive_user']
+smoke_user =  config['configurations']['hadoop-env']['smokeuser']
+mapred_user = config['configurations']['mapred-env']['mapred_user']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+zk_user = config['configurations']['zookeeper-env']['zk_user']
+gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
+gmond_user = config['configurations']['ganglia-env']["gmond_user"]
+
+user_group = config['configurations']['hadoop-env']['user_group']
+proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
+nagios_group = config['configurations']['nagios-env']['nagios_group']
 smoke_user_group =  "users"
 mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
 
@@ -123,7 +122,7 @@ if has_ganglia_server:
   ganglia_server_host = ganglia_server_hosts[0]
 
 hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
-ignore_groupsusers_create = default("ignore_groupsusers_create", False)
+ignore_groupsusers_create = default("/configurations/hadoop-env/ignore_groupsusers_create", False)
 
 
 #repo params
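
User and group settings likewise move out of 'global' into per-service *-env
types; note that webhcat_user now reads hcat_user from hive-env. Hypothetical
lookups under the new layout:

    config = {'configurations': {
        'hadoop-env':    {'hdfs_user': 'hdfs', 'user_group': 'hadoop'},
        'hive-env':      {'hcat_user': 'hcat', 'hive_user': 'hive'},
        'zookeeper-env': {'zk_user': 'zookeeper'},
    }}
    webhcat_user = config['configurations']['hive-env']['hcat_user']  # 'hcat'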

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/shared_initialization.py
index f88d429..7f2af55 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/shared_initialization.py
@@ -87,12 +87,6 @@ def setup_users():
          ignore_failures = params.ignore_groupsusers_create
     )
 
-  if params.has_resourcemanager:
-    User(params.yarn_user,
-         gid = params.user_group,
-         ignore_failures = params.ignore_groupsusers_create
-    )
-
   if params.has_ganglia_server:
     Group(params.gmetad_user,
          ignore_failures = params.ignore_groupsusers_create

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/params.py
index 6f22e79..4e58939 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/params.py
@@ -28,9 +28,9 @@ _authentication = config['configurations']['core-site']['hadoop.security.authent
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
 
 #users and groups
-hdfs_user = config['configurations']['global']['hdfs_user']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 
-user_group = config['configurations']['global']['user_group']
+user_group = config['configurations']['hadoop-env']['user_group']
 
 #hosts
 hostname = config["hostname"]
@@ -70,14 +70,14 @@ if has_ganglia_server:
 hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
 hadoop_lib_home = "/usr/lib/hadoop/lib"
 hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
 hadoop_home = "/usr"
 hadoop_bin = "/usr/lib/hadoop/bin"
 
 task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
 limits_conf_dir = "/etc/security/limits.d"
 
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
 hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
 #db params
 server_db_name = config['hostLevelParams']['db_name']
@@ -93,8 +93,8 @@ ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver']
 ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username']
 ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password']
 
-if 'rca_enabled' in config['configurations']['global']:
-  rca_enabled =  config['configurations']['global']['rca_enabled']
+if 'rca_enabled' in config['configurations']['hadoop-env']:
+  rca_enabled =  config['configurations']['hadoop-env']['rca_enabled']
 else:
   rca_enabled = False
 rca_disabled_prefix = "###"
@@ -110,21 +110,21 @@ if System.get_instance().os_family == "suse":
 else:
   jsvc_path = "/usr/libexec/bigtop-utils"
 
-hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['global']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['global']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['global']['namenode_opt_maxnewsize']
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
 
-jtnode_opt_newsize = default("jtnode_opt_newsize","200m")
-jtnode_opt_maxnewsize = default("jtnode_opt_maxnewsize","200m")
-jtnode_heapsize =  default("jtnode_heapsize","1024m")
-ttnode_heapsize = default("ttnode_heapsize","1024m")
+jtnode_opt_newsize = default("/configurations/mapred-env/jtnode_opt_newsize","200m")
+jtnode_opt_maxnewsize = default("/configurations/mapred-env/jtnode_opt_maxnewsize","200m")
+jtnode_heapsize =  default("/configurations/mapred-env/jtnode_heapsize","1024m")
+ttnode_heapsize = default("/configurations/mapred-env/ttnode_heapsize","1024m")
 
-dtnode_heapsize = config['configurations']['global']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = "/var/run/hadoop-mapreduce"
 mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
 hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-mapred_log_dir_prefix = default("mapred_log_dir_prefix",hdfs_log_dir_prefix)
+mapred_log_dir_prefix = hdfs_log_dir_prefix
 
 #taskcontroller.cfg
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/configuration/ganglia-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/configuration/ganglia-env.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/configuration/ganglia-env.xml
new file mode 100644
index 0000000..68d94f2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/configuration/ganglia-env.xml
@@ -0,0 +1,70 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>ganglia_conf_dir</name>
+    <value>/etc/ganglia/hdp</value>
+    <description>Config directory for Ganglia</description>
+  </property>
+  <property>
+    <name>ganglia_runtime_dir</name>
+    <value>/var/run/ganglia/hdp</value>
+    <description>Run directories for Ganglia</description>
+  </property>
+  <property>
+    <name>gmetad_user</name>
+    <value>nobody</value>
+    <description>User </description>
+  </property>
+    <property>
+    <name>gmond_user</name>
+    <value>nobody</value>
+    <description>User </description>
+  </property>
+  <property>
+    <name>rrdcached_base_dir</name>
+    <value>/var/lib/ganglia/rrds</value>
+    <description>Default directory for saving the rrd files on ganglia server</description>
+  </property>
+  <property>
+    <name>rrdcached_timeout</name>
+    <value>3600</value>
+    <description>(-w) Data is written to disk every timeout seconds. If this option is not specified the default interval of 300 seconds will be used.</description>
+  </property>
+  <property>
+    <name>rrdcached_flush_timeout</name>
+    <value>7200</value>
+    <description>(-f) Every timeout seconds the entire cache is searched for old values which are written to disk. This only concerns files to which updates have stopped, so setting this to a high value, such as 3600 seconds, is acceptable in most cases. This timeout defaults to 3600 seconds.</description>
+  </property>
+  <property>
+    <name>rrdcached_delay</name>
+    <value>1800</value>
+    <description>(-z) If specified, rrdcached will delay writing of each RRD for a random number of seconds in the range [0,delay). This will avoid too many writes being queued simultaneously. This value should be no greater than the value specified in -w. By default, there is no delay.</description>
+  </property>
+  <property>
+    <name>rrdcached_write_threads</name>
+    <value>4</value>
+    <description>(-t) Specifies the number of threads used for writing RRD files. The default is 4. Increasing this number will allow rrdcached to have more simultaneous I/O requests into the kernel. This may allow the kernel to re-order disk writes, resulting in better disk throughput.</description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/configuration/global.xml
deleted file mode 100644
index d1f369a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/configuration/global.xml
+++ /dev/null
@@ -1,70 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-  <property>
-    <name>ganglia_conf_dir</name>
-    <value>/etc/ganglia/hdp</value>
-    <description>Config directory for Ganglia</description>
-  </property>
-  <property>
-    <name>ganglia_runtime_dir</name>
-    <value>/var/run/ganglia/hdp</value>
-    <description>Run directories for Ganglia</description>
-  </property>
-  <property>
-    <name>gmetad_user</name>
-    <value>nobody</value>
-    <description>User </description>
-  </property>
-    <property>
-    <name>gmond_user</name>
-    <value>nobody</value>
-    <description>User </description>
-  </property>
-  <property>
-    <name>rrdcached_base_dir</name>
-    <value>/var/lib/ganglia/rrds</value>
-    <description>Default directory for saving the rrd files on ganglia server</description>
-  </property>
-  <property>
-    <name>rrdcached_timeout</name>
-    <value>3600</value>
-    <description>(-w) Data is written to disk every timeout seconds. If this option is not specified the default interval of 300 seconds will be used.</description>
-  </property>
-  <property>
-    <name>rrdcached_flush_timeout</name>
-    <value>7200</value>
-    <description>(-f) Every timeout seconds the entire cache is searched for old values which are written to disk. This only concerns files to which updates have stopped, so setting this to a high value, such as 3600 seconds, is acceptable in most cases. This timeout defaults to 3600 seconds.</description>
-  </property>
-  <property>
-    <name>rrdcached_delay</name>
-    <value>1800</value>
-    <description>(-z) If specified, rrdcached will delay writing of each RRD for a random number of seconds in the range [0,delay). This will avoid too many writes being queued simultaneously. This value should be no greater than the value specified in -w. By default, there is no delay.</description>
-  </property>
-  <property>
-    <name>rrdcached_write_threads</name>
-    <value>4</value>
-    <description>(-t) Specifies the number of threads used for writing RRD files. The default is 4. Increasing this number will allow rrdcached to have more simultaneous I/O requests into the kernel. This may allow the kernel to re-order disk writes, resulting in better disk throughput.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/metainfo.xml
index 588708d..dc3ac6a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/metainfo.xml
@@ -96,7 +96,7 @@
         </osSpecific>
       </osSpecifics>
       <configuration-dependencies>
-        <config-type>global</config-type>
+        <config-type>ganglia-env</config-type>
       </configuration-dependencies>
       <monitoringService>true</monitoringService>
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/params.py
index 861aebb..50df082 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/params.py
@@ -22,21 +22,21 @@ import os
 
 config = Script.get_config()
 
-user_group = config['configurations']['global']["user_group"]
-ganglia_conf_dir = default("/configurations/global/ganglia_conf_dir","/etc/ganglia/hdp")
+user_group = config['configurations']['hadoop-env']["user_group"]
+ganglia_conf_dir = default("/configurations/ganglia-env/ganglia_conf_dir","/etc/ganglia/hdp")
 ganglia_dir = "/etc/ganglia"
-ganglia_runtime_dir = config['configurations']['global']["ganglia_runtime_dir"]
+ganglia_runtime_dir = config['configurations']['ganglia-env']["ganglia_runtime_dir"]
 ganglia_shell_cmds_dir = "/usr/libexec/hdp/ganglia"
 
-gmetad_user = config['configurations']['global']["gmetad_user"]
-gmond_user = config['configurations']['global']["gmond_user"]
+gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
+gmond_user = config['configurations']['ganglia-env']["gmond_user"]
 
 webserver_group = "apache"
-rrdcached_base_dir = config['configurations']['global']["rrdcached_base_dir"]
-rrdcached_timeout = default("/configurations/global/rrdcached_timeout", 3600)
-rrdcached_flush_timeout = default("/configurations/global/rrdcached_flush_timeout", 7200)
-rrdcached_delay = default("/configurations/global/rrdcached_delay", 1800)
-rrdcached_write_threads = default("/configurations/global/rrdcached_write_threads", 4)
+rrdcached_base_dir = config['configurations']['ganglia-env']["rrdcached_base_dir"]
+rrdcached_timeout = default("/configurations/ganglia-env/rrdcached_timeout", 3600)
+rrdcached_flush_timeout = default("/configurations/ganglia-env/rrdcached_flush_timeout", 7200)
+rrdcached_delay = default("/configurations/ganglia-env/rrdcached_delay", 1800)
+rrdcached_write_threads = default("/configurations/ganglia-env/rrdcached_write_threads", 4)
 
 ganglia_server_host = config["clusterHostInfo"]["ganglia_server_host"][0]
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/status_params.py
index 3ccad2f..0c69ca9 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/status_params.py
@@ -22,4 +22,4 @@ from resource_management import *
 
 config = Script.get_config()
 
-pid_dir = config['configurations']['global']['ganglia_runtime_dir']
+pid_dir = config['configurations']['ganglia-env']['ganglia_runtime_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/global.xml
deleted file mode 100644
index 2a00566..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/global.xml
+++ /dev/null
@@ -1,50 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-  <property>
-    <name>hbase_log_dir</name>
-    <value>/var/log/hbase</value>
-    <description>Log Directories for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_pid_dir</name>
-    <value>/var/run/hbase</value>
-    <description>Pid Directory for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_regionserver_heapsize</name>
-    <value>1024</value>
-    <description>HBase RegionServer Heap Size.</description>
-  </property>
-  <property>
-    <name>hbase_master_heapsize</name>
-    <value>1024</value>
-    <description>HBase Master Heap Size</description>
-  </property>
-  <property>
-    <name>hbase_user</name>
-    <value>hbase</value>
-    <description>HBase User Name.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/hbase-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/hbase-env.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/hbase-env.xml
new file mode 100644
index 0000000..3c5465d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/hbase-env.xml
@@ -0,0 +1,122 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hbase_log_dir</name>
+    <value>/var/log/hbase</value>
+    <description>Log Directories for HBase.</description>
+  </property>
+  <property>
+    <name>hbase_pid_dir</name>
+    <value>/var/run/hbase</value>
+    <description>Pid Directory for HBase.</description>
+  </property>
+  <property>
+    <name>hbase_regionserver_heapsize</name>
+    <value>1024</value>
+    <description>HBase RegionServer Heap Size.</description>
+  </property>
+  <property>
+    <name>hbase_master_heapsize</name>
+    <value>1024</value>
+    <description>HBase Master Heap Size</description>
+  </property>
+  <property>
+    <name>hbase_user</name>
+    <value>hbase</value>
+    <description>HBase User Name.</description>
+  </property>
+  
+  <!-- hbase-env.sh -->
+  <property>
+    <name>content</name>
+    <description>hbase-env.sh content</description>
+    <value>
+# Set environment variables here.
+
+# The java implementation to use. Java 1.6 required.
+export JAVA_HOME={{java64_home}}
+
+# HBase Configuration directory
+export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}
+
+# Extra Java CLASSPATH elements. Optional.
+export HBASE_CLASSPATH=${HBASE_CLASSPATH}
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HBASE_HEAPSIZE=1000
+
+# Extra Java runtime options.
+# Below are what we set by default. May only work with SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+export HBASE_OPTS="-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log"
+export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
+# Uncomment below to enable java garbage collection logging.
+# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+#
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+export HBASE_MASTER_OPTS="-Xmx{{master_heapsize}}"
+export HBASE_REGIONSERVER_OPTS="-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
+# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+
+# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
+
+# Extra ssh options. Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored. $HBASE_HOME/logs by default.
+export HBASE_LOG_DIR={{log_dir}}
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+export HBASE_PID_DIR={{pid_dir}}
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage its own instance of ZooKeeper or not.
+export HBASE_MANAGES_ZK=false
+
+{% if security_enabled %}
+export HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}"
+{% endif %}
+    </value>
+  </property>
+
+</configuration>
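
Moving the whole of hbase-env.sh into a single content property means the file body now travels through the normal configuration pipeline and can be edited like any other property; the {{...}} tokens and the {% if security_enabled %} block are resolved at deploy time. A hypothetical rendering of just the conditional tail, using plain Jinja2 to stand in for Ambari's template engine:

    from jinja2 import Template

    tail = (
        "{% if security_enabled %}\n"
        "export HBASE_OPTS=\"$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}\"\n"
        "{% endif %}\n"
    )
    # On a secure cluster the export line survives; otherwise it renders empty.
    print(Template(tail).render(security_enabled=True,
                                client_jaas_config_file='/etc/hbase/conf/hbase_client_jaas.conf'))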

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/metainfo.xml
index baf98df..3973035 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/metainfo.xml
@@ -112,9 +112,9 @@
       </commandScript>
 
       <configuration-dependencies>
-        <config-type>global</config-type>
         <config-type>hbase-policy</config-type>
         <config-type>hbase-site</config-type>
+        <config-type>hbase-env</config-type>
         <config-type>hbase-log4j</config-type>
       </configuration-dependencies>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase.py
index ec29ad9..0dc5347 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase.py
@@ -73,7 +73,10 @@ def hbase(name=None # 'master' or 'regionserver' or 'client'
       group = params.user_group
     )
   
-  hbase_TemplateConfig( 'hbase-env.sh')     
+  File(format("{hbase_conf_dir}/hbase-env.sh"),
+       owner=params.hbase_user,
+       content=InlineTemplate(params.hbase_env_sh_template)
+  )     
        
   hbase_TemplateConfig( params.metric_prop_file_name,
     tag = 'GANGLIA-MASTER' if name == 'master' else 'GANGLIA-RS'
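
The replacement of hbase_TemplateConfig('hbase-env.sh') with File(..., content=InlineTemplate(...)) is the mechanical half of the move: instead of rendering the packaged hbase-env.sh.j2 (deleted below), the script renders the content string fetched from hbase-env into params.hbase_env_sh_template. A minimal sketch of the equivalent behaviour, with Jinja2 standing in for InlineTemplate and the template string, values, and output path all hypothetical:

    from jinja2 import Template

    hbase_env_sh_template = "export HBASE_LOG_DIR={{log_dir}}\nexport HBASE_PID_DIR={{pid_dir}}\n"

    rendered = Template(hbase_env_sh_template).render(
        log_dir='/var/log/hbase',   # params.log_dir
        pid_dir='/var/run/hbase',   # params.pid_dir
    )

    # File(...) then writes the rendered text to {hbase_conf_dir}/hbase-env.sh,
    # owned by the hbase user.
    with open('/tmp/hbase-env.sh', 'w') as f:
        f.write(rendered)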

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/params.py
index f08891e..9b61365 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/params.py
@@ -34,37 +34,38 @@ hbase_excluded_hosts = config['commandParams']['excluded_hosts']
 hbase_drain_only = config['commandParams']['mark_draining_only']
 
 hbase_user = status_params.hbase_user
-smokeuser = config['configurations']['global']['smokeuser']
+smokeuser = config['configurations']['hadoop-env']['smokeuser']
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-user_group = config['configurations']['global']['user_group']
+user_group = config['configurations']['hadoop-env']['user_group']
 
 # this is "hadoop-metrics2-hbase.properties" for 2.x stacks
 metric_prop_file_name = "hadoop-metrics.properties"
 
 # not supporting 32 bit jdk.
 java64_home = config['hostLevelParams']['java_home']
+hbase_env_sh_template = config['configurations']['hbase-env']['content']
 
-log_dir = config['configurations']['global']['hbase_log_dir']
-master_heapsize = config['configurations']['global']['hbase_master_heapsize']
+log_dir = config['configurations']['hbase-env']['hbase_log_dir']
+master_heapsize = config['configurations']['hbase-env']['hbase_master_heapsize']
 
-regionserver_heapsize = config['configurations']['global']['hbase_regionserver_heapsize']
+regionserver_heapsize = config['configurations']['hbase-env']['hbase_regionserver_heapsize']
 regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, 0.2, 512)
 
 pid_dir = status_params.pid_dir
 tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
 
-client_jaas_config_file = default('hbase_client_jaas_config_file', format("{hbase_conf_dir}/hbase_client_jaas.conf"))
-master_jaas_config_file = default('hbase_master_jaas_config_file', format("{hbase_conf_dir}/hbase_master_jaas.conf"))
-regionserver_jaas_config_file = default('hbase_regionserver_jaas_config_file', format("{hbase_conf_dir}/hbase_regionserver_jaas.conf"))
+client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
+master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
+regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
 
 ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
 ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
 
-rs_hosts = default('hbase_rs_hosts', config['clusterHostInfo']['slave_hosts']) #if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves
+rs_hosts = config['clusterHostInfo']['slave_hosts'] # region servers are assumed to run on the same nodes as the slaves
 
-smoke_test_user = config['configurations']['global']['smokeuser']
-smokeuser_permissions = default('smokeuser_permissions', "RWXCA")
+smoke_test_user = config['configurations']['hadoop-env']['smokeuser']
+smokeuser_permissions = "RWXCA"
 service_check_data = functions.get_unique_id_and_date()
 
 if security_enabled:
@@ -74,9 +75,9 @@ if security_enabled:
 
 master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
 regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-hbase_user_keytab = config['configurations']['global']['hbase_user_keytab']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+hbase_user_keytab = config['configurations']['hadoop-env']['hbase_user_keytab']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 if security_enabled:
   kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_user};")
 else:
@@ -94,9 +95,9 @@ hbase_staging_dir = "/apps/hbase/staging"
 #for create_hdfs_directory
 hostname = config["hostname"]
 hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-hdfs_user = config['configurations']['global']['hdfs_user']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 import functools
 #create partial functions with common arguments for every HdfsDirectory call
 #to create hdfs directory we need to call params.HdfsDirectory in code
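
Two derivations in this hunk are worth spelling out. regionserver_xmn_size comes from calc_xmn_from_xms(regionserver_heapsize, 0.2, 512), i.e. the RegionServer's JVM new-generation size is a fraction of the heap with an upper cap, and the functools import prepares partial application so every HdfsDirectory call shares the keytab/user boilerplate. A sketch of the assumed heap arithmetic (numeric megabytes only; the real helper also handles suffixed strings such as "1024m"):

    def calc_xmn_sketch(heapsize_mb, xmn_fraction, xmn_cap_mb):
        """Assumed behaviour: new-generation size = heap * fraction, capped."""
        return int(min(heapsize_mb * xmn_fraction, xmn_cap_mb))

    print(calc_xmn_sketch(1024, 0.2, 512))  # 204
    print(calc_xmn_sketch(4096, 0.2, 512))  # 512 (cap applies)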

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/status_params.py
index 8360507..850ec8b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/status_params.py
@@ -22,5 +22,5 @@ from resource_management import *
 
 config = Script.get_config()
 
-pid_dir = config['configurations']['global']['hbase_pid_dir']
-hbase_user = config['configurations']['global']['hbase_user']
+pid_dir = config['configurations']['hbase-env']['hbase_pid_dir']
+hbase_user = config['configurations']['hbase-env']['hbase_user']

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase-env.sh.j2
deleted file mode 100644
index 6a2ebb2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase-env.sh.j2
+++ /dev/null
@@ -1,101 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Set environment variables here.
-
-# The java implementation to use. Java 1.6 required.
-export JAVA_HOME={{java64_home}}
-
-# HBase Configuration directory
-export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}
-
-# Extra Java CLASSPATH elements. Optional.
-export HBASE_CLASSPATH=${HBASE_CLASSPATH}
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-# export HBASE_HEAPSIZE=1000
-
-# Extra Java runtime options.
-# Below are what we set by default. May only work with SUN JVM.
-# For more on why as well as other possible settings,
-# see http://wiki.apache.org/hadoop/PerformanceTuning
-export HBASE_OPTS="-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log"
-export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
-# Uncomment below to enable java garbage collection logging.
-# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
-
-# Uncomment and adjust to enable JMX exporting
-# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
-# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
-#
-# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
-export HBASE_MASTER_OPTS="-Xmx{{master_heapsize}}"
-export HBASE_REGIONSERVER_OPTS="-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
-# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
-# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
-
-# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
-export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
-
-# Extra ssh options. Empty by default.
-# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
-
-# Where log files are stored. $HBASE_HOME/logs by default.
-export HBASE_LOG_DIR={{log_dir}}
-
-# A string representing this instance of hbase. $USER by default.
-# export HBASE_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes. See 'man nice'.
-# export HBASE_NICENESS=10
-
-# The directory where pid files are stored. /tmp by default.
-export HBASE_PID_DIR={{pid_dir}}
-
-# Seconds to sleep between slave commands. Unset by default. This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HBASE_SLAVE_SLEEP=0.1
-
-# Tell HBase whether it should manage it's own instance of Zookeeper or not.
-export HBASE_MANAGES_ZK=false
-
-{% if security_enabled %}
-export HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}"
-export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}"
-export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}"
-{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/global.xml
deleted file mode 100644
index 3eb0a22..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/global.xml
+++ /dev/null
@@ -1,87 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-  <property>
-    <name>hdfs_log_dir_prefix</name>
-    <value>/var/log/hadoop</value>
-    <description>Hadoop Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>hadoop_pid_dir_prefix</name>
-    <value>/var/run/hadoop</value>
-    <description>Hadoop PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_heapsize</name>
-    <value>1024</value>
-    <description>NameNode Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_opt_newsize</name>
-    <value>200</value>
-    <description>NameNode new generation size</description>
-  </property>
-  <property>
-    <name>namenode_opt_maxnewsize</name>
-    <value>200</value>
-    <description>NameNode maximum new generation size</description>
-  </property>
-  <property>
-    <name>dtnode_heapsize</name>
-    <value>1024</value>
-    <description>DataNode maximum Java heap size</description>
-  </property>
-  <property>
-    <name>proxyuser_group</name>
-    <value>users</value>
-    <description>Proxy user group.</description>
-  </property>
-
-  <property>
-    <name>security_enabled</name>
-    <value>false</value>
-    <description>Hadoop Security</description>
-  </property>
-  <property>
-    <name>kerberos_domain</name>
-    <value>EXAMPLE.COM</value>
-    <description>Kerberos realm.</description>
-  </property>
-
-  <property>
-    <name>hdfs_user</name>
-    <value>hdfs</value>
-    <description>User and Groups.</description>
-  </property>
-  <property>
-    <name>ignore_groupsusers_create</name>
-    <value>false</value>
-    <description>Whether to ignores failures on users and group creation</description>
-  </property>
-  
-</configuration>
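
The deleted HDFS global.xml closes the pattern for this part of the diff: its properties (hdfs_user, user_group, the keytab paths) are the ones the rewritten params.py files above now read from hadoop-env. The migrated lookup, in one illustrative line against a toy dict rather than the live command JSON:

    config = {'configurations': {'hadoop-env': {'hdfs_user': 'hdfs'}}}
    hdfs_user = config['configurations']['hadoop-env']['hdfs_user']  # was ['global']['hdfs_user']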