Posted to commits@ambari.apache.org by su...@apache.org on 2014/08/25 19:55:25 UTC

git commit: AMBARI-6980. Refactor of 2.1.GlusterFS stack to move global to env (Scott Creeley via subin)

Repository: ambari
Updated Branches:
  refs/heads/trunk 3c13f40fa -> fefc129b1


AMBARI-6980. Refactor of 2.1.GlusterFS stack to move global to env (Scott Creeley via subin)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/fefc129b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/fefc129b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/fefc129b

Branch: refs/heads/trunk
Commit: fefc129b1a50cdc0c8a07e37893a9b3f6ddda5b9
Parents: 3c13f40
Author: root <ro...@c6501.ambari.apache.org>
Authored: Mon Aug 25 17:49:16 2014 +0000
Committer: root <ro...@c6501.ambari.apache.org>
Committed: Mon Aug 25 17:49:16 2014 +0000

----------------------------------------------------------------------
 .../FALCON/configuration/falcon-env.xml         |  63 ++++++
 .../2.1.GlusterFS/services/FALCON/metainfo.xml  |   2 +-
 .../services/FALCON/package/scripts/params.py   |  32 +--
 .../FALCON/package/scripts/status_params.py     |   2 +-
 .../GLUSTERFS/configuration/core-site.xml       |  26 ---
 .../GLUSTERFS/configuration/hadoop-env.xml      | 207 +++++++++++++++++++
 .../services/GLUSTERFS/metainfo.xml             |   7 +-
 .../2.1.GlusterFS/services/OOZIE/metainfo.xml   |   2 +-
 .../services/STORM/configuration/storm-env.xml  |  39 ++++
 .../2.1.GlusterFS/services/STORM/metainfo.xml   |   2 +-
 .../services/STORM/package/scripts/params.py    |  12 +-
 .../STORM/package/scripts/status_params.py      |   2 +-
 .../services/TEZ/configuration/tez-env.xml      |  29 +++
 .../HDP/2.1.GlusterFS/services/TEZ/metainfo.xml |   2 +-
 .../services/TEZ/package/scripts/params.py      |   4 +-
 .../services/YARN/configuration/global.xml      |  64 ------
 .../services/YARN/configuration/yarn-env.xml    | 181 ++++++++++++++++
 .../2.1.GlusterFS/services/YARN/metainfo.xml    |   3 +-
 .../services/YARN/package/scripts/params.py     |  41 ++--
 .../YARN/package/scripts/status_params.py       |   8 +-
 ambari-web/app/data/HDP2/site_properties.js     | 178 ++++++++++------
 ambari-web/app/data/site_properties.js          | 160 ++++++++------
 ambari-web/app/models/stack_service.js          |   2 +-
 23 files changed, 791 insertions(+), 277 deletions(-)
----------------------------------------------------------------------
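The change this diffstat summarizes is mechanical: every lookup of the catch-all "global" config type in the stack's params.py scripts is redirected to a service-scoped *-env type (falcon-env, storm-env, tez-env, yarn-env), with cluster-wide settings such as user_group moving to hadoop-env. A minimal, runnable sketch of the pattern follows; the dict below is only a stand-in for what Script.get_config() returns in a real agent run:

    # Illustrative sketch of the global -> *-env refactor (not part of the patch).
    config = {
        'configurations': {
            'falcon-env': {'falcon_user': 'falcon'},   # was under 'global'
            'hadoop-env': {'user_group': 'hadoop'},    # shared Hadoop settings
        }
    }

    # Before: config['configurations']['global']['falcon_user']
    falcon_user = config['configurations']['falcon-env']['falcon_user']
    user_group = config['configurations']['hadoop-env']['user_group']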


http://git-wip-us.apache.org/repos/asf/ambari/blob/fefc129b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/configuration/falcon-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/configuration/falcon-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/configuration/falcon-env.xml
new file mode 100644
index 0000000..fadc02d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/configuration/falcon-env.xml
@@ -0,0 +1,63 @@
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false">
+  <property>
+    <name>falcon_user</name>
+    <value>falcon</value>
+    <description>Falcon user.</description>
+  </property>
+  <property>
+    <name>falcon_port</name>
+    <value>15000</value>
+    <description>Port the Falcon Server listens on.</description>
+  </property>
+  <property>
+    <name>falcon_log_dir</name>
+    <value>/var/log/falcon</value>
+    <description>Falcon log directory.</description>
+  </property>
+  <property>
+    <name>falcon_pid_dir</name>
+    <value>/var/run/falcon</value>
+    <description>Falcon pid-file directory.</description>
+  </property>
+  <property>
+    <name>falcon_local_dir</name>
+    <value>/hadoop/falcon</value>
+    <description>Directory where Falcon data, such as activemq data, is stored.</description>
+  </property>
+  <!--embeddedmq properties-->
+  <property>
+    <name>falcon.embeddedmq.data</name>
+    <value>/hadoop/falcon/embeddedmq/data</value>
+    <description>Directory in which embeddedmq data is stored.</description>
+  </property>
+  <property>
+    <name>falcon.embeddedmq</name>
+    <value>true</value>
+    <description>Whether embeddedmq is enabled or not.</description>
+  </property>
+  <property>
+    <name>falcon.emeddedmq.port</name>
+    <value>61616</value>
+    <description>Port that embeddedmq will listen on.</description>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fefc129b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/metainfo.xml
index aa243e3..f66d99f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/metainfo.xml
@@ -80,7 +80,7 @@
 
       <configuration-dependencies>
         <config-type>oozie-site</config-type>
-        <config-type>global</config-type>
+        <config-type>falcon-env</config-type>
         <config-type>falcon-startup.properties</config-type>
         <config-type>falcon-runtime.properties</config-type>
       </configuration-dependencies>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fefc129b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/package/scripts/params.py
index 3372675..93e292d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/package/scripts/params.py
@@ -23,29 +23,30 @@ from status_params import *
 
 config = Script.get_config()
 
-oozie_user = config['configurations']['global']['oozie_user']
-falcon_user = config['configurations']['global']['falcon_user']
-smoke_user =  config['configurations']['global']['smokeuser']
+oozie_user = config['configurations']['oozie-env']['oozie_user']
+falcon_user = config['configurations']['falcon-env']['falcon_user']
+smoke_user =  config['configurations']['hadoop-env']['smokeuser']
 
-user_group = config['configurations']['global']['user_group']
-proxyuser_group =  config['configurations']['global']['proxyuser_group']
+user_group = config['configurations']['hadoop-env']['user_group']
+proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
 
 java_home = config['hostLevelParams']['java_home']
 falcon_home = '/usr/lib/falcon'
 falcon_conf_dir = '/etc/falcon/conf'
-falcon_local_dir = config['configurations']['global']['falcon_local_dir']
-falcon_log_dir = config['configurations']['global']['falcon_log_dir']
+falcon_local_dir = config['configurations']['falcon-env']['falcon_local_dir']
+falcon_log_dir = config['configurations']['falcon-env']['falcon_log_dir']
 store_uri = config['configurations']['falcon-startup.properties']['*.config.store.uri']
 
-falcon_embeddedmq_data = config['configurations']['global']['falcon.embeddedmq.data']
-falcon_embeddedmq_enabled = config['configurations']['global']['falcon.embeddedmq']
-falcon_emeddedmq_port = config['configurations']['global']['falcon.emeddedmq.port']
+falcon_embeddedmq_data = config['configurations']['falcon-env']['falcon.embeddedmq.data']
+falcon_embeddedmq_enabled = config['configurations']['falcon-env']['falcon.embeddedmq']
+falcon_emeddedmq_port = config['configurations']['falcon-env']['falcon.emeddedmq.port']
 
 falcon_host = config['clusterHostInfo']['falcon_server_hosts'][0]
-falcon_port = config['configurations']['global']['falcon_port']
+falcon_port = config['configurations']['falcon-env']['falcon_port']
 falcon_runtime_properties = config['configurations']['falcon-runtime.properties']
 falcon_startup_properties = config['configurations']['falcon-startup.properties']
-smokeuser_keytab = config['configurations']['global']['smokeuser_keytab']
+smokeuser_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+falcon_env_sh_template = config['configurations']['falcon-env']['content']
 
 falcon_webapp_dir = '/var/lib/falcon/webapp'
 flacon_apps_dir = '/apps/falcon'
@@ -54,10 +55,9 @@ _authentication = config['configurations']['core-site']['hadoop.security.authent
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
 hostname = config["hostname"]
 hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-hdfs_user = config['configurations']['global']['hdfs_user']
-hdfs_principal_name = config['configurations']['global']['hdfs_principal_name']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 import functools
 #create partial functions with common arguments for every HdfsDirectory call
 #to create hdfs directory we need to call params.HdfsDirectory in code
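The comment above refers to the functools.partial idiom used by these scripts (the YARN params.py hunk later in this commit shows HdfsDirectory being rebound with its shared arguments). A minimal sketch with a stand-in function, since the real HdfsDirectory resource comes from Ambari's resource_management library:

    import functools

    def hdfs_directory(path, conf_dir, hdfs_user):
        # Stand-in for the real HdfsDirectory resource.
        print("creating %s as %s (conf: %s)" % (path, hdfs_user, conf_dir))

    # Bind the arguments shared by every call once...
    HdfsDirectory = functools.partial(hdfs_directory,
                                      conf_dir='/etc/hadoop/conf',
                                      hdfs_user='hdfs')

    # ...so call sites only pass what varies:
    HdfsDirectory('/apps/falcon')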

http://git-wip-us.apache.org/repos/asf/ambari/blob/fefc129b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/package/scripts/status_params.py
index 7f3aaa0..6ebb35f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/package/scripts/status_params.py
@@ -20,5 +20,5 @@ limitations under the License.
 from resource_management import *
 
 config = Script.get_config()
-falcon_pid_dir = config['configurations']['global']['falcon_pid_dir']
+falcon_pid_dir = config['configurations']['falcon-env']['falcon_pid_dir']
 server_pid_file = format('{falcon_pid_dir}/falcon.pid')

http://git-wip-us.apache.org/repos/asf/ambari/blob/fefc129b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/GLUSTERFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
index ea158a5..b1529f2 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
@@ -22,35 +22,9 @@
 
 <configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
 
-<!-- i/o properties -->
-
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-
-
 <!-- file system properties -->
 
   <property>
-    <name>fs.defaultFS</name>
-    <!-- cluster variant -->
-    <value>glusterfs:///localhost:8020</value>
-    <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for NDFS.</description>
-    <final>true</final>
-  </property>
-  
-  <property>
-    <name>fs.default.name</name>
-    <!-- cluster variant -->
-    <value>glusterfs:///localhost:8020</value>
-    <description>The name of the default file system.  Either the
-         literal string "local" or a host:port for NDFS.</description>
-  </property>
-
-  <property>
   <name>fs.AbstractFileSystem.glusterfs.impl</name>
   <value>org.apache.hadoop.fs.local.GlusterFs</value>
   </property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fefc129b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
new file mode 100644
index 0000000..6b00199
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
@@ -0,0 +1,207 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+  <property>
+    <name>fs.defaultFS</name>
+    <!-- cluster variant -->
+    <value>glusterfs:///localhost:8020</value>
+    <description>The name of the default file system.  Either the
+  literal string "local" or a host:port for NDFS.</description>
+    <final>true</final>
+  </property>
+  <property>
+    <name>fs.default.name</name>
+    <!-- cluster variant -->
+    <value>glusterfs:///localhost:8020</value>
+    <description>The name of the default file system.  Either the
+         literal string "local" or a host:port for NDFS.</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+ <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>glusterfs_user</name>
+    <value>root</value>
+    <description></description>
+  </property>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_host</name>
+    <value></value>
+    <description>NameNode Host.</description>
+  </property>
+  <property>
+    <name>snamenode_host</name>
+    <value></value>
+    <description>Secondary NameNode.</description>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <value>users</value>
+    <description>Proxy user group.</description>
+  </property>
+  <property>
+    <name>hdfs_user</name>
+    <value>hdfs</value>
+    <description>User to run HDFS as</description>
+  </property>
+  <property>
+    <name>user_group</name>
+    <value>hadoop</value>
+    <description>Proxy user group.</description>
+  </property>
+    <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for hadoop-env.sh file</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER={{hdfs_user}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+if [ -d "/usr/lib/tez" ]; then
+  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
+    </value>
+  </property>
+
+</configuration>
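The "content" property above is a Jinja template: the {{...}} placeholders are resolved from the params.py variables (java_home, hadoop_heapsize, and so on) when the agent writes hadoop-env.sh, and {# ... #} spans are Jinja comments. A minimal sketch of that substitution, assuming the standalone jinja2 package rather than Ambari's own templating wrapper, with sample values:

    from jinja2 import Template

    content = ('export JAVA_HOME={{java_home}}\n'
               'export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"\n')
    # Prints the text that would be written to /etc/hadoop/conf/hadoop-env.sh.
    print(Template(content).render(java_home='/usr/jdk64/jdk1.7.0_45',
                                   hadoop_heapsize='1024'))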

http://git-wip-us.apache.org/repos/asf/ambari/blob/fefc129b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/GLUSTERFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/GLUSTERFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/GLUSTERFS/metainfo.xml
index 8908401..6b01ed0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/GLUSTERFS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/GLUSTERFS/metainfo.xml
@@ -53,12 +53,9 @@
       </commandScript>
 
       <configuration-dependencies>
-      <!--
-        <config-type>yarn-site</config-type>
-        <config-type>mapred-site</config-type>
-      -->
         <config-type>core-site</config-type>
-        <config-type>global</config-type>
+        <config-type>hadoop-env</config-type>
+        <!--<config-type>hdfs-site</config-type>-->
       </configuration-dependencies>
 
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fefc129b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/OOZIE/metainfo.xml
index 186676f..6a8b98e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/OOZIE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/OOZIE/metainfo.xml
@@ -69,7 +69,7 @@
         
       </osSpecifics>
       <configuration-dependencies>
-        <config-type>global</config-type>
+        <config-type>oozie-env</config-type>
         <config-type>oozie-site</config-type>
         <config-type>oozie-log4j</config-type>
       </configuration-dependencies>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fefc129b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-env.xml
new file mode 100644
index 0000000..fc65cfa
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-env.xml
@@ -0,0 +1,39 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+  <property>
+    <name>storm_user</name>
+    <value>storm</value>
+    <description></description>
+  </property>
+  <property>
+    <name>storm_log_dir</name>
+    <value>/var/log/storm</value>
+    <description></description>
+  </property>
+  <property>
+    <name>storm_pid_dir</name>
+    <value>/var/run/storm</value>
+    <description></description>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fefc129b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/metainfo.xml
index d344295..14d8b56 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/metainfo.xml
@@ -109,7 +109,7 @@
 
       <configuration-dependencies>
         <config-type>storm-site</config-type>
-        <config-type>global</config-type>
+        <config-type>tez-env</config-type>
       </configuration-dependencies>
     </service>
   </services>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fefc129b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/package/scripts/params.py
index fcc8fbc..def5120 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/package/scripts/params.py
@@ -24,12 +24,12 @@ import status_params
 # server configurations
 config = Script.get_config()
 
-storm_user = config['configurations']['global']['storm_user']
-log_dir = config['configurations']['global']['storm_log_dir']
+storm_user = config['configurations']['storm-env']['storm_user']
+log_dir = config['configurations']['storm-env']['storm_log_dir']
 pid_dir = status_params.pid_dir
 conf_dir = "/etc/storm/conf"
 local_dir = config['configurations']['storm-site']['storm.local.dir']
-user_group = config['configurations']['global']['user_group']
+user_group = config['configurations']['hadoop-env']['user_group']
 java64_home = config['hostLevelParams']['java_home']
 nimbus_host = config['configurations']['storm-site']['nimbus.host']
 nimbus_port = config['configurations']['storm-site']['nimbus.thrift.port']
@@ -53,7 +53,7 @@ security_enabled = ( not is_empty(_authentication) and _authentication == 'kerbe
 
 if security_enabled:
   _hostname_lowercase = config['hostname'].lower()
-  _kerberos_domain = config['configurations']['global']['kerberos_domain']
-  _storm_principal_name = config['configurations']['global']['storm_principal_name']
+  _kerberos_domain = config['configurations']['hadoop-env']['kerberos_domain']
+  _storm_principal_name = config['configurations']['storm-env']['storm_principal_name']
   storm_jaas_principal = _storm_principal_name.replace('_HOST',_hostname_lowercase)
-  storm_keytab_path = config['configurations']['global']['storm_keytab']
+  storm_keytab_path = config['configurations']['storm-env']['storm_keytab']

http://git-wip-us.apache.org/repos/asf/ambari/blob/fefc129b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/package/scripts/status_params.py
index 66b2a57..5eaa446 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/package/scripts/status_params.py
@@ -21,7 +21,7 @@ from resource_management import *
 
 config = Script.get_config()
 
-pid_dir = config['configurations']['global']['storm_pid_dir']
+pid_dir = config['configurations']['storm-env']['storm_pid_dir']
 pid_nimbus = format("{pid_dir}/nimbus.pid")
 pid_supervisor = format("{pid_dir}/supervisor.pid")
 pid_drpc = format("{pid_dir}/drpc.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/fefc129b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/configuration/tez-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/configuration/tez-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/configuration/tez-env.xml
new file mode 100644
index 0000000..5ae9df1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/configuration/tez-env.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+  <property>
+    <name>tez_user</name>
+    <value>tez</value>
+    <description></description>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fefc129b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/metainfo.xml
index ed9cfe3..410266e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/metainfo.xml
@@ -46,7 +46,7 @@
       </osSpecifics>
 
       <configuration-dependencies>
-        <config-type>global</config-type>
+        <config-type>tez-env</config-type>
         <config-type>tez-site</config-type>
       </configuration-dependencies>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/fefc129b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/package/scripts/params.py
index 8bc810a..38bcacf 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/package/scripts/params.py
@@ -28,5 +28,5 @@ config_dir = "/etc/tez/conf"
 hadoop_home = '/usr'
 java64_home = config['hostLevelParams']['java_home']
 
-tez_user = config['configurations']['global']['tez_user']
-user_group = config['configurations']['global']['user_group']
\ No newline at end of file
+tez_user = config['configurations']['tez-env']['tez_user']
+user_group = config['configurations']['hadoop-env']['user_group']
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/fefc129b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/global.xml
deleted file mode 100644
index af6b3e3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/global.xml
+++ /dev/null
@@ -1,64 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-  <property>
-    <name>yarn_log_dir_prefix</name>
-    <value>/var/log/hadoop-yarn</value>
-    <description>YARN Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>yarn_pid_dir_prefix</name>
-    <value>/var/run/hadoop-yarn</value>
-    <description>YARN PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>yarn_user</name>
-    <value>yarn</value>
-    <description>YARN User</description>
-  </property>
-  <property>
-    <name>yarn_heapsize</name>
-    <value>1024</value>
-    <description>Max heapsize for all YARN components using a numerical value in the scale of MB</description>
-  </property>
-  <property>
-    <name>resourcemanager_heapsize</name>
-    <value>1024</value>
-    <description>Max heapsize for ResourceManager using a numerical value in the scale of MB</description>
-  </property>
-  <property>
-    <name>nodemanager_heapsize</name>
-    <value>1024</value>
-    <description>Max heapsize for NodeManager using a numerical value in the scale of MB</description>
-  </property>
-  <property>
-    <name>apptimelineserver_heapsize</name>
-    <value>1024</value>
-    <description>Max heapsize for AppTimelineServer using a numerical value in the scale of MB</description>
-  </property>
-  <property>
-    <name>namenode_heapsize</name>
-    <value>1024</value>
-    <description>Max heapsize for NameNode using a numerical value in the scale of MB</description>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fefc129b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-env.xml
new file mode 100644
index 0000000..60109c2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-env.xml
@@ -0,0 +1,181 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+  <property>
+    <name>yarn_log_dir_prefix</name>
+    <value>/var/log/hadoop-yarn</value>
+    <description>YARN Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>yarn_pid_dir_prefix</name>
+    <value>/var/run/hadoop-yarn</value>
+    <description>YARN PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>yarn_user</name>
+    <value>yarn</value>
+    <description>YARN User</description>
+  </property>
+  <property>
+    <name>yarn_heapsize</name>
+    <value>1024</value>
+    <description>Max heapsize for all YARN components using a numerical value in the scale of MB</description>
+  </property>
+  <property>
+    <name>resourcemanager_heapsize</name>
+    <value>1024</value>
+    <description>Max heapsize for ResourceManager using a numerical value in the scale of MB</description>
+  </property>
+  <property>
+    <name>nodemanager_heapsize</name>
+    <value>1024</value>
+    <description>Max heapsize for NodeManager using a numerical value in the scale of MB</description>
+  </property>
+  <property>
+    <name>apptimelineserver_heapsize</name>
+    <value>1024</value>
+    <description>Max heapsize for AppTimelineServer using a numerical value in the scale of MB</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>Max heapsize for NameNode using a numerical value in the scale of MB</description>
+  </property>
+  <!-- yarn-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for yarn-env.sh file</description>
+    <value>
+export HADOOP_YARN_HOME={{hadoop_yarn_home}}
+export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
+export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+export JAVA_HOME={{java64_home}}
+
+# User for YARN daemons
+export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
+
+# resolve links - $0 may be a softlink
+export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
+
+# some Java parameters
+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+if [ "$JAVA_HOME" != "" ]; then
+  #echo "run java in $JAVA_HOME"
+  JAVA_HOME=$JAVA_HOME
+fi
+
+if [ "$JAVA_HOME" = "" ]; then
+  echo "Error: JAVA_HOME is not set."
+  exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# For setting YARN specific HEAP sizes please use this
+# Parameter and set appropriately
+YARN_HEAPSIZE={{yarn_heapsize}}
+
+# check envvars which might override default args
+if [ "$YARN_HEAPSIZE" != "" ]; then
+  JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+fi
+
+# Resource Manager specific parameters
+
+# Specify the max Heapsize for the ResourceManager using a numerical value
+# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_RESOURCEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
+
+# Specify the JVM options to be used when starting the ResourceManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_RESOURCEMANAGER_OPTS=
+
+# Node Manager specific parameters
+
+# Specify the max Heapsize for the NodeManager using a numerical value
+# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_NODEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
+
+# Specify the max Heapsize for the HistoryManager using a numerical value
+# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
+# the value to 1024.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_HISTORYSERVER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}
+
+# Specify the JVM options to be used when starting the NodeManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_NODEMANAGER_OPTS=
+
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
+
+
+# default log directory and file
+if [ "$YARN_LOG_DIR" = "" ]; then
+  YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+fi
+if [ "$YARN_LOGFILE" = "" ]; then
+  YARN_LOGFILE='yarn.log'
+fi
+
+# default policy file for service-level authorization
+if [ "$YARN_POLICYFILE" = "" ]; then
+  YARN_POLICYFILE="hadoop-policy.xml"
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
+YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
+YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+  YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+fi
+YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
+    </value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fefc129b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/metainfo.xml
index a26b02f..2173b69 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/metainfo.xml
@@ -123,8 +123,9 @@
 
       <configuration-dependencies>
         <config-type>core-site</config-type>
-        <config-type>global</config-type>
+        <config-type>yarn-env</config-type>
         <config-type>mapred-site</config-type>
+        <config-type>mapred-env</config-type>
         <config-type>mapred-queue-acls</config-type>
       </configuration-dependencies>
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fefc129b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py
index 1dda724..500727c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py
@@ -30,12 +30,12 @@ config_dir = "/etc/hadoop/conf"
 
 mapred_user = status_params.mapred_user
 yarn_user = status_params.yarn_user
-hdfs_user = config['configurations']['global']['hdfs_user']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 
-smokeuser = config['configurations']['global']['smokeuser']
+smokeuser = config['configurations']['hadoop-env']['smokeuser']
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
 yarn_executor_container_group = config['configurations']['yarn-site']['yarn.nodemanager.linux-container-executor.group']
 kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 rm_hosts = config['clusterHostInfo']['rm_host']
@@ -49,14 +49,17 @@ hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", Fal
 
 hadoop_libexec_dir = '/usr/lib/hadoop/libexec'
 hadoop_yarn_home = '/usr/lib/hadoop-yarn'
-yarn_heapsize = config['configurations']['global']['yarn_heapsize']
-resourcemanager_heapsize = config['configurations']['global']['resourcemanager_heapsize']
-nodemanager_heapsize = config['configurations']['global']['nodemanager_heapsize']
-apptimelineserver_heapsize = default("/configurations/global/apptimelineserver_heapsize", 1024)
-yarn_log_dir_prefix = config['configurations']['global']['yarn_log_dir_prefix']
+yarn_heapsize = config['configurations']['yarn-env']['yarn_heapsize']
+resourcemanager_heapsize = config['configurations']['yarn-env']['resourcemanager_heapsize']
+nodemanager_heapsize = config['configurations']['yarn-env']['nodemanager_heapsize']
+apptimelineserver_heapsize = default("/configurations/yarn-env/apptimelineserver_heapsize", 1024)
+ats_leveldb_dir = config['configurations']['yarn-site']['yarn.timeline-service.leveldb-timeline-store.path']
+yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
 yarn_pid_dir_prefix = status_params.yarn_pid_dir_prefix
 mapred_pid_dir_prefix = status_params.mapred_pid_dir_prefix
-mapred_log_dir_prefix = config['configurations']['global']['mapred_log_dir_prefix']
+mapred_log_dir_prefix = config['configurations']['mapred-env']['mapred_log_dir_prefix']
+mapred_env_sh_template = config['configurations']['mapred-env']['content']
+yarn_env_sh_template = config['configurations']['yarn-env']['content']
 
 if len(rm_hosts) > 1:
   additional_rm_host = rm_hosts[1]
@@ -88,7 +91,7 @@ yarn_job_summary_log = format("{yarn_log_dir_prefix}/{yarn_user}/hadoop-mapreduc
 mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
 yarn_bin = "/usr/lib/hadoop-yarn/sbin"
 
-user_group = config['configurations']['global']['user_group']
+user_group = config['configurations']['hadoop-env']['user_group']
 limits_conf_dir = "/etc/security/limits.d"
 hadoop_conf_dir = "/etc/hadoop/conf"
 yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
@@ -100,8 +103,8 @@ exclude_file_path = config['configurations']['yarn-site']['yarn.resourcemanager.
 hostname = config['hostname']
 
 if security_enabled:
-  nm_principal_name = config['configurations']['global']['nodemanager_principal_name']
-  nodemanager_keytab = config['configurations']['global']['nodemanager_keytab']
+  nm_principal_name = config['configurations']['yarn-site']['nodemanager_principal_name']
+  nodemanager_keytab = config['configurations']['yarn-site']['nodemanager_keytab']
   nodemanager_principal_name = nm_principal_name.replace('_HOST',hostname.lower())
   nm_kinit_cmd = format("{kinit_path_local} -kt {nodemanager_keytab} {nodemanager_principal_name};")
 else:
@@ -115,9 +118,8 @@ mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapredu
 #for create_hdfs_directory
 hostname = config["hostname"]
 hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-hdfs_user = config['configurations']['global']['hdfs_user']
-hdfs_principal_name = config['configurations']['global']['hdfs_principal_name']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 import functools
 #create partial functions with common arguments for every HdfsDirectory call
@@ -131,3 +133,12 @@ HdfsDirectory = functools.partial(
   kinit_path_local = kinit_path_local
 )
 update_exclude_file_only = config['commandParams']['update_exclude_file_only']
+
+hadoop_bin = "/usr/lib/hadoop/sbin"
+mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
+
+#taskcontroller.cfg
+
+mapred_local_dir = "/tmp/hadoop-mapred/mapred/local"
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+min_user_id = config['configurations']['yarn-env']['min_user_id']
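Note the default(...) call in this hunk for apptimelineserver_heapsize: unlike the plain dict lookups, it tolerates a missing key and falls back to 1024. A minimal sketch of such a helper; the real one is provided by Ambari's resource_management library and reads the script's own config rather than taking it as an argument:

    def default(path, fallback, config):
        # Walk a '/'-separated path into the config dict, falling back
        # when any key along the way is missing (sketch only).
        node = config
        for key in path.strip('/').split('/'):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    cfg = {'configurations': {'yarn-env': {}}}
    print(default('/configurations/yarn-env/apptimelineserver_heapsize', 1024, cfg))  # -> 1024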

http://git-wip-us.apache.org/repos/asf/ambari/blob/fefc129b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/status_params.py
index 1a67de7..a3a45be 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/status_params.py
@@ -22,10 +22,10 @@ from resource_management import *
 
 config = Script.get_config()
 
-mapred_user = config['configurations']['global']['mapred_user']
-yarn_user = config['configurations']['global']['yarn_user']
-yarn_pid_dir_prefix = config['configurations']['global']['yarn_pid_dir_prefix']
-mapred_pid_dir_prefix = config['configurations']['global']['mapred_pid_dir_prefix']
+mapred_user = config['configurations']['mapred-env']['mapred_user']
+yarn_user = config['configurations']['yarn-env']['yarn_user']
+yarn_pid_dir_prefix = config['configurations']['yarn-env']['yarn_pid_dir_prefix']
+mapred_pid_dir_prefix = config['configurations']['mapred-env']['mapred_pid_dir_prefix']
 yarn_pid_dir = format("{yarn_pid_dir_prefix}/{yarn_user}")
 mapred_pid_dir = format("{mapred_pid_dir_prefix}/{mapred_user}")
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/fefc129b/ambari-web/app/data/HDP2/site_properties.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/HDP2/site_properties.js b/ambari-web/app/data/HDP2/site_properties.js
index a4a7d6f..3ba0246 100644
--- a/ambari-web/app/data/HDP2/site_properties.js
+++ b/ambari-web/app/data/HDP2/site_properties.js
@@ -1597,45 +1597,6 @@ module.exports =
       "serviceName": "PIG",
       "filename": "pig-properties.xml",
       "category": "Advanced pig-properties"
-    },
-
-    //***************************************** GLUSTERFS stack********************************************
-
-    {
-      "id": "site property",
-      "name": "fs.glusterfs.impl",
-      "displayName": "GlusterFS fs impl",
-      "displayType": "string",
-      "filename": "core-site.xml",
-      "serviceName": "GLUSTERFS",
-      "category": "General"
-    },
-    {
-      "id": "site property",
-      "name": "fs.AbstractFileSystem.glusterfs.impl",
-      "displayName": "Abstract File System Implementation",
-      "displayType": "string",
-      "filename": "core-site.xml",
-      "serviceName": "GLUSTERFS",
-      "category": "General"
-    },
-    {
-      "id": "site property",
-      "name": "fs.glusterfs.volumes",
-      "displayName": "Gluster volume name(s)",
-      "displayType": "string",
-      "filename": "core-site.xml",
-      "serviceName": "GLUSTERFS",
-      "category": "General"
-    },
-    {
-      "id": "site property",
-      "name": "fs.glusterfs.volume.fuse.gv0",
-      "displayName": "Gluster mount point for volume",
-      "displayType": "string",
-      "filename": "core-site.xml",
-      "serviceName": "GLUSTERFS",
-      "category": "General"
     }, 
   /********************************************* flume-agent *****************************/
     {
@@ -2131,32 +2092,67 @@ module.exports =
       "filename": "hbase-env.xml",
       "category": "Advanced hbase-env"
     },
+     //***************************************** GLUSTERFS stack********************************************
+    {
+      "id": "site property",
+      "name": "fs.glusterfs.impl",
+      "displayName": "GlusterFS fs impl",
+      "displayType": "string",
+      "filename": "core-site.xml",
+      "serviceName": "GLUSTERFS",
+      "category": "General"
+    },
+    {
+      "id": "site property",
+      "name": "fs.AbstractFileSystem.glusterfs.impl",
+      "displayName": "GlusterFS Abstract File System Implementation",
+      "displayType": "string",
+      "filename": "core-site.xml",
+      "serviceName": "GLUSTERFS",
+      "category": "General"
+    },
+    {
+      "id": "site property",
+      "name": "fs.glusterfs.volumes",
+      "displayName": "Gluster volume name(s)",
+      "displayType": "string",
+      "filename": "core-site.xml",
+      "serviceName": "GLUSTERFS",
+      "category": "General"
+    },
+    {
+      "id": "site property",
+      "name": "fs.glusterfs.volume.fuse.gv0",
+      "displayName": "Gluster mount point for volume",
+      "displayType": "string",
+      "filename": "core-site.xml",
+      "serviceName": "GLUSTERFS",
+      "category": "General"
+    },
   /**********************************************GLUSTERFS***************************************/
     {
       "id": "puppet var",
-      "name": "glusterfs_defaultFS_name",
-      "displayName": "GlusterFS default fs name",
-      "description": "GlusterFS default filesystem name (glusterfs:///)",
+      "name": "fs_glusterfs_default_name",
+      "displayName": "GlusterFS default fs name 1.x Hadoop",
+      "description": "GlusterFS default filesystem name (glusterfs://{MasterFQDN}:9000)",
       "defaultValue": "glusterfs:///localhost:8020",
       "displayType": "string",
       "isVisible": true,
-      "domain": "global",
       "serviceName": "GLUSTERFS",
-      "filename": "glusterfs-env.xml",
-      "category": "General"
+      "filename": "hadoop-env.xml",
+      "category": "General Hadoop"
     },
     {
       "id": "puppet var",
-      "name": "fs_glusterfs_default_name",
-      "displayName": "GlusterFS default fs name",
+      "name": "glusterfs_defaultFS_name",
+      "displayName": "GlusterFS default fs name 2.x Hadoop",
       "description": "GlusterFS default filesystem name (glusterfs:///)",
       "defaultValue": "glusterfs:///localhost:8020",
       "displayType": "string",
       "isVisible": true,
-      "domain": "global",
       "serviceName": "GLUSTERFS",
-      "filename": "glusterfs-env.xml",
-      "category": "General"
+      "filename": "hadoop-env.xml",
+      "category": "General Hadoop"
     },
     {
       "id": "puppet var",
@@ -2167,10 +2163,10 @@ module.exports =
       "displayType": "int",
       "unit": "MB",
       "isVisible": true,
-      "domain": "global",
       "serviceName": "GLUSTERFS",
-      "filename": "glusterfs-env.xml",
-      "category": "General"
+      "filename": "hadoop-env.xml",
+      "category": "General Hadoop",
+      "index": 1
     },
     {
       "id": "puppet var",
@@ -2181,10 +2177,10 @@ module.exports =
       "isReconfigurable": false,
       "displayType": "directory",
       "isOverridable": false,
-      "isVisible": true,
+      "isVisible": false,
       "serviceName": "GLUSTERFS",
-      "filename": "glusterfs-env.xml",
-      "category": "Advanced"
+      "filename": "hadoop-env.xml",
+      "category": "General Hadoop"
     },
     {
       "id": "puppet var",
@@ -2195,10 +2191,10 @@ module.exports =
       "isReconfigurable": false,
       "displayType": "directory",
       "isOverridable": false,
-      "isVisible": true,
+      "isVisible": false,
       "serviceName": "GLUSTERFS",
-      "filename": "glusterfs-env.xml",
-      "category": "Advanced"
+      "filename": "hadoop-env.xml",
+      "category": "General Hadoop"
     },
     {
       "id": "puppet var",
@@ -2211,8 +2207,8 @@ module.exports =
       "isOverridable": false,
       "isVisible": false,
       "serviceName": "GLUSTERFS",
-      "filename": "glusterfs-env.xml",
-      "category": "Advanced"
+      "filename": "hadoop-env.xml",
+      "category": "General Hadoop"
     },
     {
       "id": "puppet var",
@@ -2225,8 +2221,8 @@ module.exports =
       "isOverridable": false,
       "isVisible": false,
       "serviceName": "GLUSTERFS",
-      "filename": "glusterfs-env.xml",
-      "category": "Advanced"
+      "filename": "hadoop-env.xml",
+      "category": "General Hadoop"
     },
     {
       "id": "puppet var",
@@ -2239,8 +2235,8 @@ module.exports =
       "isOverridable": false,
       "isVisible": false,
       "serviceName": "GLUSTERFS",
-      "filename": "glusterfs-env.xml",
-      "category": "Advanced"
+      "filename": "hadoop-env.xml",
+      "category": "General Hadoop"
     },
     {
       "id": "puppet var",
@@ -2252,8 +2248,56 @@ module.exports =
       "unit": "MB",
       "isVisible": false,
       "serviceName": "GLUSTERFS",
-      "filename": "glusterfs-env.xml",
-      "category": "Advanced"
+      "filename": "hadoop-env.xml",
+      "category": "General Hadoop"
+    },
+    {
+      "id": "puppet var",
+      "name": "glusterfs_user",
+      "displayName": "glusterfs user",
+      "description": "glusterfs user",
+      "defaultValue": "root",
+      "displayType": "string",
+      "isVisible": false,
+      "serviceName": "GLUSTERFS",
+      "filename": "hadoop-env.xml",
+      "category": "General Hadoop"
+    },
+    {
+      "id": "puppet var",
+      "name": "namenode_host",
+      "displayName": "NameNode Host",
+      "description": "NameNode Host.",
+      "defaultValue": "",
+      "displayType": "string",
+      "isVisible": false,
+      "serviceName": "GLUSTERFS",
+      "filename": "hadoop-env.xml",
+      "category": "General Hadoop"
+    },
+    {
+      "id": "puppet var",
+      "name": "snamenode_host",
+      "displayName": "Secondary NameNode Host",
+      "description": "Secondary NameNode Host.",
+      "defaultValue": "",
+      "displayType": "string",
+      "isVisible": false,
+      "serviceName": "GLUSTERFS",
+      "filename": "hadoop-env.xml",
+      "category": "General Hadoop"
+    },
+    {
+      "id": "puppet var",
+      "name": "content",
+      "displayName": "Hadoop Environment Template",
+      "description": "Hadoop Environment Template.",
+      "defaultValue": "",
+      "displayType": "string",
+      "isVisible": false,
+      "serviceName": "GLUSTERFS",
+      "filename": "hadoop-env.xml",
+      "category": "General Hadoop"
     },
   /**********************************************HIVE***************************************/
     {

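Taken together, the HDP2 site_properties.js hunks apply one mechanical transformation to the GLUSTERFS entries: the core-site block moves down next to the other stack sections, and every GLUSTERFS puppet var loses the obsolete "domain": "global" key, moves from glusterfs-env.xml to hadoop-env.xml, and is regrouped under "General Hadoop". A hypothetical Python helper expressing that same transformation (names and shapes are illustrative, not part of the commit):

    def migrate_glusterfs_entry(prop):
        """Apply the hunks' by-hand edits to one property dict."""
        if prop.get('serviceName') != 'GLUSTERFS' or prop.get('id') != 'puppet var':
            return prop
        prop = dict(prop)             # leave the caller's dict untouched
        prop.pop('domain', None)      # "global" is no longer a config type
        if prop.get('filename') == 'glusterfs-env.xml':
            prop['filename'] = 'hadoop-env.xml'
        prop['category'] = 'General Hadoop'
        return prop

    before = {'id': 'puppet var', 'name': 'glusterfs_defaultFS_name',
              'serviceName': 'GLUSTERFS', 'domain': 'global',
              'filename': 'glusterfs-env.xml', 'category': 'General'}
    print(migrate_glusterfs_entry(before))

The per-property isVisible flips on the log and pid directory entries are not modeled here; they are individual edits in the hunks above.
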
http://git-wip-us.apache.org/repos/asf/ambari/blob/fefc129b/ambari-web/app/data/site_properties.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/site_properties.js b/ambari-web/app/data/site_properties.js
index af85b91..1f6bb63 100644
--- a/ambari-web/app/data/site_properties.js
+++ b/ambari-web/app/data/site_properties.js
@@ -799,43 +799,6 @@ module.exports =
       "filename": "pig-properties.xml",
       "category": "Advanced pig-properties"
     },
-    //***************************************** GLUSTERFS stack********************************************
-    {
-      "id": "site property",
-      "name": "fs.glusterfs.impl",
-      "displayName": "GlusterFS fs impl",
-      "displayType": "string",
-      "filename": "core-site.xml",
-      "serviceName": "GLUSTERFS",
-      "category": "General"
-    },
-    {
-      "id": "site property",
-      "name": "fs.AbstractFileSystem.glusterfs.impl",
-      "displayName": "Abstract File System Implementation",
-      "displayType": "string",
-      "filename": "core-site.xml",
-      "serviceName": "GLUSTERFS",
-      "category": "General"
-    },
-    {
-      "id": "site property",
-      "name": "fs.glusterfs.volumes",
-      "displayName": "Gluster volume name(s)",
-      "displayType": "string",
-      "filename": "core-site.xml",
-      "serviceName": "GLUSTERFS",
-      "category": "General"
-    },
-    {
-      "id": "site property",
-      "name": "fs.glusterfs.volume.fuse.gv0",
-      "displayName": "Gluster mount point for volume",
-      "displayType": "string",
-      "filename": "core-site.xml",
-      "serviceName": "GLUSTERFS",
-      "category": "General"
-    },
   /**********************************************HDFS***************************************/
     {
       "id": "puppet var",
@@ -1015,7 +978,43 @@ module.exports =
       "filename": "hadoop-env.xml",
       "category": "NAMENODE"
     },
-
+    /***************************************** GLUSTERFS stack********************************************/
+    {
+      "id": "site property",
+      "name": "fs.glusterfs.impl",
+      "displayName": "GlusterFS fs impl",
+      "displayType": "string",
+      "filename": "core-site.xml",
+      "serviceName": "GLUSTERFS",
+      "category": "General"
+    },
+    {
+      "id": "site property",
+      "name": "fs.AbstractFileSystem.glusterfs.impl",
+      "displayName": "GlusterFS Abstract File System Implementation",
+      "displayType": "string",
+      "filename": "core-site.xml",
+      "serviceName": "GLUSTERFS",
+      "category": "General"
+    },
+    {
+      "id": "site property",
+      "name": "fs.glusterfs.volumes",
+      "displayName": "Gluster volume name(s)",
+      "displayType": "string",
+      "filename": "core-site.xml",
+      "serviceName": "GLUSTERFS",
+      "category": "General"
+    },
+    {
+      "id": "site property",
+      "name": "fs.glusterfs.volume.fuse.gv0",
+      "displayName": "Gluster mount point for volume",
+      "displayType": "string",
+      "filename": "core-site.xml",
+      "serviceName": "GLUSTERFS",
+      "category": "General"
+    },
   /**********************************************GLUSTERFS***************************************/
     {
       "id": "puppet var",
@@ -1025,10 +1024,9 @@ module.exports =
       "defaultValue": "glusterfs:///localhost:8020",
       "displayType": "string",
       "isVisible": true,
-      "domain": "global",
       "serviceName": "GLUSTERFS",
       "filename": "hadoop-env.xml",
-      "category": "General"
+      "category": "General Hadoop"
     },
     {
       "id": "puppet var",
@@ -1038,23 +1036,9 @@ module.exports =
       "defaultValue": "glusterfs:///localhost:8020",
       "displayType": "string",
       "isVisible": true,
-      "domain": "global",
-      "serviceName": "GLUSTERFS",
-      "filename": "hadoop-env.xml",
-      "category": "General"
-    },
-    {
-      "id": "puppet var",
-      "name": "fs_AbstractFileSystem_glusterfs_impl",
-      "displayName": "GlusterFS Abstract Filesystem declaration",
-      "description": "GlusterFS Abstract Filesystem declaration",
-      "defaultValue": "org.apache.hadoop.fs.local.GlusterFs",
-      "displayType": "string",
-      "isVisible": true,
-      "domain": "global",
       "serviceName": "GLUSTERFS",
       "filename": "hadoop-env.xml",
-      "category": "General"
+      "category": "General Hadoop"
     },
     {
       "id": "puppet var",
@@ -1067,7 +1051,7 @@ module.exports =
       "isVisible": true,
       "serviceName": "GLUSTERFS",
       "filename": "hadoop-env.xml",
-      "category": "General",
+      "category": "General Hadoop",
       "index": 1
     },
     {
@@ -1079,10 +1063,10 @@ module.exports =
       "isReconfigurable": false,
       "displayType": "directory",
       "isOverridable": false,
-      "isVisible": true,
+      "isVisible": false,
       "serviceName": "GLUSTERFS",
       "filename": "hadoop-env.xml",
-      "category": "Advanced hadoop-env"
+      "category": "General Hadoop"
     },
     {
       "id": "puppet var",
@@ -1093,10 +1077,10 @@ module.exports =
       "isReconfigurable": false,
       "displayType": "directory",
       "isOverridable": false,
-      "isVisible": true,
+      "isVisible": false,
       "serviceName": "GLUSTERFS",
       "filename": "hadoop-env.xml",
-      "category": "Advanced hadoop-env"
+      "category": "General Hadoop"
     },
     {
       "id": "puppet var",
@@ -1110,7 +1094,7 @@ module.exports =
       "isVisible": false,
       "serviceName": "GLUSTERFS",
       "filename": "hadoop-env.xml",
-      "category": "Advanced hadoop-env"
+      "category": "General Hadoop"
     },
     {
       "id": "puppet var",
@@ -1124,7 +1108,7 @@ module.exports =
       "isVisible": false,
       "serviceName": "GLUSTERFS",
       "filename": "hadoop-env.xml",
-      "category": "Advanced hadoop-env"
+      "category": "General Hadoop"
     },
     {
       "id": "puppet var",
@@ -1138,7 +1122,7 @@ module.exports =
       "isVisible": false,
       "serviceName": "GLUSTERFS",
       "filename": "hadoop-env.xml",
-      "category": "Advanced hadoop-env"
+      "category": "General Hadoop"
     },
     {
       "id": "puppet var",
@@ -1152,7 +1136,55 @@ module.exports =
       "domain": "datanode-global",
       "serviceName": "GLUSTERFS",
       "filename": "hadoop-env.xml",
-      "category": "Advanced hadoop-env"
+      "category": "General Hadoop"
+    },
+    {
+      "id": "puppet var",
+      "name": "glusterfs_user",
+      "displayName": "glusterfs user",
+      "description": "glusterfs user",
+      "defaultValue": "root",
+      "displayType": "string",
+      "isVisible": false,
+      "serviceName": "GLUSTERFS",
+      "filename": "hadoop-env.xml",
+      "category": "General Hadoop"
+    },
+    {
+      "id": "puppet var",
+      "name": "namenode_host",
+      "displayName": "NameNode Host",
+      "description": "NameNode Host.",
+      "defaultValue": "",
+      "displayType": "string",
+      "isVisible": false,
+      "serviceName": "GLUSTERFS",
+      "filename": "hadoop-env.xml",
+      "category": "General Hadoop"
+    },
+    {
+      "id": "puppet var",
+      "name": "snamenode_host",
+      "displayName": "Secondary NameNode Host",
+      "description": "Secondary NameNode Host.",
+      "defaultValue": "",
+      "displayType": "string",
+      "isVisible": false,
+      "serviceName": "GLUSTERFS",
+      "filename": "hadoop-env.xml",
+      "category": "General Hadoop"
+    },
+    {
+      "id": "puppet var",
+      "name": "content",
+      "displayName": "Hadoop Environment Template",
+      "description": "Hadoop Environment Template.",
+      "defaultValue": "",
+      "displayType": "string",
+      "isVisible": false,
+      "serviceName": "GLUSTERFS",
+      "filename": "hadoop-env.xml",
+      "category": "General Hadoop"
     },
   /**********************************************MAPREDUCE***************************************/
     {

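The plain site_properties.js file gets the same treatment, and additionally drops the fs_AbstractFileSystem_glusterfs_impl puppet var, which mirrored the core-site fs.AbstractFileSystem.glusterfs.impl site property under an underscored name. A quick sanity check of the kind this cleanup suggests, treating underscored env names and dotted site names as the same logical key (hypothetical helper, not part of the commit):

    from collections import Counter

    def find_shadowed(props):
        # Normalize fs_AbstractFileSystem_glusterfs_impl and
        # fs.AbstractFileSystem.glusterfs.impl to one key per service.
        keys = [(p['serviceName'], p['name'].replace('_', '.')) for p in props]
        return [k for k, n in Counter(keys).items() if n > 1]

    props = [
        {'serviceName': 'GLUSTERFS', 'name': 'fs.AbstractFileSystem.glusterfs.impl'},
        {'serviceName': 'GLUSTERFS', 'name': 'fs_AbstractFileSystem_glusterfs_impl'},
    ]
    print(find_shadowed(props))
    # [('GLUSTERFS', 'fs.AbstractFileSystem.glusterfs.impl')]
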
http://git-wip-us.apache.org/repos/asf/ambari/blob/fefc129b/ambari-web/app/models/stack_service.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/stack_service.js b/ambari-web/app/models/stack_service.js
index e5535eb..744083b 100644
--- a/ambari-web/app/models/stack_service.js
+++ b/ambari-web/app/models/stack_service.js
@@ -63,7 +63,7 @@ App.StackService = DS.Model.extend(App.ServiceModelMixin, {
 
   configTypesRendered: function () {
     var configTypes = this.get('configTypes');
-    if (this.get('serviceName') == 'HDFS') return configTypes;
+    if (this.get('serviceName') == 'HDFS' || this.get('serviceName') == 'GLUSTERFS') return configTypes;
     else {
       var renderedConfigTypes = $.extend(true, {}, configTypes);
       delete renderedConfigTypes['core-site'];
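The stack_service.js change is the one behavioral edit outside configuration metadata: configTypesRendered now returns the full set of config types for GLUSTERFS as well as HDFS, so core-site stays rendered when GLUSTERFS supplies the filesystem and owns the fs.glusterfs.* keys. A Python rendition of the visible logic (the original Ember computed property may prune more than core-site; this sketch covers only the hunk shown):

    import copy

    def config_types_rendered(service_name, config_types):
        # HDFS and GLUSTERFS keep every config type, core-site included;
        # all other services get a copy with core-site pruned.
        if service_name in ('HDFS', 'GLUSTERFS'):
            return config_types
        rendered = copy.deepcopy(config_types)
        rendered.pop('core-site', None)
        return rendered

    print(config_types_rendered('GLUSTERFS', {'core-site': {}, 'hadoop-env': {}}))
    # {'core-site': {}, 'hadoop-env': {}}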