Posted to commits@ambari.apache.org by ja...@apache.org on 2014/09/05 20:10:49 UTC

[2/4] AMBARI-7162. Refactor ambari-web code and ambari-agent code to use cluster-env configuration. (jaimin)
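
In short: cluster-wide properties (security_enabled, ignore_groupsusers_create, smokeuser, kerberos_domain, user_group, and, on secured clusters, smokeuser_keytab and kinit_path_local) move out of "hadoop-env" into a new "cluster-env" configuration type, and the web and agent code are updated to read them from there. A minimal sketch of the resulting test-config layout, assembled from the hunks below (values are the stack-default test fixtures):

    "cluster-env": {
        "security_enabled": "false",
        "ignore_groupsusers_create": "false",
        "smokeuser": "ambari-qa",
        "kerberos_domain": "EXAMPLE.COM",
        "user_group": "hadoop"
    },
    "hadoop-env": {
        "hdfs_user": "hdfs",
        "namenode_heapsize": "1024m"
    }

Service-specific keys such as hdfs_user and namenode_heapsize stay in hadoop-env; only the cluster-wide ones move.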

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
index 769e13d..466e9f8 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
@@ -365,21 +365,23 @@
             "resourcemanager_heapsize": "1024", 
             "yarn_log_dir_prefix": "/var/log/hadoop-yarn",
             "min_user_id": "1000"
-        }, 
+        },
+        "cluster-env": {
+            "security_enabled": "false",
+            "ignore_groupsusers_create": "false",
+            "smokeuser": "ambari-qa",
+            "kerberos_domain": "EXAMPLE.COM",
+            "user_group": "hadoop"
+        },
         "hadoop-env": {
-            "security_enabled": "false", 
-            "namenode_opt_maxnewsize": "200m", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "ignore_groupsusers_create": "false", 
+            "namenode_opt_maxnewsize": "200m",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
             "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "200m", 
-            "kerberos_domain": "EXAMPLE.COM", 
+            "namenode_opt_newsize": "200m",
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTime
 Stamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize
 }} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where ha
 doop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add lib
 raries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d \"/usr/lib/tez\" ]; then\n  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64", 
-            "hdfs_user": "hdfs", 
-            "user_group": "hadoop", 
+            "hdfs_user": "hdfs",
             "dtnode_heapsize": "1024m", 
-            "proxyuser_group": "users", 
-            "smokeuser": "ambari-qa", 
+            "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
             "hadoop_pid_dir_prefix": "/var/run/hadoop"
         },

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
index 02900f6..7ab5885 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
@@ -344,27 +344,30 @@
             "resourcemanager_heapsize": "1024", 
             "yarn_log_dir_prefix": "/var/log/hadoop-yarn",
             "min_user_id": "1000" 
-        }, 
+        },
+        "cluster-env": {
+            "security_enabled": "true",
+            "ignore_groupsusers_create": "false",
+            "smokeuser": "ambari-qa",
+            "kerberos_domain": "EXAMPLE.COM",
+            "user_group": "hadoop",
+            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab",
+            "kinit_path_local": "/usr/bin"
+        },
         "hadoop-env": {
-            "security_enabled": "false", 
-            "namenode_opt_maxnewsize": "200m", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "ignore_groupsusers_create": "false", 
+            "namenode_opt_maxnewsize": "200m",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
             "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "200m", 
-            "kerberos_domain": "EXAMPLE.COM", 
+            "namenode_opt_newsize": "200m",
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTime
 Stamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize
 }} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where ha
 doop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add lib
 raries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d \"/usr/lib/tez\" ]; then\n  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64", 
-            "hdfs_user": "hdfs", 
-            "user_group": "hadoop", 
+            "hdfs_user": "hdfs",
             "dtnode_heapsize": "1024m", 
-            "proxyuser_group": "users", 
-            "smokeuser": "ambari-qa", 
+            "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
             "hadoop_pid_dir_prefix": "/var/run/hadoop",
-			"jobhistory_http_keytab": "/etc/security/keytabs/spnego.service.keytab", 
+            "jobhistory_http_keytab": "/etc/security/keytabs/spnego.service.keytab",
             "resourcemanager_principal_name": "rm/_HOST", 
-            "hadoop_http_principal_name": "HTTP/_HOST", 
-            "kinit_path_local": "/usr/bin",
+            "hadoop_http_principal_name": "HTTP/_HOST",
             "nagios_keytab_path": "/etc/security/keytabs/nagios.service.keytab", 
             "namenode_principal_name": "nn/_HOST", 
             "namenode_keytab": "/etc/security/keytabs/nn.service.keytab", 
@@ -375,9 +378,7 @@
             "datanode_principal_name": "dn/_HOST", 
             "hive_metastore_keytab": "/etc/security/keytabs/hive.service.keytab", 
             "jobhistory_keytab": "/etc/security/keytabs/jhs.service.keytab",
-            "zookeeper_keytab_path": "/etc/security/keytabs/zk.service.keytab", 
-            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab", 
-            "kerberos_domain": "EXAMPLE.COM", 
+            "zookeeper_keytab_path": "/etc/security/keytabs/zk.service.keytab",
             "snamenode_keytab": "/etc/security/keytabs/nn.service.keytab", 
             "oozie_principal_name": "oozie/c6402.ambari.apache.org", 
             "hive_metastore_principal_name": "hive/_HOST", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json
index 42b28f9..f407b80 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json
@@ -65,21 +65,23 @@
             "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2", 
             "yarn.scheduler.capacity.root.acl_administer_queue": "*", 
             "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*"
-        }, 
+        },
+        "cluster-env": {
+            "security_enabled": "false",
+            "ignore_groupsusers_create": "false",
+            "smokeuser": "ambari-qa",
+            "kerberos_domain": "EXAMPLE.COM",
+            "user_group": "hadoop"
+        },
         "hadoop-env": {
-            "security_enabled": "false", 
-            "namenode_opt_maxnewsize": "200m", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "ignore_groupsusers_create": "false", 
+            "namenode_opt_maxnewsize": "200m",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
             "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "200m", 
-            "kerberos_domain": "EXAMPLE.COM", 
+            "namenode_opt_newsize": "200m",
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTime
 Stamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize
 }} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where ha
 doop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add lib
 raries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d \"/usr/lib/tez\" ]; then\n  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64", 
-            "hdfs_user": "hdfs", 
-            "user_group": "hadoop", 
+            "hdfs_user": "hdfs",
             "dtnode_heapsize": "1024m", 
-            "proxyuser_group": "users", 
-            "smokeuser": "ambari-qa", 
+            "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
             "hadoop_pid_dir_prefix": "/var/run/hadoop"
         }, 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
index 8aa45c9..fc6c063 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
@@ -466,25 +466,28 @@
             "resourcemanager_heapsize": "1024", 
             "yarn_log_dir_prefix": "/var/log/hadoop-yarn",
             "min_user_id": "1000"
-        }, 
+        },
+        "cluster-env": {
+            "security_enabled": "true",
+            "ignore_groupsusers_create": "false",
+            "smokeuser": "ambari-qa",
+            "kerberos_domain": "EXAMPLE.COM",
+            "user_group": "hadoop",
+            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab",
+            "kinit_path_local": "/usr/bin"
+        },
         "hadoop-env": {
-            "security_enabled": "true", 
-            "namenode_opt_maxnewsize": "200m", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "ignore_groupsusers_create": "false", 
+            "namenode_opt_maxnewsize": "200m",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
             "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "200m", 
-            "kerberos_domain": "EXAMPLE.COM", 
+            "namenode_opt_newsize": "200m",
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTime
 Stamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize
 }} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where ha
 doop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add lib
 raries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d \"/usr/lib/tez\" ]; then\n  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64", 
             "hdfs_user": "hdfs",
             "hdfs_principal_name": "hdfs",
-            "user_group": "hadoop",
             "dtnode_heapsize": "1024m", 
-            "proxyuser_group": "users", 
-            "smokeuser": "ambari-qa", 
+            "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
             "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab",
             "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab"
         },
         "hive-env": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
index eeb296b..5edf545 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
@@ -466,25 +466,28 @@
             "resourcemanager_heapsize": "1024", 
             "yarn_log_dir_prefix": "/var/log/hadoop-yarn",
             "min_user_id": "1000"
-        }, 
+        },
+        "cluster-env": {
+            "security_enabled": "true",
+            "ignore_groupsusers_create": "false",
+            "smokeuser": "ambari-qa",
+            "kerberos_domain": "EXAMPLE.COM",
+            "user_group": "hadoop",
+            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab",
+            "kinit_path_local": "/usr/bin"
+        },
         "hadoop-env": {
-            "security_enabled": "true", 
-            "namenode_opt_maxnewsize": "200m", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "ignore_groupsusers_create": "false", 
+            "namenode_opt_maxnewsize": "200m",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
             "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "200m", 
-            "kerberos_domain": "EXAMPLE.COM", 
+            "namenode_opt_newsize": "200m",
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTime
 Stamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize
 }} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where ha
 doop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add lib
 raries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d \"/usr/lib/tez\" ]; then\n  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64", 
             "hdfs_user": "hdfs",
             "hdfs_principal_name": "hdfs",
-            "user_group": "hadoop",
             "dtnode_heapsize": "1024m", 
-            "proxyuser_group": "users", 
-            "smokeuser": "ambari-qa", 
+            "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
             "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab",
             "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab"
         },
         "hive-env": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-server/src/test/python/stacks/2.1/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/default.json b/ambari-server/src/test/python/stacks/2.1/configs/default.json
index 143c78a..64b716b 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/default.json
@@ -541,19 +541,14 @@
             "yarn_log_dir_prefix": "/var/log/hadoop-yarn"
         }, 
         "hadoop-env": {
-            "security_enabled": "false", 
             "namenode_opt_maxnewsize": "200m", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "ignore_groupsusers_create": "false", 
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
             "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "200m", 
-            "kerberos_domain": "EXAMPLE.COM", 
+            "namenode_opt_newsize": "200m",
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTime
 Stamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize
 }} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where ha
 doop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add lib
 raries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d \"/usr/lib/tez\" ]; then\n  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64", 
-            "hdfs_user": "hdfs", 
-            "user_group": "hadoop", 
+            "hdfs_user": "hdfs",
             "dtnode_heapsize": "1024m", 
-            "proxyuser_group": "users", 
-            "smokeuser": "ambari-qa", 
+            "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
             "hadoop_pid_dir_prefix": "/var/run/hadoop"
         },
@@ -662,7 +657,14 @@
             "content": "\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH=\"`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}\"",
             "sqoop_user": "sqoop"
         },
-        "hdfs-log4j": {
+        "cluster-env": {
+            "security_enabled": "false",
+            "ignore_groupsusers_create": "false",
+            "smokeuser": "ambari-qa",
+            "kerberos_domain": "EXAMPLE.COM",
+            "user_group": "hadoop"
+        },
+        "hdfs-log4j": {
             "property1": "value1"
         },
         "yarn-log4j": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-server/src/test/python/stacks/2.1/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/secured.json b/ambari-server/src/test/python/stacks/2.1/configs/secured.json
index a4a2ddc..c344416 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/secured.json
@@ -511,25 +511,28 @@
             "yarn_user": "yarn", 
             "resourcemanager_heapsize": "1024", 
             "yarn_log_dir_prefix": "/var/log/hadoop-yarn"
-        }, 
+        },
+        "cluster-env": {
+            "security_enabled": "true",
+            "ignore_groupsusers_create": "false",
+            "smokeuser": "ambari-qa",
+            "kerberos_domain": "EXAMPLE.COM",
+            "user_group": "hadoop",
+            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab",
+            "kinit_path_local": "/usr/bin"
+        },
         "hadoop-env": {
-            "security_enabled": "true", 
-            "namenode_opt_maxnewsize": "200m", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "ignore_groupsusers_create": "false", 
+            "namenode_opt_maxnewsize": "200m",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
             "namenode_heapsize": "1024m", 
-            "namenode_opt_newsize": "200m", 
-            "kerberos_domain": "EXAMPLE.COM", 
+            "namenode_opt_newsize": "200m",
             "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTime
 Stamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize
 }} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where ha
 doop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add lib
 raries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d \"/usr/lib/tez\" ]; then\n  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64", 
             "hdfs_user": "hdfs",
             "hdfs_principal_name": "hdfs",
-            "user_group": "hadoop",
             "dtnode_heapsize": "1024m", 
-            "proxyuser_group": "users", 
-            "smokeuser": "ambari-qa", 
+            "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
             "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab",
             "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab"
         },
         "hive-env": {

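The fixture change above pulls the cluster-wide globals (security_enabled, smokeuser, kerberos_domain, user_group, ignore_groupsusers_create) out of hadoop-env into the new cluster-env site. A minimal sketch of the lookup consumers perform after this move, assuming a plain map of site name to properties as in the fixture (the helper name is hypothetical):

// Hypothetical helper; `configurations` is assumed to be a map keyed by site name.
function isSecurityEnabled(configurations) {
  // security_enabled now lives in cluster-env rather than hadoop-env.
  var clusterEnv = configurations['cluster-env'] || {};
  return clusterEnv['security_enabled'] === 'true';
}
// e.g. isSecurityEnabled({'cluster-env': {'security_enabled': 'false'}}) === false
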
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-web/app/controllers/main/admin/security.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/security.js b/ambari-web/app/controllers/main/admin/security.js
index cdc17f1..5cd8a63 100644
--- a/ambari-web/app/controllers/main/admin/security.js
+++ b/ambari-web/app/controllers/main/admin/security.js
@@ -274,39 +274,43 @@ App.MainAdminSecurityController = Em.Controller.extend({
 
   getSecurityStatusFromServerSuccessCallback: function (data) {
     var configs = data.Clusters.desired_configs;
+    var tags = [];
     this.set('desiredConfigs', configs);
-    if ('hadoop-env' in configs && 'hdfs-site' in configs) {
-      this.set('tag.hadoop-env', configs['hadoop-env'].tag);
-      this.set('tag.hdfs-site', configs['hdfs-site'].tag);
-      this.getServiceConfigsFromServer();
-    }
-    else {
+
+    if ('cluster-env' in configs) {
+      this.set('tag.cluster-env', configs['cluster-env'].tag);
+      tags.pushObject({
+        siteName: "cluster-env",
+        tagName: this.get('tag.cluster-env')
+      });
+    } else {
       this.showSecurityErrorPopup();
     }
-  },
 
-  getServiceConfigsFromServer: function () {
-    var tags = [
-      {
-        siteName: "hadoop-env",
-        tagName: this.get('tag.hadoop-env')
-      },
-      {
+    if ('hdfs-site' in configs) {
+      this.set('tag.hdfs-site', configs['hdfs-site'].tag);
+      tags.pushObject({
         siteName: "hdfs-site",
         tagName: this.get('tag.hdfs-site')
-      }
-    ];
+      });
+    }
+    this.getServiceConfigsFromServer(tags);
+  },
+
+  getServiceConfigsFromServer: function (tags) {
     var self = this;
 
     App.router.get('configurationController').getConfigsByTags(tags).done(function(data) {
-      var configs = data.findProperty('tag', self.get('tag.hadoop-env')).properties;
+      var configs = data.findProperty('tag', self.get('tag.cluster-env')).properties;
       if (configs && (configs['security_enabled'] === 'true' || configs['security_enabled'] === true)) {
         self.set('securityEnabled', true);
       }
       else {
         self.set('securityEnabled', false);
-        var hdfsConfigs = data.findProperty('tag', self.get('tag.hdfs-site')).properties;
-        self.setNnHaStatus(hdfsConfigs);
+        if (!!self.get('tag.hdfs-site')) {
+          var hdfsConfigs = data.findProperty('tag', self.get('tag.hdfs-site')).properties;
+          self.setNnHaStatus(hdfsConfigs);
+        }
       }
       self.loadUsers(configs);
       self.set('dataIsLoaded', true);

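The refactored callback resolves the security flag from cluster-env and consults hdfs-site, for the NameNode HA check, only when that site exists. The findProperty('tag', ...) calls assume getConfigsByTags resolves to an array of tagged site objects, roughly like this (values are illustrative, not from a real cluster):

// Assumed shape of the resolved `data` array; one entry per requested
// {siteName, tagName} pair. findProperty is Ember's array extension.
var data = [
  { type: 'cluster-env', tag: 'version1', properties: { security_enabled: 'false' } },
  { type: 'hdfs-site',   tag: 'version2', properties: { 'dfs.nameservices': 'mycluster' } }
];
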
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-web/app/controllers/main/admin/security/add/step3.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/security/add/step3.js b/ambari-web/app/controllers/main/admin/security/add/step3.js
index a58f028..8f6fd67 100644
--- a/ambari-web/app/controllers/main/admin/security/add/step3.js
+++ b/ambari-web/app/controllers/main/admin/security/add/step3.js
@@ -116,7 +116,8 @@ App.MainAdminSecurityAddStep3Controller = Em.Controller.extend({
     {
       userConfig: 'hdfs_user',
       keytab: 'hdfs_user_keytab',
-      displayName: Em.I18n.t('admin.addSecurity.user.hdfsUser')
+      displayName: Em.I18n.t('admin.addSecurity.user.hdfsUser'),
+      checkService: 'HDFS'
     },
     {
       userConfig: 'hbase_user',

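The new checkService marker ties the hdfs_user entry to the HDFS service. A plausible consumer would filter the list against installed services, along these lines (hypothetical sketch; the real consumer lives elsewhere in the controller):

// `users` stands in for the array shown in the hunk above.
var users = [
  { userConfig: 'hdfs_user', keytab: 'hdfs_user_keytab', checkService: 'HDFS' }
];
var visibleUsers = users.filter(function (user) {
  // keep unconditional entries; gate the rest on the named service being installed
  return !user.checkService ||
         App.Service.find().someProperty('serviceName', user.checkService);
});
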
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-web/app/controllers/main/admin/security/disable.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/security/disable.js b/ambari-web/app/controllers/main/admin/security/disable.js
index 09681ea..51b6f0e 100644
--- a/ambari-web/app/controllers/main/admin/security/disable.js
+++ b/ambari-web/app/controllers/main/admin/security/disable.js
@@ -148,7 +148,9 @@ App.MainAdminSecurityDisableController = App.MainAdminSecurityProgressController
         _serviceConfigTags.newTagName = 'version' + (new Date).getTime();
         if (_serviceConfigTags.siteName.contains('-env')) {
           this.deleteDisabledConfigs(secureProperties, _serviceConfigTags);
-          _serviceConfigTags.configs.security_enabled = 'false';
+          if (_serviceConfigTags.siteName === 'cluster-env') {
+            _serviceConfigTags.configs.security_enabled = 'false';
+          }
         } else {
           this.modifySiteConfigs(secureMapping, _serviceConfigTags);
         }

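With this guard, every *-env site still has its secure-only keys deleted, but only cluster-env carries the global security_enabled flag back to 'false'. An illustrative before/after for one entry of the loop (tag and property values are examples):

var tagEntry = {
  siteName: 'cluster-env',
  newTagName: 'version1409930000000',           // illustrative
  configs: { security_enabled: 'true', user_group: 'hadoop' }
};
// After the branch above runs, only this cluster-env entry is reset:
// tagEntry.configs.security_enabled === 'false'
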
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-web/app/controllers/main/admin/security/security_progress_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/security/security_progress_controller.js b/ambari-web/app/controllers/main/admin/security/security_progress_controller.js
index 4eb2fb6..5aacb56 100644
--- a/ambari-web/app/controllers/main/admin/security/security_progress_controller.js
+++ b/ambari-web/app/controllers/main/admin/security/security_progress_controller.js
@@ -311,6 +311,14 @@ App.MainAdminSecurityProgressController = Em.Controller.extend({
       }
     }, this);
 
+    var clusterConfig = configData.findProperty('type', 'cluster-env');
+    if (clusterConfig) {
+      allConfigData.pushObject(JSON.stringify({
+        Clusters: {
+          desired_config: [clusterConfig]
+        }
+      }));
+    }
     App.ajax.send({
       name: 'common.across.services.configurations',
       sender: this,

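Because cluster-env is not scoped to a service, it is submitted as a cluster-level desired_config. The string pushed above would look roughly like this (tag and properties are illustrative):

var clusterConfig = {
  type: 'cluster-env',
  tag: 'version1409930000000',                  // illustrative tag
  properties: { security_enabled: 'true' }
};
var body = JSON.stringify({ Clusters: { desired_config: [clusterConfig] } });
// body === '{"Clusters":{"desired_config":[{"type":"cluster-env","tag":"version1409930000000","properties":{"security_enabled":"true"}}]}}'
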
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-web/app/controllers/main/service/reassign_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/reassign_controller.js b/ambari-web/app/controllers/main/service/reassign_controller.js
index c71cecb..bbaf04b 100644
--- a/ambari-web/app/controllers/main/service/reassign_controller.js
+++ b/ambari-web/app/controllers/main/service/reassign_controller.js
@@ -102,8 +102,8 @@ App.ReassignMasterController = App.WizardController.extend({
 
   getSecurityStatusSuccessCallback: function (data) {
     var configs = data.Clusters.desired_configs;
-    if ('hadoop-env' in configs) {
-      this.getServiceConfigsFromServer(configs['hadoop-env'].tag);
+    if ('cluster-env' in configs) {
+      this.getServiceConfigsFromServer(configs['cluster-env'].tag);
     }
     else {
       console.error('Cannot get security status from server');
@@ -114,7 +114,7 @@ App.ReassignMasterController = App.WizardController.extend({
     var self = this;
     var tags = [
       {
-        siteName: "hadoop-env",
+        siteName: "cluster-env",
         tagName: tag
       }
     ];

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-web/app/controllers/wizard.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard.js b/ambari-web/app/controllers/wizard.js
index e84ce44..eeffdfd 100644
--- a/ambari-web/app/controllers/wizard.js
+++ b/ambari-web/app/controllers/wizard.js
@@ -774,38 +774,44 @@ App.WizardController = Em.Controller.extend(App.LocalStorage, {
    */
   loadAdvancedConfigs: function (dependentController) {
     var self = this;
-    var stackServices = this.get('content.services').filter(function (service) {
-      return service.get('isInstalled') || service.get('isSelected');
-    });
-    var counter = stackServices.length;
-    var loadAdvancedConfigResult = [];
-    dependentController.set('isAdvancedConfigLoaded', false);
-    stackServices.forEach(function (service) {
-      var serviceName = service.get('serviceName');
-      App.config.loadAdvancedConfig(serviceName, function (properties) {
-        var supportsFinal = App.config.getConfigTypesInfoFromService(service).supportsFinal;
-
-        function shouldSupportFinal(filename) {
-          var matchingConfigType = supportsFinal.find(function (configType) {
-            return filename.startsWith(configType);
-          });
-          return !!matchingConfigType;
-        }
+    var loadServiceConfigsFn = function(clusterProperties) {
+      var stackServices = self.get('content.services').filter(function (service) {
+        return service.get('isInstalled') || service.get('isSelected');
+      });
+      var counter = stackServices.length;
+      var loadAdvancedConfigResult = [];
+      dependentController.set('isAdvancedConfigLoaded', false);
+      stackServices.forEach(function (service) {
+        var serviceName = service.get('serviceName');
+        App.config.loadAdvancedConfig(serviceName, function (properties) {
+          var supportsFinal = App.config.getConfigTypesInfoFromService(service).supportsFinal;
+
+          function shouldSupportFinal(filename) {
+            var matchingConfigType = supportsFinal.find(function (configType) {
+              return filename.startsWith(configType);
+            });
+            return !!matchingConfigType;
+          }
 
-        properties.forEach(function (property) {
-          property.supportsFinal = shouldSupportFinal(property.filename);
+          properties.forEach(function (property) {
+            property.supportsFinal = shouldSupportFinal(property.filename);
+          });
+          loadAdvancedConfigResult.pushObjects(properties);
+          counter--;
+          //pass configs to controller after last call is completed
+          if (counter === 0) {
+            loadAdvancedConfigResult.pushObjects(clusterProperties);
+            self.set('content.advancedServiceConfig', loadAdvancedConfigResult);
+            self.setDBProperty('advancedServiceConfig', loadAdvancedConfigResult);
+            dependentController.set('isAdvancedConfigLoaded', true);
+          }
         });
-        loadAdvancedConfigResult.pushObjects(properties);
-        counter--;
-        //pass configs to controller after last call is completed
-        if (counter === 0) {
-          self.set('content.advancedServiceConfig', loadAdvancedConfigResult);
-          self.setDBProperty('advancedServiceConfig', loadAdvancedConfigResult);
-          dependentController.set('isAdvancedConfigLoaded', true);
-        }
-      });
-    }, this);
+      }, this);
+    };
+    App.config.loadClusterConfig(loadServiceConfigsFn);
   },
+
+
   /**
    * Load serviceConfigProperties to model
    */
@@ -846,7 +852,7 @@ App.WizardController = Em.Controller.extend(App.LocalStorage, {
           hasInitialValue: !!_configProperties.get('hasInitialValue'),
           isRequired: _configProperties.get('isRequired'), // flag that allow saving property with empty value
           group: !!_configProperties.get('group') ? _configProperties.get('group.name') : null,
-          showLabel: _configProperties.get('showLabel'),
+          showLabel: _configProperties.get('showLabel')
         };
         serviceConfigProperties.push(configProperty);
       }, this);

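loadAdvancedConfigs now waits for cluster-level properties before running the per-service loop, then appends them to the combined result. App.config.loadClusterConfig is introduced elsewhere in this patch; a plausible sketch of it, using the config.cluster endpoint registered in the ajax.js hunk further down (the success-handler name is hypothetical):

// Plausible sketch only; the real implementation lives in App.config.
var loadClusterConfig = function (callback) {
  App.ajax.send({
    name: 'config.cluster',                     // endpoint added in the ajax.js hunk below
    sender: App.config,
    data: {
      stackVersionUrl: App.get('stackVersionURL'),
      callback: callback                        // invoked with the parsed cluster properties
    },
    success: 'loadClusterConfigSuccess'         // hypothetical handler name
  });
};
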
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-web/app/controllers/wizard/step8_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step8_controller.js b/ambari-web/app/controllers/wizard/step8_controller.js
index 61daa1f..2d511c2 100644
--- a/ambari-web/app/controllers/wizard/step8_controller.js
+++ b/ambari-web/app/controllers/wizard/step8_controller.js
@@ -1404,6 +1404,11 @@ App.WizardStep8Controller = Em.Controller.extend(App.AddSecurityConfigs, {
     var selectedServices = this.get('selectedServices');
     var coreSiteObject = this.createCoreSiteObj();
     var tag = 'version1';
+    var clusterSiteObj = this.createSiteObj('cluster-env', true, tag);
+
+    if (this.get('content.controllerName') == 'installerController') {
+      this.get('serviceConfigTags').pushObject(clusterSiteObj);
+    }
 
     if (this.get('content.controllerName') == 'addServiceController') {
       tag = 'version' + (new Date).getTime();
@@ -1466,6 +1471,15 @@ App.WizardStep8Controller = Em.Controller.extend(App.AddSecurityConfigs, {
       }));
     }, this);
 
+    var clusterConfig = serviceConfigTags.findProperty('type', 'cluster-env');
+    if (clusterConfig) {
+      allConfigData.pushObject(JSON.stringify({
+        Clusters: {
+          desired_config: [clusterConfig]
+        }
+      }));
+    }
+
     this.addRequestToAjaxQueue({
       name: 'common.across.services.configurations',
       data: {
@@ -1621,7 +1635,7 @@ App.WizardStep8Controller = Em.Controller.extend(App.AddSecurityConfigs, {
   /**
    * Create siteObj for custom service with it own configs
    * @param {string} site
-   * @param {bool} isNonXmlFile
+   * @param {boolean} isNonXmlFile
    * @param tag
    * @returns {{type: string, tag: string, properties: {}}}
    * @method createSiteObj

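Per the corrected @returns annotation above, createSiteObj yields a {type, tag, properties} triple, so the object pushed for cluster-env on the installer path would resemble the following (property values are examples drawn from the test fixture):

// Illustrative result of createSiteObj('cluster-env', true, 'version1'):
var clusterSiteObj = {
  type: 'cluster-env',
  tag: 'version1',
  properties: { security_enabled: 'false', smokeuser: 'ambari-qa', user_group: 'hadoop' }
};
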
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-web/app/data/HDP2/secure_configs.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/HDP2/secure_configs.js b/ambari-web/app/data/HDP2/secure_configs.js
index 99bc5d5..1412eb3 100644
--- a/ambari-web/app/data/HDP2/secure_configs.js
+++ b/ambari-web/app/data/HDP2/secure_configs.js
@@ -31,7 +31,7 @@ module.exports = [
       App.ServiceConfigCategory.create({ name: 'KERBEROS', displayName: 'Kerberos'}),
       App.ServiceConfigCategory.create({ name: 'AMBARI', displayName: 'Ambari'})
     ],
-    sites: ['hadoop-env', 'oozie-env', 'nagios-env', 'zookeeper-env', 'storm-env', 'hbase-env'],
+    sites: ['cluster-env'],
     configs: configProperties.filterProperty('serviceName', 'GENERAL')
   },
   {
@@ -45,7 +45,7 @@ module.exports = [
       App.ServiceConfigCategory.create({ name: 'JournalNode', displayName: 'JournalNode',exclude:'non-HA'}),
       App.ServiceConfigCategory.create({ name: 'DataNode', displayName: 'DataNode'})
     ],
-    sites: ['core-site', 'hdfs-site'],
+    sites: ['hadoop-env','core-site', 'hdfs-site'],
     configs: configProperties.filterProperty('serviceName', 'HDFS')
   },
   {
@@ -97,7 +97,7 @@ module.exports = [
       App.ServiceConfigCategory.create({ name: 'HBase Master', displayName: 'HBase Master'}),
       App.ServiceConfigCategory.create({ name: 'RegionServer', displayName: 'RegionServer'})
     ],
-    sites: ['hbase-site'],
+    sites: ['hbase-env','hbase-site'],
     configs: configProperties.filterProperty('serviceName', 'HBASE')
   },
   {
@@ -106,6 +106,7 @@ module.exports = [
     configCategories: [
       App.ServiceConfigCategory.create({ name: 'ZooKeeper Server', displayName: 'ZooKeeper Server'})
     ],
+    sites: ['zookeeper-env'],
     configs: configProperties.filterProperty('serviceName', 'ZOOKEEPER')
 
   },
@@ -116,7 +117,7 @@ module.exports = [
     configCategories: [
       App.ServiceConfigCategory.create({ name: 'Oozie Server', displayName:  'Oozie Server'})
     ],
-    sites: ['oozie-site'],
+    sites: ['oozie-env','oozie-site'],
     configs: configProperties.filterProperty('serviceName', 'OOZIE')
   },
   {
@@ -125,6 +126,7 @@ module.exports = [
     configCategories: [
       App.ServiceConfigCategory.create({ name: 'Nagios Server', displayName:  'Nagios Server'})
     ],
+    sites: ['nagios-env'],
     configs: configProperties.filterProperty('serviceName', 'NAGIOS')
   },
   {
@@ -134,7 +136,7 @@ module.exports = [
     configCategories: [
       App.ServiceConfigCategory.create({ name: 'Storm Topology', displayName:  'Storm Topology'})
     ],
-    sites: ['storm-site'],
+    sites: ['storm-env','storm-site'],
     configs: configProperties.filterProperty('serviceName', 'STORM')
   },
   {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-web/app/data/HDP2/secure_properties.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/HDP2/secure_properties.js b/ambari-web/app/data/HDP2/secure_properties.js
index 8b93c6e..ca91978 100644
--- a/ambari-web/app/data/HDP2/secure_properties.js
+++ b/ambari-web/app/data/HDP2/secure_properties.js
@@ -28,7 +28,7 @@ module.exports =
       "isVisible": false,
       "isOverridable": false,
       "serviceName": "GENERAL",
-      "filename": "hadoop-env.xml",
+      "filename": "cluster-env.xml",
       "category": "KERBEROS"
     },
     {
@@ -45,19 +45,6 @@ module.exports =
     },
     {
       "id": "puppet var",
-      "name": "keytab_path",
-      "displayName": "Path to keytab file",
-      "value": "",
-      "defaultValue": "/etc/security/keytabs",
-      "description": "Type of kerberos security for the cluster",
-      "displayType": "principal",
-      "isVisible": false,
-      "isOverridable": false,
-      "serviceName": "GENERAL",
-      "category": "AMBARI"
-    },
-    {
-      "id": "puppet var",
       "name": "kerberos_domain",
       "displayName": "Realm name",
       "value": "",
@@ -67,7 +54,7 @@ module.exports =
       "isVisible": true,
       "isOverridable": false,
       "serviceName": "GENERAL",
-      "filename": "hadoop-env.xml",
+      "filename": "cluster-env.xml",
       "category": "KERBEROS"
     },
     {
@@ -81,6 +68,7 @@ module.exports =
       "isVisible": true,
       "isOverridable": false,
       "serviceName": "GENERAL",
+      "filename": "cluster-env.xml",
       "category": "KERBEROS"
     },
     {
@@ -108,7 +96,7 @@ module.exports =
       "isVisible": true,
       "isOverridable": false,
       "serviceName": "GENERAL",
-      "filename": "hadoop-env.xml",
+      "filename": "cluster-env.xml",
       "category": "AMBARI"
     },
     {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-web/app/data/HDP2/site_properties.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/HDP2/site_properties.js b/ambari-web/app/data/HDP2/site_properties.js
index f4a0e14..edcb322 100644
--- a/ambari-web/app/data/HDP2/site_properties.js
+++ b/ambari-web/app/data/HDP2/site_properties.js
@@ -1784,11 +1784,23 @@ module.exports =
       "displayType": "checkbox",
       "isOverridable": false,
       "isVisible": false,
-      "serviceName": "HDFS",
-      "filename": "hadoop-env.xml",
-      "category": "Advanced hadoop-env"
+      "serviceName": "MISC",
+      "filename": "cluster-env.xml"
+    },
+    {
+      "id": "puppet var",
+      "name": "kerberos_domain",
+      "displayName": "Kerberos realm",
+      "description": "Kerberos realm",
+      "defaultValue": 'EXAMPLE.COM',
+      "isRequired": false,
+      "isOverridable": false,
+      "isVisible": false,
+      "serviceName": "MISC",
+      "filename": "cluster-env.xml"
     },
 
+
   /**********************************************MAPREDUCE2***************************************/
     {
       "id": "puppet var",
@@ -3605,9 +3617,8 @@ module.exports =
       "isOverridable": false,
       "isVisible": App.supports.customizeSmokeTestUser,
       "serviceName": "MISC",
-      "filename": "hadoop-env.xml",
+      "filename": "cluster-env.xml",
       "category": "Users and Groups",
-      "belongsToService": ["HDFS"],
       "index": 16
     },
     {
@@ -3637,9 +3648,8 @@ module.exports =
       "isOverridable": false,
       "isVisible": true,
       "serviceName": "MISC",
-      "filename": "hadoop-env.xml",
+      "filename": "cluster-env.xml",
       "category": "Users and Groups",
-      "belongsToService": ["HDFS"],
       "index": 17
     },
     {
@@ -3680,9 +3690,8 @@ module.exports =
       "isOverridable": false,
       "isVisible": true,
       "serviceName": "MISC",
-      "filename": "hadoop-env.xml",
-      "category": "Users and Groups",
-      "belongsToService": ["HDFS"]
+      "filename": "cluster-env.xml",
+      "category": "Users and Groups"
     }
   ]
 };

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-web/app/data/secure_configs.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/secure_configs.js b/ambari-web/app/data/secure_configs.js
index c4aa0ca..c0b4dba 100644
--- a/ambari-web/app/data/secure_configs.js
+++ b/ambari-web/app/data/secure_configs.js
@@ -31,7 +31,7 @@ module.exports = [
       App.ServiceConfigCategory.create({ name: 'KERBEROS', displayName: 'Kerberos'}),
       App.ServiceConfigCategory.create({ name: 'AMBARI', displayName: 'Ambari'})
     ],
-    sites: ['hadoop-env', 'oozie-env', 'nagios-env', 'zookeeper-env', 'storm-env', 'hbase-env'],
+    sites: ['cluster-env'],
     configs: configProperties.filterProperty('serviceName', 'GENERAL')
   },
   {
@@ -44,7 +44,7 @@ module.exports = [
       App.ServiceConfigCategory.create({ name: 'SNameNode', displayName: 'Secondary NameNode'}),
       App.ServiceConfigCategory.create({ name: 'DataNode', displayName: 'DataNode'})
     ],
-    sites: ['core-site', 'hdfs-site'],
+    sites: ['hadoop-env','core-site', 'hdfs-site'],
     configs: configProperties.filterProperty('serviceName', 'HDFS')
   },
   {
@@ -87,7 +87,7 @@ module.exports = [
       App.ServiceConfigCategory.create({ name: 'HBase Master', displayName: 'HBase Master'}),
       App.ServiceConfigCategory.create({ name: 'RegionServer', displayName: 'RegionServer'})
     ],
-    sites: ['hbase-site'],
+    sites: ['hbase-env','hbase-site'],
     configs: configProperties.filterProperty('serviceName', 'HBASE')
   },
   {
@@ -96,6 +96,7 @@ module.exports = [
     configCategories: [
       App.ServiceConfigCategory.create({ name: 'ZooKeeper Server', displayName: 'ZooKeeper Server'})
     ],
+    sites: ['zookeeper-env'],
     configs: configProperties.filterProperty('serviceName', 'ZOOKEEPER')
 
   },
@@ -106,7 +107,7 @@ module.exports = [
     configCategories: [
       App.ServiceConfigCategory.create({ name: 'Oozie Server', displayName:  'Oozie Server'})
     ],
-    sites: ['oozie-site'],
+    sites: ['oozie-env','oozie-site'],
     configs: configProperties.filterProperty('serviceName', 'OOZIE')
   },
   {
@@ -115,6 +116,7 @@ module.exports = [
     configCategories: [
       App.ServiceConfigCategory.create({ name: 'Nagios Server', displayName:  'Nagios Server'})
     ],
+    sites: ['nagios-env'],
     configs: configProperties.filterProperty('serviceName', 'NAGIOS')
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-web/app/data/secure_properties.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/secure_properties.js b/ambari-web/app/data/secure_properties.js
index ad7adc4..ba1e26d 100644
--- a/ambari-web/app/data/secure_properties.js
+++ b/ambari-web/app/data/secure_properties.js
@@ -28,7 +28,7 @@ module.exports =
       "isVisible": false,
       "isOverridable": false,
       "serviceName": "GENERAL",
-      "filename": "hadoop-env.xml",
+      "filename": "cluster-env.xml",
       "category": "KERBEROS"
     },
     {
@@ -54,6 +54,7 @@ module.exports =
       "isVisible": false,
       "isOverridable": false,
       "serviceName": "GENERAL",
+      "filename": "cluster-env.xml",
       "category": "AMBARI"
     },
     {
@@ -67,7 +68,7 @@ module.exports =
       "isVisible": true,
       "isOverridable": false,
       "serviceName": "GENERAL",
-      "filename": "hadoop-env.xml",
+      "filename": "cluster-env.xml",
       "category": "KERBEROS"
     },
     {
@@ -81,6 +82,7 @@ module.exports =
       "isVisible": true,
       "isOverridable": false,
       "serviceName": "GENERAL",
+      "filename": "cluster-env.xml",
       "category": "KERBEROS"
     },
     {
@@ -108,7 +110,7 @@ module.exports =
       "isVisible": true,
       "isOverridable": false,
       "serviceName": "GENERAL",
-      "filename": "hadoop-env.xml",
+      "filename": "cluster-env.xml",
       "category": "AMBARI"
     },
     {
@@ -750,21 +752,6 @@ module.exports =
       "category": "ZooKeeper Server",
       "component": "ZOOKEEPER_SERVER"
     },
-    {
-      "id": "puppet var",
-      "name": "zookeeper_keytab_path",
-      "displayName": "Path to keytab file",
-      "value": "",
-      "defaultValue": "/etc/security/keytabs/zk.service.keytab",
-      "description": "Path to ZooKeeper keytab file",
-      "displayType": "directory",
-      "isVisible": true,
-      "isOverridable": false,
-      "serviceName": "ZOOKEEPER",
-      "filename": "zookeeper-env.xml",
-      "category": "ZooKeeper Server",
-      "component": "ZOOKEEPER_SERVER"
-    },
     //NAGIOS
     {
       "id": "puppet var",

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-web/app/data/service_configs.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/service_configs.js b/ambari-web/app/data/service_configs.js
index f7e4442..a7acbe3 100644
--- a/ambari-web/app/data/service_configs.js
+++ b/ambari-web/app/data/service_configs.js
@@ -38,6 +38,7 @@ module.exports = [
       App.ServiceConfigCategory.create({ name: 'Users and Groups', displayName : 'Users and Groups'})
     ],
     configTypes: {
+      "cluster-env": {supports: {final: false}},
       "hadoop-env": {supports: {final: false}},
       "mapred-env": {supports: {final: false}},
       "yarn-env": {supports: {final: false}},

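The supports.final flags registered here feed the shouldSupportFinal check in the wizard hunk above. A sketch of the derivation, assuming App.config.getConfigTypesInfoFromService filters these entries (the hdfs-site entry below is an invented contrast case):

var configTypes = {
  'cluster-env': { supports: { final: false } },
  'hdfs-site':   { supports: { final: true } }  // hypothetical contrast
};
var supportsFinal = Object.keys(configTypes).filter(function (type) {
  return configTypes[type].supports.final === true;
});
// => ['hdfs-site']; cluster-env declares {final: false}, so it never supports the flag.
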
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-web/app/data/site_properties.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/site_properties.js b/ambari-web/app/data/site_properties.js
index 738411a..cf837f0 100644
--- a/ambari-web/app/data/site_properties.js
+++ b/ambari-web/app/data/site_properties.js
@@ -973,9 +973,20 @@ module.exports =
       "displayType": "checkbox",
       "isOverridable": false,
       "isVisible": false,
-      "serviceName": "HDFS",
-      "filename": "hadoop-env.xml",
-      "category": "Advanced hadoop-env"
+      "serviceName": "MISC",
+      "filename": "cluster-env.xml"
+    },
+    {
+      "id": "puppet var",
+      "name": "kerberos_domain",
+      "displayName": "Kerberos realm",
+      "description": "Kerberos realm",
+      "defaultValue": 'EXAMPLE.COM',
+      "isRequired": false,
+      "isOverridable": false,
+      "isVisible": false,
+      "serviceName": "MISC",
+      "filename": "cluster-env.xml"
     },
     {
       "id": "puppet var",
@@ -2251,7 +2262,7 @@ module.exports =
       "displayType": "user",
       "isOverridable": false,
       "isVisible": true,
-      "filename": "core-site.xml",
+      "filename": "hadoop-env.xml",
       "serviceName": "MISC",
       "category": "Users and Groups",
       "belongsToService": ["HIVE", "WEBHCAT", "OOZIE"]
@@ -2474,9 +2485,9 @@ module.exports =
       "isOverridable": false,
       "isVisible": App.supports.customizeSmokeTestUser,
       "serviceName": "MISC",
-      "filename": "hadoop-env.xml",
+      "filename": "cluster-env.xml",
       "category": "Users and Groups",
-      "belongsToService": ["HDFS"]
+      "index": 16
     },
     {
       "id": "puppet var",
@@ -2489,9 +2500,8 @@ module.exports =
       "isOverridable": false,
       "isVisible": true,
       "serviceName": "MISC",
-      "filename": "hadoop-env.xml",
-      "category": "Users and Groups",
-      "belongsToService": ["HDFS"]
+      "filename": "cluster-env.xml",
+      "category": "Users and Groups"
     },
     {
       "id": "puppet var",
@@ -2517,9 +2527,8 @@ module.exports =
       "isOverridable": false,
       "isVisible": true,
       "serviceName": "MISC",
-      "filename": "hadoop-env.xml",
+      "filename": "cluster-env.xml",
       "category": "Users and Groups",
-      "belongsToService": ["HDFS"]
     }
   ]
 };

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7131fa3/ambari-web/app/utils/ajax/ajax.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/ajax/ajax.js b/ambari-web/app/utils/ajax/ajax.js
index 4821ba6..3d1fda9 100644
--- a/ambari-web/app/utils/ajax/ajax.js
+++ b/ambari-web/app/utils/ajax/ajax.js
@@ -434,6 +434,10 @@ var urls = {
       }
     }
   },
+  'config.cluster': {
+    'real': '{stackVersionUrl}/configurations?fields=*',
+    'mock': ''
+  },
   'config.advanced': {
     'real': '{stackVersionUrl}/services/{serviceName}/configurations?fields=*',
     'mock': '/data/wizard/stack/hdp/version{stackVersion}/{serviceName}.json'