Posted to commits@ambari.apache.org by ao...@apache.org on 2014/07/16 14:59:00 UTC

[08/11] AMBARI-6488. Move global to env in stack definitions (aonishuk)

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
index a7dd45b..7b15d5e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
@@ -34,10 +34,10 @@ hbase_excluded_hosts = config['commandParams']['excluded_hosts']
 hbase_drain_only = config['commandParams']['mark_draining_only']
 
 hbase_user = status_params.hbase_user
-smokeuser = config['configurations']['global']['smokeuser']
+smokeuser = config['configurations']['hadoop-env']['smokeuser']
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-user_group = config['configurations']['global']['user_group']
+user_group = config['configurations']['hadoop-env']['user_group']
 
 # this is "hadoop-metrics.properties" for 1.x stacks
 metric_prop_file_name = "hadoop-metrics2-hbase.properties"
@@ -45,10 +45,10 @@ metric_prop_file_name = "hadoop-metrics2-hbase.properties"
 # not supporting 32 bit jdk.
 java64_home = config['hostLevelParams']['java_home']
 
-log_dir = config['configurations']['global']['hbase_log_dir']
-master_heapsize = config['configurations']['global']['hbase_master_heapsize']
+log_dir = config['configurations']['hbase-env']['hbase_log_dir']
+master_heapsize = config['configurations']['hbase-env']['hbase_master_heapsize']
 
-regionserver_heapsize = config['configurations']['global']['hbase_regionserver_heapsize']
+regionserver_heapsize = config['configurations']['hbase-env']['hbase_regionserver_heapsize']
 regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, 0.2, 512)
 
 pid_dir = status_params.pid_dir
@@ -57,9 +57,9 @@ tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
 _local_dir_conf = default('/configurations/hbase-site/hbase.local.dir', "${hbase.tmp.dir}/local")
 local_dir = substitute_vars(_local_dir_conf, config['configurations']['hbase-site'])
 
-client_jaas_config_file = default('hbase_client_jaas_config_file', format("{hbase_conf_dir}/hbase_client_jaas.conf"))
-master_jaas_config_file = default('hbase_master_jaas_config_file', format("{hbase_conf_dir}/hbase_master_jaas.conf"))
-regionserver_jaas_config_file = default('hbase_regionserver_jaas_config_file', format("{hbase_conf_dir}/hbase_regionserver_jaas.conf"))
+client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
+master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
+regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
 
 ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
 ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
@@ -70,8 +70,8 @@ if 'slave_hosts' in config['clusterHostInfo']:
 else:
   rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/all_hosts') 
   
-smoke_test_user = config['configurations']['global']['smokeuser']
-smokeuser_permissions = default('smokeuser_permissions', "RWXCA")
+smoke_test_user = config['configurations']['hadoop-env']['smokeuser']
+smokeuser_permissions = "RWXCA"
 service_check_data = functions.get_unique_id_and_date()
 
 if security_enabled:
@@ -81,9 +81,9 @@ if security_enabled:
 
 master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
 regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-hbase_user_keytab = config['configurations']['global']['hbase_user_keytab']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+hbase_user_keytab = config['configurations']['hadoop-env']['hbase_user_keytab']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 if security_enabled:
   kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_user};")
 else:
@@ -94,16 +94,17 @@ if (('hbase-log4j' in config['configurations']) and ('content' in config['config
   log4j_props = config['configurations']['hbase-log4j']['content']
 else:
   log4j_props = None
-
+  
+hbase_env_sh_template = config['configurations']['hbase-env']['content']
 
 hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
 hbase_staging_dir = "/apps/hbase/staging"
 #for create_hdfs_directory
 hostname = config["hostname"]
 hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-hdfs_user = config['configurations']['global']['hdfs_user']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 import functools
 #create partial functions with common arguments for every HdfsDirectory call
 #to create hdfs directory we need to call params.HdfsDirectory in code
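
The hunks above all follow one pattern: lookups that used the catch-all 'global' config type now read from a service-scoped *-env type ('hadoop-env', 'hbase-env'), required values are indexed directly, and only genuinely optional values keep a default() fallback; that is why the jaas_config_file and smokeuser_permissions assignments drop theirs. A minimal sketch of the two lookup styles, using a plain dict in place of the real Script.get_config() result and a simplified stand-in for resource_management's default() helper (not its actual implementation):

# Simplified stand-in for the command JSON returned by Script.get_config().
config = {
    'configurations': {
        'hadoop-env': {'smokeuser': 'ambari-qa', 'user_group': 'hadoop'},
        'hbase-env': {'hbase_log_dir': '/var/log/hbase'},
    }
}

def default(path, default_value):
    # Walk a '/'-separated path through the dict; fall back if any key is missing.
    node = config
    for key in path.strip('/').split('/'):
        if not isinstance(node, dict) or key not in node:
            return default_value
        node = node[key]
    return node

smokeuser = config['configurations']['hadoop-env']['smokeuser']                 # required: KeyError if absent
log_dir = default('/configurations/hbase-env/hbase_log_dir', '/var/log/hbase')  # optional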

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/status_params.py
index 8360507..850ec8b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/status_params.py
@@ -22,5 +22,5 @@ from resource_management import *
 
 config = Script.get_config()
 
-pid_dir = config['configurations']['global']['hbase_pid_dir']
-hbase_user = config['configurations']['global']['hbase_user']
+pid_dir = config['configurations']['hbase-env']['hbase_pid_dir']
+hbase_user = config['configurations']['hbase-env']['hbase_user']

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-env.sh.j2
deleted file mode 100644
index 5a95bf4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-env.sh.j2
+++ /dev/null
@@ -1,100 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Set environment variables here.
-
-# The java implementation to use. Java 1.6 required.
-export JAVA_HOME={{java64_home}}
-
-# HBase Configuration directory
-export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}
-
-# Extra Java CLASSPATH elements. Optional.
-export HBASE_CLASSPATH=${HBASE_CLASSPATH}
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-# export HBASE_HEAPSIZE=1000
-
-# Extra Java runtime options.
-# Below are what we set by default. May only work with SUN JVM.
-# For more on why as well as other possible settings,
-# see http://wiki.apache.org/hadoop/PerformanceTuning
-export HBASE_OPTS="-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log"
-export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
-# Uncomment below to enable java garbage collection logging.
-# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
-
-# Uncomment and adjust to enable JMX exporting
-# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
-# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
-#
-# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
-export HBASE_MASTER_OPTS="-Xmx{{master_heapsize}}"
-export HBASE_REGIONSERVER_OPTS="-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
-# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
-# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
-
-# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
-export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
-
-# Extra ssh options. Empty by default.
-# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
-
-# Where log files are stored. $HBASE_HOME/logs by default.
-export HBASE_LOG_DIR={{log_dir}}
-
-# A string representing this instance of hbase. $USER by default.
-# export HBASE_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes. See 'man nice'.
-# export HBASE_NICENESS=10
-
-# The directory where pid files are stored. /tmp by default.
-export HBASE_PID_DIR={{pid_dir}}
-
-# Seconds to sleep between slave commands. Unset by default. This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HBASE_SLAVE_SLEEP=0.1
-
-# Tell HBase whether it should manage it's own instance of Zookeeper or not.
-export HBASE_MANAGES_ZK=false
-
-{% if security_enabled %}
-export HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}"
-export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}"
-export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}"
-{% endif %}
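
Deleting hbase-env.sh.j2 does not lose this template: the params.py hunk above already picks it up as hbase_env_sh_template = config['configurations']['hbase-env']['content'], so the same Jinja text now travels inside a configuration property and stays editable through Ambari instead of being fixed on disk. The {{...}} placeholders are ordinary Jinja2 substitution; a bare-library sketch of the rendering step (Ambari's own template classes supply more context than this):

from jinja2 import Template

env_content = (
    "export HBASE_LOG_DIR={{log_dir}}\n"
    "export HBASE_PID_DIR={{pid_dir}}\n"
)
print(Template(env_content).render(log_dir='/var/log/hbase', pid_dir='/var/run/hbase'))
# export HBASE_LOG_DIR=/var/log/hbase
# export HBASE_PID_DIR=/var/run/hbase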

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/global.xml
deleted file mode 100644
index e5163f8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/global.xml
+++ /dev/null
@@ -1,87 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-  <property>
-    <name>hdfs_log_dir_prefix</name>
-    <value>/var/log/hadoop</value>
-    <description>Hadoop Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>hadoop_pid_dir_prefix</name>
-    <value>/var/run/hadoop</value>
-    <description>Hadoop PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_heapsize</name>
-    <value>1024</value>
-    <description>NameNode Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_opt_newsize</name>
-    <value>200</value>
-    <description>NameNode new generation size</description>
-  </property>
-  <property>
-    <name>namenode_opt_maxnewsize</name>
-    <value>200</value>
-    <description>NameNode maximum new generation size</description>
-  </property>
-  <property>
-    <name>dtnode_heapsize</name>
-    <value>1024</value>
-    <description>DataNode maximum Java heap size</description>
-  </property>
-  <property>
-    <name>proxyuser_group</name>
-    <value>users</value>
-    <description>Proxy user group.</description>
-  </property>
-
-  <property>
-    <name>security_enabled</name>
-    <value>false</value>
-    <description>Hadoop Security</description>
-  </property>
-  <property>
-    <name>kerberos_domain</name>
-    <value>EXAMPLE.COM</value>
-    <description>Kerberos realm.</description>
-  </property>
-  
-  <property>
-    <name>hdfs_user</name>
-    <value>hdfs</value>
-    <description>User and Groups.</description>
-  </property>
-  <property>
-    <name>ignore_groupsusers_create</name>
-    <value>false</value>
-    <description>Whether to ignores failures on users and group creation</description>
-  </property>
-  
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-env.xml
new file mode 100644
index 0000000..8d8b28a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-env.xml
@@ -0,0 +1,213 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>NameNode new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>200</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <value>users</value>
+    <description>Proxy user group.</description>
+  </property>
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>hdfs_user</name>
+    <value>hdfs</value>
+    <description>HDFS User.</description>
+  </property>
+  <property>
+    <name>ignore_groupsusers_create</name>
+    <value>false</value>
+    <description>Whether to ignore failures on user and group creation</description>
+  </property>
+  <property>
+    <name>smokeuser</name>
+    <value>ambari-qa</value>
+    <description>User executing service checks</description>
+  </property>
+  <property>
+    <name>user_group</name>
+    <value>hadoop</value>
+    <description>Hadoop user group.</description>
+  </property>
+  
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <description>hadoop-env.sh content</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER={{hdfs_user}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+if [ -d "/usr/lib/tez" ]; then
+  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
+    </value>
+  </property>
+  
+</configuration>
\ No newline at end of file
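
The new hadoop-env.xml keeps the flat name/value/description schema of the deleted global.xml; what is new is the config type itself and the 'content' property carrying the whole hadoop-env.sh template as a single value. For reference, a short standard-library sketch that lists the properties out of such a file (the filename is the one added above):

import xml.etree.ElementTree as ET

root = ET.parse('hadoop-env.xml').getroot()
for prop in root.findall('property'):
    name = prop.findtext('name')
    value = (prop.findtext('value') or '').strip()
    print('%s = %s' % (name, value.splitlines()[0] if value else ''))
# hdfs_log_dir_prefix = /var/log/hadoop
# hadoop_pid_dir_prefix = /var/run/hadoop
# ...
# content = # Set Hadoop-specific environment variables here.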

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metainfo.xml
index feff2ab..d0e8d86 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metainfo.xml
@@ -174,8 +174,8 @@
 
       <configuration-dependencies>
         <config-type>core-site</config-type>
-        <config-type>global</config-type>
         <config-type>hdfs-site</config-type>
+        <config-type>hadoop-env</config-type>
         <config-type>hadoop-policy</config-type>
         <config-type>hdfs-log4j</config-type>
       </configuration-dependencies>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
index 2c091b7..1978881 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
@@ -56,6 +56,7 @@ class NameNode(Script):
     import status_params
 
     env.set_params(status_params)
+    Execute(format("echo '{namenode_pid_file}' >> /1.txt"))
     check_process_status(status_params.namenode_pid_file)
     pass
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
index 7a4c667..80e4ecf 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
@@ -31,16 +31,16 @@ else:
 #security params
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-falcon_user = config['configurations']['global']['falcon_user']
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+falcon_user = config['configurations']['falcon-env']['falcon_user']
 
 #exclude file
 hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
 exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
 update_exclude_file_only = config['commandParams']['update_exclude_file_only']
 
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 #hosts
 hostname = config["hostname"]
 rm_host = default("/clusterHostInfo/rm_host", [])
@@ -86,20 +86,20 @@ if has_ganglia_server:
   ganglia_server_host = ganglia_server_hosts[0]
 
 #users and groups
-yarn_user = config['configurations']['global']['yarn_user']
-hbase_user = config['configurations']['global']['hbase_user']
-nagios_user = config['configurations']['global']['nagios_user']
-oozie_user = config['configurations']['global']['oozie_user']
-webhcat_user = config['configurations']['global']['hcat_user']
-hcat_user = config['configurations']['global']['hcat_user']
-hive_user = config['configurations']['global']['hive_user']
-smoke_user =  config['configurations']['global']['smokeuser']
-mapred_user = config['configurations']['global']['mapred_user']
+yarn_user = config['configurations']['yarn-env']['yarn_user']
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+nagios_user = config['configurations']['nagios-env']['nagios_user']
+oozie_user = config['configurations']['oozie-env']['oozie_user']
+webhcat_user = config['configurations']['hive-env']['hcat_user']
+hcat_user = config['configurations']['hive-env']['hcat_user']
+hive_user = config['configurations']['hive-env']['hive_user']
+smoke_user =  config['configurations']['hadoop-env']['smokeuser']
+mapred_user = config['configurations']['mapred-env']['mapred_user']
 hdfs_user = status_params.hdfs_user
 
-user_group = config['configurations']['global']['user_group']
-proxyuser_group =  config['configurations']['global']['proxyuser_group']
-nagios_group = config['configurations']['global']['nagios_group']
+user_group = config['configurations']['hadoop-env']['user_group']
+proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
+nagios_group = config['configurations']['nagios-env']['nagios_group']
 smoke_user_group = "users"
 
 #hadoop params
@@ -107,7 +107,7 @@ hadoop_conf_dir = "/etc/hadoop/conf"
 hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
 hadoop_bin = "/usr/lib/hadoop/sbin"
 
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
 
 dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
 dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/status_params.py
index 4097373..0027a4c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/status_params.py
@@ -21,8 +21,8 @@ from resource_management import *
 
 config = Script.get_config()
 
-hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
-hdfs_user = config['configurations']['global']['hdfs_user']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdp_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
 datanode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
 namenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
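
status_params.py assembles the pid-file paths with resource_management's format(), which interpolates {hadoop_pid_dir_prefix} and {hdfs_user} from the surrounding scope; the status commands then hand those paths to check_process_status(). A rough Python-only equivalent of both steps, with str.format standing in for the scope-aware format() and a signal-0 probe standing in for check_process_status():

import os

hadoop_pid_dir_prefix = '/var/run/hadoop'
hdfs_user = 'hdfs'

# e.g. /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid
namenode_pid_file = '{0}/{1}/hadoop-{1}-namenode.pid'.format(hadoop_pid_dir_prefix, hdfs_user)

def process_is_running(pid_file):
    # Signal 0 probes for a live process without affecting it.
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
        os.kill(pid, 0)
        return True
    except (IOError, OSError, ValueError):
        return False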

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/global.xml
deleted file mode 100644
index 0953ce7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/global.xml
+++ /dev/null
@@ -1,90 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-  <property>
-    <name>hive_database_type</name>
-    <value>mysql</value>
-    <description>Default HIVE DB type.</description>
-  </property>
-  <property>
-    <name>hive_database</name>
-    <value>New MySQL Database</value>
-    <description>
-      Property that determines whether the HIVE DB is managed by Ambari.
-    </description>
-  </property>
-  <property>
-    <name>hive_ambari_database</name>
-    <value>MySQL</value>
-    <description>Database type.</description>
-  </property>
-  <property>
-    <name>hive_database_name</name>
-    <value>hive</value>
-    <description>Database name.</description>
-  </property>
-  <property>
-    <name>hive_dbroot</name>
-    <value>/usr/lib/hive/lib/</value>
-    <description>Hive DB Directory.</description>
-  </property>
-  <property>
-    <name>hive_log_dir</name>
-    <value>/var/log/hive</value>
-    <description>Directory for Hive Log files.</description>
-  </property>
-  <property>
-    <name>hive_pid_dir</name>
-    <value>/var/run/hive</value>
-    <description>Hive PID Dir.</description>
-  </property>
-  <property>
-    <name>hive_user</name>
-    <value>hive</value>
-    <description>Hive User.</description>
-  </property>
-
-  <!--HCAT-->
-
-  <property>
-    <name>hcat_log_dir</name>
-    <value>/var/log/webhcat</value>
-    <description>WebHCat Log Dir.</description>
-  </property>
-  <property>
-    <name>hcat_pid_dir</name>
-    <value>/var/run/webhcat</value>
-    <description>WebHCat Pid Dir.</description>
-  </property>
-  <property>
-    <name>hcat_user</name>
-    <value>hcat</value>
-    <description>HCat User.</description>
-  </property>
-  <property>
-    <name>webhcat_user</name>
-    <value>hcat</value>
-    <description>WebHCat User.</description>
-  </property>
-  
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-env.xml
new file mode 100644
index 0000000..224b943
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-env.xml
@@ -0,0 +1,131 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hive_database_type</name>
+    <value>mysql</value>
+    <description>Default HIVE DB type.</description>
+  </property>
+  <property>
+    <name>hive_database</name>
+    <value>New MySQL Database</value>
+    <description>
+      Property that determines whether the HIVE DB is managed by Ambari.
+    </description>
+  </property>
+  <property>
+    <name>hive_ambari_database</name>
+    <value>MySQL</value>
+    <description>Database type.</description>
+  </property>
+  <property>
+    <name>hive_database_name</name>
+    <value>hive</value>
+    <description>Database name.</description>
+  </property>
+  <property>
+    <name>hive_dbroot</name>
+    <value>/usr/lib/hive/lib/</value>
+    <description>Hive DB Directory.</description>
+  </property>
+  <property>
+    <name>hive_log_dir</name>
+    <value>/var/log/hive</value>
+    <description>Directory for Hive Log files.</description>
+  </property>
+  <property>
+    <name>hive_pid_dir</name>
+    <value>/var/run/hive</value>
+    <description>Hive PID Dir.</description>
+  </property>
+  <property>
+    <name>hive_user</name>
+    <value>hive</value>
+    <description>Hive User.</description>
+  </property>
+
+  <!--HCAT-->
+
+  <property>
+    <name>hcat_log_dir</name>
+    <value>/var/log/webhcat</value>
+    <description>WebHCat Log Dir.</description>
+  </property>
+  <property>
+    <name>hcat_pid_dir</name>
+    <value>/var/run/webhcat</value>
+    <description>WebHCat Pid Dir.</description>
+  </property>
+  <property>
+    <name>hcat_user</name>
+    <value>hcat</value>
+    <description>HCat User.</description>
+  </property>
+  <property>
+    <name>webhcat_user</name>
+    <value>hcat</value>
+    <description>WebHCat User.</description>
+  </property>
+  
+  <!-- hive-env.sh -->
+  <property>
+    <name>content</name>
+    <description>hive-env.sh content</description>
+    <value>
+ if [ "$SERVICE" = "cli" ]; then
+   if [ -z "$DEBUG" ]; then
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+   else
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+   fi
+ fi
+
+# The heap size of the JVM started by the hive shell script can be controlled via:
+
+export HADOOP_HEAPSIZE="{{hive_heapsize}}"
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+
+# A larger heap size may be required when running queries over a large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  A larger heap size would also be
+# appropriate for hive server (hwi etc).
+
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR={{conf_dir}}
+
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
+elif [ -d "/usr/lib/hive-hcatalog/" ]; then
+  export HIVE_AUX_JARS_PATH=/usr/lib/hive-hcatalog/share/hcatalog/hive-hcatalog-core-*.jar
+else
+  export HIVE_AUX_JARS_PATH=/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar
+fi
+export METASTORE_PORT={{hive_metastore_port}}
+    </value>
+  </property>
+  
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml
index 5b927ca..e920d67 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml
@@ -182,8 +182,8 @@
         <timeout>300</timeout>
       </commandScript>
       <configuration-dependencies>
-        <config-type>global</config-type>
         <config-type>hive-site</config-type>
+        <config-type>hive-env</config-type>
       </configuration-dependencies>
     </service>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py
index c98b8a1..a4ca3e0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py
@@ -83,7 +83,7 @@ def hive(name=None):
   File(format("{hive_config_dir}/hive-env.sh"),
        owner=params.hive_user,
        group=params.user_group,
-       content=Template('hive-env.sh.j2', conf_dir=hive_config_dir)
+       content=InlineTemplate(params.hive_env_sh_template, conf_dir=hive_config_dir)
   )
 
   if name == 'metastore':
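
The one-line hive.py change above is the crux of the whole patch. Template('hive-env.sh.j2', ...) rendered a Jinja file shipped inside the stack package, so the template could only change with a new stack release; InlineTemplate(params.hive_env_sh_template, ...) renders the string carried in the hive-env 'content' property, so operators can edit it through configuration. Side by side, with the same resource arguments the hunk shows (extra keyword arguments become template variables):

# Before: template text fixed on disk in the package's templates/ directory.
File(format("{hive_config_dir}/hive-env.sh"),
     owner=params.hive_user,
     group=params.user_group,
     content=Template('hive-env.sh.j2', conf_dir=hive_config_dir))

# After: template text read from configuration, hence user-editable.
File(format("{hive_config_dir}/hive-env.sh"),
     owner=params.hive_user,
     group=params.user_group,
     content=InlineTemplate(params.hive_env_sh_template, conf_dir=hive_config_dir))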

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py
index 4fac349..37a8051 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py
@@ -29,10 +29,10 @@ hive_server_conf_dir = "/etc/hive/conf.server"
 hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
 
 hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
-hive_metastore_db_type = config['configurations']['global']['hive_database_type']
+hive_metastore_db_type = config['configurations']['hive-env']['hive_database_type']
 
 #users
-hive_user = config['configurations']['global']['hive_user']
+hive_user = config['configurations']['hive-env']['hive_user']
 hive_lib = '/usr/lib/hive/lib/'
 #JDBC driver jar name
 hive_jdbc_driver = config['configurations']['hive-site']['javax.jdo.option.ConnectionDriverName']
@@ -59,26 +59,26 @@ hive_server_host = config['clusterHostInfo']['hive_server_host'][0]
 hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
 hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
 
-smokeuser = config['configurations']['global']['smokeuser']
+smokeuser = config['configurations']['hadoop-env']['smokeuser']
 smoke_test_sql = "/tmp/hiveserver2.sql"
 smoke_test_path = "/tmp/hiveserver2Smoke.sh"
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
 
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
 
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 hive_metastore_keytab_path =  config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
 
 #hive_env
 hive_conf_dir = "/etc/hive/conf"
-hive_dbroot = config['configurations']['global']['hive_dbroot']
-hive_log_dir = config['configurations']['global']['hive_log_dir']
+hive_dbroot = config['configurations']['hive-env']['hive_dbroot']
+hive_log_dir = config['configurations']['hive-env']['hive_log_dir']
 hive_pid_dir = status_params.hive_pid_dir
 hive_pid = status_params.hive_pid
 
 #hive-site
-hive_database_name = config['configurations']['global']['hive_database_name']
+hive_database_name = config['configurations']['hive-env']['hive_database_name']
 
 #Starting hiveserver2
 start_hiveserver2_script = 'startHiveserver2.sh.j2'
@@ -91,8 +91,8 @@ hive_metastore_pid = status_params.hive_metastore_pid
 java_share_dir = '/usr/share/java'
 driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
 
-hdfs_user =  config['configurations']['global']['hdfs_user']
-user_group = config['configurations']['global']['user_group']
+hdfs_user =  config['configurations']['hadoop-env']['hdfs_user']
+user_group = config['configurations']['hadoop-env']['user_group']
 artifact_dir = "/tmp/HDP-artifacts/"
 
 target = format("{hive_lib}/{jdbc_jar_name}")
@@ -103,13 +103,13 @@ driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")
 start_hiveserver2_path = "/tmp/start_hiveserver2_script"
 start_metastore_path = "/tmp/start_metastore_script"
 
-hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
 hive_heapsize = config['configurations']['hive-site']['hive.heapsize']
 java64_home = config['hostLevelParams']['java_home']
 
 ##### MYSQL
 
-db_name = config['configurations']['global']['hive_database_name']
+db_name = config['configurations']['hive-env']['hive_database_name']
 mysql_user = "mysql"
 mysql_group = 'mysql'
 mysql_host = config['clusterHostInfo']['hive_mysql_host']
@@ -135,11 +135,11 @@ else:
 
 hcat_dbroot = hcat_lib
 
-hcat_user = config['configurations']['global']['hcat_user']
-webhcat_user = config['configurations']['global']['webhcat_user']
+hcat_user = config['configurations']['hive-env']['hcat_user']
+webhcat_user = config['configurations']['hive-env']['webhcat_user']
 
 hcat_pid_dir = status_params.hcat_pid_dir
-hcat_log_dir = config['configurations']['global']['hcat_log_dir']
+hcat_log_dir = config['configurations']['hive-env']['hcat_log_dir']
 
 hadoop_conf_dir = '/etc/hadoop/conf'
 
@@ -156,6 +156,7 @@ else:
   log4j_exec_props = None
 
 daemon_name = status_params.daemon_name
+hive_env_sh_template = config['configurations']['hive-env']['content']
 
 hive_hdfs_user_dir = format("/user/{hive_user}")
 hive_hdfs_user_mode = 0700
@@ -163,15 +164,15 @@ hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.wareho
 #for create_hdfs_directory
 hostname = config["hostname"]
 hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-hdfs_user = config['configurations']['global']['hdfs_user']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 
 # Tez libraries
 tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
 tez_local_api_jars = '/usr/lib/tez/tez*.jar'
 tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
-tez_user = config['configurations']['global']['tez_user']
+tez_user = config['configurations']['tez-env']['tez_user']
 
 if System.get_instance().os_family == "debian":
   mysql_configname = '/etc/mysql/my.cnf'
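
One change in this params.py repeats across every service in the patch: get_kinit_path() loses its first candidate, default('kinit_path_local', None). With the global section gone there is no user override to consult, so the helper now only probes the fixed system directories. Assuming its contract is "first directory that actually contains a kinit binary" (a guess at the behavior, not a copy of the real code), it amounts to roughly:

import os

def get_kinit_path(path_dirs):
    # Assumed contract: return the first candidate that holds a kinit binary.
    for dir_name in path_dirs:
        if dir_name and os.path.isfile(os.path.join(dir_name, 'kinit')):
            return os.path.join(dir_name, 'kinit')
    return 'kinit'  # fall back to a bare PATH lookup

kinit_path_local = get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])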

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/status_params.py
index ea7ecc2..c1d86e2 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/status_params.py
@@ -22,12 +22,12 @@ from resource_management import *
 
 config = Script.get_config()
 
-hive_pid_dir = config['configurations']['global']['hive_pid_dir']
+hive_pid_dir = config['configurations']['hive-env']['hive_pid_dir']
 hive_pid = 'hive-server.pid'
 
 hive_metastore_pid = 'hive.pid'
 
-hcat_pid_dir = config['configurations']['global']['hcat_pid_dir'] #hcat_pid_dir
+hcat_pid_dir = config['configurations']['hive-env']['hcat_pid_dir'] #hcat_pid_dir
 
 if System.get_instance().os_family == "suse" or System.get_instance().os_family == "debian":
   daemon_name = 'mysql'

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/hive-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/hive-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/hive-env.sh.j2
deleted file mode 100644
index cfe3197..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/hive-env.sh.j2
+++ /dev/null
@@ -1,79 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Set Hive and Hadoop environment variables here. These variables can be used
-# to control the execution of Hive. It should be used by admins to configure
-# the Hive installation (so that users do not have to set environment variables
-# or set command line parameters to get correct behavior).
-#
-# The hive service being invoked (CLI/HWI etc.) is available via the environment
-# variable SERVICE
-
-# Hive Client memory usage can be an issue if a large number of clients
-# are running at the same time. The flags below have been useful in
-# reducing memory usage:
-#
- if [ "$SERVICE" = "cli" ]; then
-   if [ -z "$DEBUG" ]; then
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
-   else
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-   fi
- fi
-
-# The heap size of the jvm stared by hive shell script can be controlled via:
-
-export HADOOP_HEAPSIZE="{{hive_heapsize}}"
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-
-# Larger heap size may be required when running queries over large number of files or partitions.
-# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-# appropriate for hive server (hwi etc).
-
-
-# Set HADOOP_HOME to point to a specific hadoop install directory
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-
-# Hive Configuration Directory can be controlled by:
-export HIVE_CONF_DIR={{conf_dir}}
-
-# Folder containing extra ibraries required for hive compilation/execution can be controlled by:
-if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
-  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
-elif [ -d "/usr/lib/hive-hcatalog/" ]; then
-  export HIVE_AUX_JARS_PATH=/usr/lib/hive-hcatalog/share/hcatalog/hive-hcatalog-core-*.jar
-else
-  export HIVE_AUX_JARS_PATH=/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar
-fi
-export METASTORE_PORT={{hive_metastore_port}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/configuration/global.xml
deleted file mode 100644
index f36020d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/configuration/global.xml
+++ /dev/null
@@ -1,51 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-  <property>
-    <name>nagios_user</name>
-    <value>nagios</value>
-    <description>Nagios Username.</description>
-  </property>
-  <property>
-    <name>nagios_group</name>
-    <value>nagios</value>
-    <description>Nagios Group.</description>
-  </property>
-  <property>
-    <name>nagios_web_login</name>
-    <value>nagiosadmin</value>
-    <description>Nagios web user.</description>
-  </property>
-  <property require-input = "true">
-    <name>nagios_web_password</name>
-    <value></value>
-    <type>PASSWORD</type>
-    <description>Nagios Admin Password.</description>
-  </property>
-  <property require-input = "true">
-    <name>nagios_contact</name>
-    <value></value>
-    <description>Hadoop Admin Email.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/configuration/nagios-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/configuration/nagios-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/configuration/nagios-env.xml
new file mode 100644
index 0000000..54c742c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/configuration/nagios-env.xml
@@ -0,0 +1,51 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>nagios_user</name>
+    <value>nagios</value>
+    <description>Nagios Username.</description>
+  </property>
+  <property>
+    <name>nagios_group</name>
+    <value>nagios</value>
+    <description>Nagios Group.</description>
+  </property>
+  <property>
+    <name>nagios_web_login</name>
+    <value>nagiosadmin</value>
+    <description>Nagios web user.</description>
+  </property>
+  <property require-input = "true">
+    <name>nagios_web_password</name>
+    <value></value>
+    <type>PASSWORD</type>
+    <description>Nagios Admin Password.</description>
+  </property>
+  <property require-input = "true">
+    <name>nagios_contact</name>
+    <value></value>
+    <description>Hadoop Admin Email.</description>
+  </property>
+
+</configuration>
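
With global.xml gone, scripts address these values through the nagios-env section of the command configurations, alongside the shared hadoop-env section. A hedged sketch of the resulting shape, with illustrative values:

    # illustrative command-JSON excerpt after this change; the old
    # 'global' section no longer exists
    config = {
        'configurations': {
            'nagios-env': {
                'nagios_user': 'nagios',
                'nagios_group': 'nagios',
                'nagios_web_login': 'nagiosadmin',
            },
            'hadoop-env': {
                'user_group': 'hadoop',
            },
        }
    }

    nagios_user = config['configurations']['nagios-env']['nagios_user']
    user_group = config['configurations']['hadoop-env']['user_group']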

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/metainfo.xml
index 1c5bcba..7625f81 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/metainfo.xml
@@ -157,7 +157,7 @@
         </osSpecific>
       </osSpecifics>
       <configuration-dependencies>
-        <config-type>global</config-type>
+        <config-type>nagios-env</config-type>
       </configuration-dependencies>
       <monitoringService>true</monitoringService>
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/params.py
index 8a31ddf..249da3b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/params.py
@@ -79,7 +79,7 @@ nagios_servicegroup_cfg = format("{nagios_obj_dir}/hadoop-servicegroups.cfg")
 nagios_service_cfg = format("{nagios_obj_dir}/hadoop-services.cfg")
 nagios_command_cfg = format("{nagios_obj_dir}/hadoop-commands.cfg")
 eventhandlers_dir = "/usr/lib/nagios/eventhandlers"
-nagios_principal_name = default("nagios_principal_name", "nagios")
+nagios_principal_name = default("/configurations/hadoop-env/nagios_principal_name", "nagios")
 hadoop_ssl_enabled = False
 
 oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
@@ -114,7 +114,7 @@ drpc_port = config['configurations']['storm-site']['drpc.port']
 nimbus_port = config['configurations']['storm-site']['nimbus.thrift.port']
 supervisor_port = "56431"
 storm_rest_api_port = "8745"
-falcon_port = config['configurations']['global']['falcon_port']
+falcon_port = config['configurations']['falcon-env']['falcon_port']
 ahs_port = get_port_from_url(config['configurations']['yarn-site']['yarn.timeline-service.webapp.address'])
 
 # use sensible defaults for checkpoint as they are required by Nagios and 
@@ -131,7 +131,7 @@ else:
 
 # this is different for HDP1
 nn_metrics_property = "FSNamesystem"
-clientPort = config['configurations']['global']['clientPort'] #ZK 
+clientPort = config['configurations']['zookeeper-env']['clientPort'] #ZK 
 
 
 java64_home = config['hostLevelParams']['java_home']
@@ -139,8 +139,8 @@ check_cpu_on = is_jdk_greater_6(java64_home)
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
 
-nagios_keytab_path = default("nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab")
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+nagios_keytab_path = default("/configurations/hadoop-env/nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab")
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 
 dfs_ha_enabled = False
 dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
@@ -195,12 +195,12 @@ hdp_mon_nagios_addons_path = format("{web_conf_dir}/hdp_mon_nagios_addons.conf")
 ambarinagios_php_dir = "/usr/share/hdp/nagios/"
 ambarinagios_php_filename = "nagios_alerts.php"
 
-nagios_user = config['configurations']['global']['nagios_user']
-nagios_group = config['configurations']['global']['nagios_group']
-nagios_web_login = config['configurations']['global']['nagios_web_login']
-nagios_web_password = config['configurations']['global']['nagios_web_password']
-user_group = config['configurations']['global']['user_group']
-nagios_contact = config['configurations']['global']['nagios_contact']
+nagios_user = config['configurations']['nagios-env']['nagios_user']
+nagios_group = config['configurations']['nagios-env']['nagios_group']
+nagios_web_login = config['configurations']['nagios-env']['nagios_web_login']
+nagios_web_password = config['configurations']['nagios-env']['nagios_web_password']
+user_group = config['configurations']['hadoop-env']['user_group']
+nagios_contact = config['configurations']['nagios-env']['nagios_contact']
 
 # - test for HDFS or HCFS (glusterfs)
 if 'namenode_host' in config['clusterHostInfo']:
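
The default(...) changes here are more than renames: the helper resolves a slash-separated path against the nested command dict, so the old bare-key form apparently never matched anything and silently returned its fallback. A hedged re-implementation sketch (the real helper ships in resource_management; the signature is simplified to take the config dict explicitly):

    def default(path, fallback, config):
        # walk the '/'-separated path through the nested command dict,
        # returning the fallback as soon as any segment is missing
        node = config
        for key in path.strip('/').split('/'):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    cfg = {'configurations': {'hadoop-env': {'nagios_principal_name': 'nagios/h'}}}
    default('/configurations/hadoop-env/nagios_principal_name', 'nagios', cfg)  # 'nagios/h'
    default('nagios_principal_name', 'nagios', cfg)  # 'nagios': the bare key misses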

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/global.xml
deleted file mode 100644
index 24422b3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/global.xml
+++ /dev/null
@@ -1,60 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-  <property>
-    <name>oozie_user</name>
-    <value>oozie</value>
-    <description>Oozie User.</description>
-  </property>
-  <property>
-    <name>oozie_database</name>
-    <value>New Derby Database</value>
-    <description>Oozie Server Database.</description>
-  </property>
-  <property>
-    <name>oozie_derby_database</name>
-    <value>Derby</value>
-    <description>Oozie Derby Database</description>
-  </property>
-  <property>
-    <name>oozie_data_dir</name>
-    <value>/hadoop/oozie/data</value>
-    <description>Data directory in which the Oozie DB exists</description>
-  </property>
-  <property>
-    <name>oozie_log_dir</name>
-    <value>/var/log/oozie</value>
-    <description>Directory for oozie logs</description>
-  </property>
-  <property>
-    <name>oozie_pid_dir</name>
-    <value>/var/run/oozie</value>
-    <description>Directory in which the pid files for oozie reside.</description>
-  </property>
-  <property>
-    <name>oozie_admin_port</name>
-    <value>11001</value>
-    <description>The admin port Oozie server runs.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-env.xml
new file mode 100644
index 0000000..de36837
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-env.xml
@@ -0,0 +1,128 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>oozie_user</name>
+    <value>oozie</value>
+    <description>Oozie User.</description>
+  </property>
+  <property>
+    <name>oozie_database</name>
+    <value>New Derby Database</value>
+    <description>Oozie Server Database.</description>
+  </property>
+  <property>
+    <name>oozie_derby_database</name>
+    <value>Derby</value>
+    <description>Oozie Derby Database</description>
+  </property>
+  <property>
+    <name>oozie_data_dir</name>
+    <value>/hadoop/oozie/data</value>
+    <description>Data directory in which the Oozie DB exists</description>
+  </property>
+  <property>
+    <name>oozie_log_dir</name>
+    <value>/var/log/oozie</value>
+    <description>Directory for oozie logs</description>
+  </property>
+  <property>
+    <name>oozie_pid_dir</name>
+    <value>/var/run/oozie</value>
+    <description>Directory in which the pid files for oozie reside.</description>
+  </property>
+  <property>
+    <name>oozie_admin_port</name>
+    <value>11001</value>
+    <description>The admin port the Oozie server runs on.</description>
+  </property>
+
+  <!-- oozie-env.sh -->
+  <property>
+    <name>content</name>
+    <description>oozie-env.sh content</description>
+    <value>
+#!/bin/bash
+
+if [ -d "/usr/lib/bigtop-tomcat" ]; then
+  export OOZIE_CONFIG=${OOZIE_CONFIG:-/etc/oozie/conf}
+  export CATALINA_BASE=${CATALINA_BASE:-/var/lib/oozie/oozie-server}
+  export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}
+  export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat
+fi
+
+#Set JAVA HOME
+export JAVA_HOME={{java_home}}
+
+export JRE_HOME=${JAVA_HOME}
+
+# Set Oozie specific environment variables here.
+
+# Settings for the Embedded Tomcat that runs Oozie
+# Java System properties for Oozie should be specified in this variable
+#
+# export CATALINA_OPTS=
+
+# Oozie configuration file to load from Oozie configuration directory
+#
+# export OOZIE_CONFIG_FILE=oozie-site.xml
+
+# Oozie logs directory
+#
+export OOZIE_LOG={{oozie_log_dir}}
+
+# Oozie pid directory
+#
+export CATALINA_PID={{pid_file}}
+
+#Location of the data for oozie
+export OOZIE_DATA={{oozie_data_dir}}
+
+# Oozie Log4J configuration file to load from Oozie configuration directory
+#
+# export OOZIE_LOG4J_FILE=oozie-log4j.properties
+
+# Reload interval of the Log4J configuration file, in seconds
+#
+# export OOZIE_LOG4J_RELOAD=10
+
+# The port the Oozie server runs on
+#
+export OOZIE_HTTP_PORT={{oozie_server_port}}
+
+# The admin port the Oozie server runs on
+#
+export OOZIE_ADMIN_PORT={{oozie_server_admin_port}}
+
+# The host name Oozie server runs on
+#
+# export OOZIE_HTTP_HOSTNAME=`hostname -f`
+
+# The base URL for callback URLs to Oozie
+#
+# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
+export JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/metainfo.xml
index 6540801..e4dcce0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/metainfo.xml
@@ -131,8 +131,8 @@
       </commandScript>
 
       <configuration-dependencies>
-        <config-type>global</config-type>
         <config-type>oozie-site</config-type>
+        <config-type>oozie-env</config-type>
         <config-type>oozie-log4j</config-type>
       </configuration-dependencies>
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie.py
index 3391d52..5834088 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie.py
@@ -46,8 +46,9 @@ def oozie(is_server=False # TODO: see if we can remove this
     group = params.user_group
   )
   
-  TemplateConfig( format("{conf_dir}/oozie-env.sh"),
-    owner = params.oozie_user
+  File(format("{conf_dir}/oozie-env.sh"),
+    owner=params.oozie_user,
+    content=InlineTemplate(params.oozie_env_sh_template)
   )
 
   if (params.log4j_props != None):

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py
index 3e8cd4a..ad5632e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py
@@ -24,11 +24,11 @@ import status_params
 # server configurations
 config = Script.get_config()
 
-oozie_user = config['configurations']['global']['oozie_user']
-smokeuser = config['configurations']['global']['smokeuser']
+oozie_user = config['configurations']['oozie-env']['oozie_user']
+smokeuser = config['configurations']['hadoop-env']['smokeuser']
 conf_dir = "/etc/oozie/conf"
 hadoop_conf_dir = "/etc/hadoop/conf"
-user_group = config['configurations']['global']['user_group']
+user_group = config['configurations']['hadoop-env']['user_group']
 jdk_location = config['hostLevelParams']['jdk_location']
 check_db_connection_jar_name = "DBConnectionVerification.jar"
 check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
@@ -44,11 +44,12 @@ oozie_libext_dir = "/usr/lib/oozie/libext"
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
 
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
 oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
-smokeuser_keytab = config['configurations']['global']['smokeuser_keytab']
-oozie_keytab = config['configurations']['global']['oozie_keytab']
+smokeuser_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+oozie_keytab = config['configurations']['hadoop-env']['oozie_keytab']
+oozie_env_sh_template = config['configurations']['oozie-env']['content']
 
 oracle_driver_jar_name = "ojdbc6.jar"
 java_share_dir = "/usr/share/java"
@@ -57,10 +58,10 @@ java_home = config['hostLevelParams']['java_home']
 oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
 oozie_metastore_user_passwd = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password","")
 oozie_jdbc_connection_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", "")
-oozie_log_dir = config['configurations']['global']['oozie_log_dir']
-oozie_data_dir = config['configurations']['global']['oozie_data_dir']
+oozie_log_dir = config['configurations']['oozie-env']['oozie_log_dir']
+oozie_data_dir = config['configurations']['oozie-env']['oozie_data_dir']
 oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
-oozie_server_admin_port = config['configurations']['global']['oozie_admin_port']
+oozie_server_admin_port = config['configurations']['oozie-env']['oozie_admin_port']
 oozie_lib_dir = "/var/lib/oozie/"
 oozie_webapps_dir = "/var/lib/oozie/oozie-server/webapps/"
 
@@ -92,9 +94,9 @@ oozie_hdfs_user_mode = 0775
 #for create_hdfs_directory
 hostname = config["hostname"]
 hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-hdfs_user = config['configurations']['global']['hdfs_user']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 import functools
 #create partial functions with common arguments for every HdfsDirectory call
 #to create hdfs directory we need to call params.HdfsDirectory in code
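
The same reasoning explains dropping default("kinit_path_local", None) from the kinit search lists above: a bare-key lookup could only ever contribute None, so the list now holds real directories only. A hedged sketch of what a helper like get_kinit_path does (the real one ships in resource_management.libraries.functions):

    import os

    def get_kinit_path(search_dirs):
        # return the first kinit found under the given directories; fall
        # back to a bare 'kinit' so normal PATH lookup still applies
        for directory in search_dirs:
            if not directory:
                continue  # skips dead entries like the old leading None
            candidate = os.path.join(directory, 'kinit')
            if os.path.isfile(candidate):
                return candidate
        return 'kinit'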

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/status_params.py
index c44fcf4..a665449 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/status_params.py
@@ -22,5 +22,5 @@ from resource_management import *
 
 config = Script.get_config()
 
-oozie_pid_dir = config['configurations']['global']['oozie_pid_dir']
+oozie_pid_dir = config['configurations']['oozie-env']['oozie_pid_dir']
 pid_file = format("{oozie_pid_dir}/oozie.pid")
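
status_params stays deliberately tiny: it carries only what a status probe needs, and sourcing oozie_pid_dir from oozie-env keeps that probe working once the global section is gone. A hedged sketch of the usual consumer, in the Script.status() idiom these service scripts use:

    from resource_management import *

    class OozieServer(Script):
        # install/configure/start/stop elided; sketch of the status probe only
        def status(self, env):
            import status_params
            env.set_params(status_params)
            # raises ComponentIsNotRunning if the pid file is absent or stale
            check_process_status(status_params.pid_file)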

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/templates/oozie-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/templates/oozie-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/templates/oozie-env.sh.j2
deleted file mode 100644
index 4eb12cc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/templates/oozie-env.sh.j2
+++ /dev/null
@@ -1,95 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#!/bin/bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#      http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-if [ -d "/usr/lib/bigtop-tomcat" ]; then
-  export OOZIE_CONFIG=${OOZIE_CONFIG:-/etc/oozie/conf}
-  export CATALINA_BASE=${CATALINA_BASE:-/var/lib/oozie/oozie-server}
-  export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}
-  export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat
-fi
-
-#Set JAVA HOME
-export JAVA_HOME={{java_home}}
-
-export JRE_HOME=${JAVA_HOME}
-
-# Set Oozie specific environment variables here.
-
-# Settings for the Embedded Tomcat that runs Oozie
-# Java System properties for Oozie should be specified in this variable
-#
-# export CATALINA_OPTS=
-
-# Oozie configuration file to load from Oozie configuration directory
-#
-# export OOZIE_CONFIG_FILE=oozie-site.xml
-
-# Oozie logs directory
-#
-export OOZIE_LOG={{oozie_log_dir}}
-
-# Oozie pid directory
-#
-export CATALINA_PID={{pid_file}}
-
-#Location of the data for oozie
-export OOZIE_DATA={{oozie_data_dir}}
-
-# Oozie Log4J configuration file to load from Oozie configuration directory
-#
-# export OOZIE_LOG4J_FILE=oozie-log4j.properties
-
-# Reload interval of the Log4J configuration file, in seconds
-#
-# export OOZIE_LOG4J_RELOAD=10
-
-# The port Oozie server runs
-#
-export OOZIE_HTTP_PORT={{oozie_server_port}}
-
-# The admin port Oozie server runs
-#
-export OOZIE_ADMIN_PORT={{oozie_server_admin_port}}
-
-# The host name Oozie server runs on
-#
-# export OOZIE_HTTP_HOSTNAME=`hostname -f`
-
-# The base URL for callback URLs to Oozie
-#
-# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
-export JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/configuration/pig-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/configuration/pig-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/configuration/pig-env.xml
new file mode 100644
index 0000000..d9861a4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/configuration/pig-env.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- pig-env.sh -->
+  <property>
+    <name>content</name>
+    <description>pig-env.sh content</description>
+    <value>
+JAVA_HOME={{java64_home}}
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+if [ -d "/usr/lib/tez" ]; then
+  PIG_OPTS="$PIG_OPTS -Dmapreduce.framework.name=yarn"
+fi
+    </value>
+  </property>
+  
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/metainfo.xml
index 4863e56..e0dd290 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/metainfo.xml
@@ -52,7 +52,7 @@
       </commandScript>
 
       <configuration-dependencies>
-        <config-type>global</config-type>
+        <config-type>pig-env</config-type>
         <config-type>pig-log4j</config-type>
         <config-type>pig-properties</config-type>
       </configuration-dependencies>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/params.py
index 03cd9ed..691c352 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/params.py
@@ -26,13 +26,14 @@ config = Script.get_config()
 
 pig_conf_dir = "/etc/pig/conf"
 hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user = config['configurations']['global']['hdfs_user']
-smokeuser = config['configurations']['global']['smokeuser']
-user_group = config['configurations']['global']['user_group']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+smokeuser = config['configurations']['hadoop-env']['smokeuser']
+user_group = config['configurations']['hadoop-env']['user_group']
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+pig_env_sh_template = config['configurations']['pig-env']['content']
 
 # not supporting 32 bit jdk.
 java64_home = config['hostLevelParams']['java_home']

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0ae1fdd/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/pig.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/pig.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/pig.py
index 1fc1b09..8a8d50d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/pig.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/pig.py
@@ -30,7 +30,10 @@ def pig():
     group = params.user_group
   )
 
-  pig_TemplateConfig( ['pig-env.sh'])
+  File(format("{pig_conf_dir}/pig-env.sh"),
+    owner=params.hdfs_user,
+    content=InlineTemplate(params.pig_env_sh_template)
+  )
 
   # pig_properties is always set to a default even if it's not in the payload
   File(format("{params.pig_conf_dir}/pig.properties"),