Posted to commits@ambari.apache.org by al...@apache.org on 2016/12/19 17:48:23 UTC

[01/19] ambari git commit: AMBARI-19229. Remove HDP-3.0.0 stack definition from Ambari-2.5 (alejandro)

Repository: ambari
Updated Branches:
  refs/heads/branch-2.5 4c04a9153 -> c358ae0c2


http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/metainfo.xml
deleted file mode 100644
index 95a5f84..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/metainfo.xml
+++ /dev/null
@@ -1,145 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HDFS</name>
-      <displayName>HDFS</displayName>
-      <version>3.0.0.3.0</version>
-      <extends>common-services/HDFS/3.0.0.3.0</extends>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>rpcbind</name>
-              <condition>should_install_rpcbind</condition>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat7,amazon2015,redhat6,suse11</osFamily>
-          <packages>
-            <package>
-              <name>hadoop_${stack_version}</name>
-            </package>
-            <package>
-              <name>hadoop_${stack_version}-client</name>
-            </package>
-            <package>
-              <name>snappy</name>
-            </package>
-            <package>
-              <name>snappy-devel</name>
-            </package>
-            <package>
-              <name>lzo</name>
-              <skipUpgrade>true</skipUpgrade>
-              <condition>should_install_lzo</condition>
-            </package>
-            <package>
-              <name>hadooplzo_${stack_version}</name>
-              <condition>should_install_lzo</condition>
-            </package>
-            <package>
-              <name>hadooplzo_${stack_version}-native</name>
-              <condition>should_install_lzo</condition>
-            </package>
-            <package>
-              <name>hadoop_${stack_version}-libhdfs</name>
-            </package>
-          </packages>
-        </osSpecific>
-
-        <osSpecific>
-          <osFamily>suse12</osFamily>
-          <packages>
-            <package>
-              <name>hadoop_${stack_version}</name>
-            </package>
-            <package>
-              <name>hadoop_${stack_version}-client</name>
-            </package>
-            <package>
-              <name>snappy</name>
-            </package>
-            <package>
-              <name>snappy-devel</name>
-            </package>
-            <package>
-              <name>liblzo2-2</name>
-              <skipUpgrade>true</skipUpgrade>
-              <condition>should_install_lzo</condition>
-            </package>
-            <package>
-              <name>hadooplzo_${stack_version}</name>
-              <condition>should_install_lzo</condition>
-            </package>
-            <package>
-              <name>hadooplzo_${stack_version}-native</name>
-              <condition>should_install_lzo</condition>
-            </package>
-            <package>
-              <name>hadoop_${stack_version}-libhdfs</name>
-            </package>
-          </packages>
-        </osSpecific>
-
-        <osSpecific>
-          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
-          <packages>
-            <package>
-              <name>hadoop-${stack_version}-client</name>
-            </package>
-            <package>
-              <name>hadoop-${stack_version}-hdfs-datanode</name>
-            </package>
-            <package>
-              <name>hadoop-${stack_version}-hdfs-journalnode</name>
-            </package>
-            <package>
-              <name>hadoop-${stack_version}-hdfs-namenode</name>
-            </package>
-            <package>
-              <name>hadoop-${stack_version}-hdfs-secondarynamenode</name>
-            </package>
-            <package>
-              <name>hadoop-${stack_version}-hdfs-zkfc</name>
-            </package>
-            <package>
-              <name>libsnappy1</name>
-            </package>
-            <package>
-              <name>libsnappy-dev</name>
-            </package>
-            <package>
-              <name>hadooplzo-${stack_version}</name>
-              <condition>should_install_lzo</condition>
-            </package>
-            <package>
-              <name>libhdfs0-${stack_version}</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml
deleted file mode 100644
index deb4ef7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml
+++ /dev/null
@@ -1,49 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<!-- Put site-specific property overrides in this file. -->
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
-  <!-- These configs were inherited from HDP 2.2 -->
-  <!-- mapred-env.sh -->
-  <property>
-    <name>content</name>
-    <display-name>mapred-env template</display-name>
-    <description>This is the jinja template for mapred-env.sh file</description>
-    <value>
-      # export JAVA_HOME=/home/y/libexec/jdk1.6.0/
-
-      export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}
-
-      export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
-
-      #export HADOOP_JOB_HISTORYSERVER_OPTS=
-      #export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.
-      #export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
-      #export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.
-      #export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default
-      #export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.
-      export HADOOP_OPTS="-Dhdp.version=$HDP_VERSION $HADOOP_OPTS"
-      export HADOOP_OPTS="-Djava.io.tmpdir={{hadoop_java_io_tmpdir}} $HADOOP_OPTS"
-      export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
-    </value>
-    <value-attributes>
-      <type>content</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml
deleted file mode 100644
index cbeb08b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml
+++ /dev/null
@@ -1,78 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<!-- Put site-specific property overrides in this file. -->
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
-  <property>
-    <name>mapreduce.application.classpath</name>
-    <value>$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure</value>
-    <description>
-      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
-      entries.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- These configs were inherited from HDP 2.2 -->
-  <property>
-    <name>mapreduce.admin.user.env</name>
-    <value>LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-{{architecture}}-64</value>
-    <description>
-      Additional execution environment entries for map and reduce task processes.
-      This is not an additive property. You must preserve the original value if
-      you want your map and reduce tasks to have access to native libraries (compression, etc)
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.application.framework.path</name>
-    <value>/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.app.mapreduce.am.admin-command-opts</name>
-    <value>-Dhdp.version=${hdp.version}</value>
-    <description>
-      Java opts for the MR App Master processes.
-      The following symbol, if present, will be interpolated: @taskid@ is replaced
-      by current TaskID. Any other occurrences of '@' will go unchanged.
-      For example, to enable verbose gc logging to a file named for the taskid in
-      /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
-      -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
-
-      Usage of -Djava.library.path can cause programs to no longer function if
-      hadoop native libraries are used. These values should instead be set as part
-      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
-      mapreduce.reduce.env config settings.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.admin.map.child.java.opts</name>
-    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.admin.reduce.child.java.opts</name>
-    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml
deleted file mode 100644
index 0f46d75..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<configuration supports_final="true">
-
-  <property>
-    <name>yarn.nodemanager.aux-services.spark_shuffle.classpath</name>
-    <value>{{stack_root}}/${hdp.version}/spark/aux/*</value>
-    <description>The auxiliary service classpath to use for Spark</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- These configs were inherited from HDP 2.5 -->
-  <property>
-    <name>yarn.nodemanager.aux-services.spark2_shuffle.classpath</name>
-    <value>{{stack_root}}/${hdp.version}/spark2/aux/*</value>
-    <description>The auxiliary service classpath to use for Spark 2</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml
deleted file mode 100644
index 096f205..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml
+++ /dev/null
@@ -1,81 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>YARN</name>
-      <version>3.0.0.3.0</version>
-      <extends>common-services/YARN/3.0.0.3.0</extends>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>amazon2015,redhat6,redhat7,suse11,suse12</osFamily>
-          <packages>
-            <package>
-              <name>hadoop_${stack_version}-yarn</name>
-            </package>
-            <package>
-              <name>hadoop_${stack_version}-mapreduce</name>
-            </package>
-            <package>
-              <name>hadoop_${stack_version}-hdfs</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>ubuntu12</osFamily>
-          <packages>
-            <package>
-              <name>hadoop-${stack_version}-yarn</name>
-            </package>
-            <package>
-              <name>hadoop-${stack_version}-mapreduce</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-    </service>
-
-    <service>
-      <name>MAPREDUCE2</name>
-      <displayName>MapReduce2</displayName>
-      <version>2.7.1.3.0</version>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>amazon2015,redhat6,redhat7,suse11,suse12</osFamily>
-          <packages>
-            <package>
-              <name>hadoop_${stack_version}-mapreduce</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>ubuntu12</osFamily>
-          <packages>
-            <package>
-              <name>hadoop-${stack_version}-mapreduce</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-    </service>
-  </services>
-</metainfo>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/ZOOKEEPER/metainfo.xml
deleted file mode 100644
index 50cb18d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/ZOOKEEPER/metainfo.xml
+++ /dev/null
@@ -1,52 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>ZOOKEEPER</name>
-      <version>3.4.9.3.0</version>
-      <extends>common-services/ZOOKEEPER/3.4.9</extends>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>amazon2015,redhat6,redhat7,suse11,suse12</osFamily>
-          <packages>
-            <package>
-              <name>zookeeper_${stack_version}</name>
-            </package>
-            <package>
-              <name>zookeeper_${stack_version}-server</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>ubuntu12,ubuntu14,ubuntu16</osFamily>
-          <packages>
-            <package>
-              <name>zookeeper-${stack_version}</name>
-            </package>
-            <package>
-              <name>zookeeper-${stack_version}-server</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json b/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json
deleted file mode 100644
index 3176354..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json
+++ /dev/null
@@ -1,95 +0,0 @@
-{
-  "layouts": [
-    {
-      "layout_name": "default_system_heatmap",
-      "display_name": "Heatmaps",
-      "section_name": "SYSTEM_HEATMAPS",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "Host Disk Space Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "disk_free",
-              "metric_path": "metrics/disk/disk_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "disk_total",
-              "metric_path": "metrics/disk/disk_total",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Disk Space Used %",
-              "value": "${((disk_total-disk_free)/disk_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host Memory Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "mem_total",
-              "metric_path": "metrics/memory/mem_total",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_free",
-              "metric_path": "metrics/memory/mem_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_cached",
-              "metric_path": "metrics/memory/mem_cached",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host CPU Wait IO %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "cpu_wio",
-              "metric_path": "metrics/cpu/cpu_wio",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${cpu_wio*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        }
-      ]
-    }
-  ]
-}


[17/19] ambari git commit: AMBARI-19229. Remove HDP-3.0.0 stack definition from Ambari-2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ranger-hdfs-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ranger-hdfs-audit.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ranger-hdfs-audit.xml
deleted file mode 100644
index fd41817..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ranger-hdfs-audit.xml
+++ /dev/null
@@ -1,217 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <!-- These configs were inherited from HDP 2.3 -->
-  <property>
-    <name>xasecure.audit.is.enabled</name>
-    <value>true</value>
-    <description>Is Audit enabled?</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db</name>
-    <value>false</value>
-    <display-name>Audit to DB</display-name>
-    <description>Is Audit to DB enabled?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.db</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.jdbc.url</name>
-    <value>{{audit_jdbc_url}}</value>
-    <description>Audit DB JDBC URL</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.user</name>
-    <value>{{xa_audit_db_user}}</value>
-    <description>Audit DB JDBC User</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.password</name>
-    <value>crypted</value>
-    <property-type>PASSWORD</property-type>
-    <description>Audit DB JDBC Password</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.jdbc.driver</name>
-    <value>{{jdbc_driver}}</value>
-    <description>Audit DB JDBC Driver</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.credential.provider.file</name>
-    <value>jceks://file{{credential_file}}</value>
-    <description>Credential file store</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.batch.filespool.dir</name>
-    <value>/var/log/hadoop/hdfs/audit/db/spool</value>
-    <description>/var/log/hadoop/hdfs/audit/db/spool</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.hdfs</name>
-    <value>true</value>
-    <display-name>Audit to HDFS</display-name>
-    <description>Is Audit to HDFS enabled?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.hdfs</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.hdfs.dir</name>
-    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
-    <description>HDFS folder to write audit to, make sure the service user has required permissions</description>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.hdfs.dir</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
-    <value>/var/log/hadoop/hdfs/audit/hdfs/spool</value>
-    <description>/var/log/hadoop/hdfs/audit/hdfs/spool</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr</name>
-    <value>false</value>
-    <display-name>Audit to SOLR</display-name>
-    <description>Is Solr audit enabled?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.solr</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr.urls</name>
-    <value/>
-    <description>Solr URL</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-admin-site</type>
-        <name>ranger.audit.solr.urls</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr.zookeepers</name>
-    <value>NONE</value>
-    <description>Solr Zookeeper string</description>
-    <depends-on>
-      <property>
-        <type>ranger-admin-site</type>
-        <name>ranger.audit.solr.zookeepers</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
-    <value>/var/log/hadoop/hdfs/audit/solr/spool</value>
-    <description>/var/log/hadoop/hdfs/audit/solr/spool</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.provider.summary.enabled</name>
-    <value>false</value>
-    <display-name>Audit provider summary enabled</display-name>
-    <description>Enable Summary audit?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- These configs are deleted in HDP 2.5. -->
-  <property>
-    <name>xasecure.audit.destination.db</name>
-    <deleted>true</deleted>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.jdbc.url</name>
-    <deleted>true</deleted>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.user</name>
-    <deleted>true</deleted>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.password</name>
-    <deleted>true</deleted>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.jdbc.driver</name>
-    <deleted>true</deleted>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.credential.provider.file</name>
-    <deleted>true</deleted>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.batch.filespool.dir</name>
-    <deleted>true</deleted>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ranger-hdfs-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ranger-hdfs-plugin-properties.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ranger-hdfs-plugin-properties.xml
deleted file mode 100644
index b31742c..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ranger-hdfs-plugin-properties.xml
+++ /dev/null
@@ -1,98 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="true">
-  <!-- These configs were inherited from HDP 2.2 -->
-  <property>
-    <name>policy_user</name>
-    <value>ambari-qa</value>
-    <display-name>Policy user for HDFS</display-name>
-    <description>This user must be a system user and also present in the Ranger
-      admin portal</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>hadoop.rpc.protection</name>
-    <value/>
-    <description>Used for repository creation on ranger admin
-    </description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>common.name.for.certificate</name>
-    <value/>
-    <description>Common name for certificate, this value should match what is specified in repo within ranger admin</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger-hdfs-plugin-enabled</name>
-    <value>No</value>
-    <display-name>Enable Ranger for HDFS</display-name>
-    <description>Enable ranger hdfs plugin</description>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>ranger-hdfs-plugin-enabled</name>
-      </property>
-    </depends-on>
-    <value-attributes>
-      <type>boolean</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>REPOSITORY_CONFIG_USERNAME</name>
-    <value>hadoop</value>
-    <display-name>Ranger repository config user</display-name>
-    <description>Used for repository creation on ranger admin
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>REPOSITORY_CONFIG_PASSWORD</name>
-    <value>hadoop</value>
-    <display-name>Ranger repository config password</display-name>
-    <property-type>PASSWORD</property-type>
-    <description>Used for repository creation on ranger admin
-    </description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- These configs were inherited from HDP 2.5 -->
-  <property>
-    <name>hadoop.rpc.protection</name>
-    <value>authentication</value>
-    <description>Used for repository creation on ranger admin</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false" />
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ranger-hdfs-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ranger-hdfs-policymgr-ssl.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ranger-hdfs-policymgr-ssl.xml
deleted file mode 100644
index de3fcd6..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ranger-hdfs-policymgr-ssl.xml
+++ /dev/null
@@ -1,67 +0,0 @@
-<?xml version="1.0"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <!-- These configs were inherited from HDP 2.3 -->
-  <property>
-    <name>xasecure.policymgr.clientssl.keystore</name>
-    <value>{{stack_root}}/current/hadoop-client/conf/ranger-plugin-keystore.jks</value>
-    <description>Java Keystore files</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.keystore.password</name>
-    <value>myKeyFilePassword</value>
-    <property-type>PASSWORD</property-type>
-    <description>password for keystore</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.truststore</name>
-    <value>{{stack_root}}/current/hadoop-client/conf/ranger-plugin-truststore.jks</value>
-    <description>java truststore file</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.truststore.password</name>
-    <value>changeit</value>
-    <property-type>PASSWORD</property-type>
-    <description>java truststore password</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
-    <value>jceks://file{{credential_file}}</value>
-    <description>java keystore credential file</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
-    <value>jceks://file{{credential_file}}</value>
-    <description>java truststore credential file</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ranger-hdfs-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ranger-hdfs-security.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ranger-hdfs-security.xml
deleted file mode 100644
index 1b0a821..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ranger-hdfs-security.xml
+++ /dev/null
@@ -1,65 +0,0 @@
-<?xml version="1.0"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <!-- These configs were inherited from HDP 2.3 -->
-  <property>
-    <name>ranger.plugin.hdfs.service.name</name>
-    <value>{{repo_name}}</value>
-    <description>Name of the Ranger service containing Hdfs policies</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.hdfs.policy.source.impl</name>
-    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
-    <description>Class to retrieve policies from the source</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.hdfs.policy.rest.url</name>
-    <value>{{policymgr_mgr_url}}</value>
-    <description>URL to Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.hdfs.policy.rest.ssl.config.file</name>
-    <value>/etc/hadoop/conf/ranger-policymgr-ssl.xml</value>
-    <description>Path to the file containing SSL details to contact Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.hdfs.policy.pollIntervalMs</name>
-    <value>30000</value>
-    <description>How often to poll for changes in policies?</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.hdfs.policy.cache.dir</name>
-    <value>/etc/ranger/{{repo_name}}/policycache</value>
-    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.add-hadoop-authorization</name>
-    <value>true</value>
-    <description>Enable/Disable the default hadoop authorization (based on rwxrwxrwx permission on the resource) if Ranger Authorization fails.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ssl-client.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ssl-client.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ssl-client.xml
deleted file mode 100644
index 6ec064a..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ssl-client.xml
+++ /dev/null
@@ -1,70 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-  <property>
-    <name>ssl.client.truststore.location</name>
-    <value>/etc/security/clientKeys/all.jks</value>
-    <description>Location of the trust store file.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>ssl.client.truststore.type</name>
-    <value>jks</value>
-    <description>Optional. Default value is "jks".</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>ssl.client.truststore.password</name>
-    <value>bigdata</value>
-    <property-type>PASSWORD</property-type>
-    <description>Password to open the trust store file.</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>ssl.client.truststore.reload.interval</name>
-    <value>10000</value>
-    <description>Truststore reload interval, in milliseconds.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>ssl.client.keystore.type</name>
-    <value>jks</value>
-    <description>Optional. Default value is "jks".</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>ssl.client.keystore.location</name>
-    <value>/etc/security/clientKeys/keystore.jks</value>
-    <description>Location of the keystore file.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>ssl.client.keystore.password</name>
-    <value>bigdata</value>
-    <property-type>PASSWORD</property-type>
-    <description>Password to open the keystore file.</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ssl-server.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ssl-server.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ssl-server.xml
deleted file mode 100644
index 5d2745f..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ssl-server.xml
+++ /dev/null
@@ -1,80 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-  <property>
-    <name>ssl.server.truststore.location</name>
-    <value>/etc/security/serverKeys/all.jks</value>
-    <description>Location of the trust store file.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>ssl.server.truststore.type</name>
-    <value>jks</value>
-    <description>Optional. Default value is "jks".</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>ssl.server.truststore.password</name>
-    <value>bigdata</value>
-    <property-type>PASSWORD</property-type>
-    <description>Password to open the trust store file.</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>ssl.server.truststore.reload.interval</name>
-    <value>10000</value>
-    <description>Truststore reload interval, in milliseconds.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>ssl.server.keystore.type</name>
-    <value>jks</value>
-    <description>Optional. Default value is "jks".</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>ssl.server.keystore.location</name>
-    <value>/etc/security/serverKeys/keystore.jks</value>
-    <description>Location of the keystore file.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>ssl.server.keystore.password</name>
-    <value>bigdata</value>
-    <property-type>PASSWORD</property-type>
-    <description>Password to open the keystore file.</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>ssl.server.keystore.keypassword</name>
-    <value>bigdata</value>
-    <property-type>PASSWORD</property-type>
-    <description>Password for private key in keystore file.</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/kerberos.json b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/kerberos.json
deleted file mode 100644
index 1dd801b..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/kerberos.json
+++ /dev/null
@@ -1,246 +0,0 @@
-{
-  "services": [
-    {
-      "name": "HDFS",
-      "identities": [
-        {
-          "name": "/spnego",
-          "principal": {
-            "configuration": "hdfs-site/dfs.web.authentication.kerberos.principal"
-          },
-          "keytab": {
-            "configuration": "hdfs-site/dfs.web.authentication.kerberos.keytab"
-          }
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "auth_to_local_properties" : [
-        "core-site/hadoop.security.auth_to_local"
-      ],
-      "configurations": [
-        {
-          "core-site": {
-            "hadoop.security.authentication": "kerberos",
-            "hadoop.security.authorization": "true",
-            "hadoop.proxyuser.HTTP.groups": "${hadoop-env/proxyuser_group}"
-          }
-        },
-        {
-          "ranger-hdfs-audit": {
-            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
-            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
-            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
-            "xasecure.audit.jaas.Client.option.storeKey": "false",
-            "xasecure.audit.jaas.Client.option.serviceName": "solr",
-            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
-          }
-        }
-      ],
-      "components": [
-        {
-          "name":  "HDFS_CLIENT",
-          "identities": [
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            }
-          ]
-        },
-        {
-          "name": "NAMENODE",
-          "identities": [
-            {
-              "name": "hdfs",
-              "principal": {
-                "value": "${hadoop-env/hdfs_user}-${cluster_name|toLower()}@${realm}",
-                "type" : "user" ,
-                "configuration": "hadoop-env/hdfs_principal_name",
-                "local_username" : "${hadoop-env/hdfs_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/hdfs.headless.keytab",
-                "owner": {
-                  "name": "${hadoop-env/hdfs_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "hadoop-env/hdfs_user_keytab"
-              }
-            },
-            {
-              "name": "namenode_nn",
-              "principal": {
-                "value": "nn/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "hdfs-site/dfs.namenode.kerberos.principal",
-                "local_username" : "${hadoop-env/hdfs_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/nn.service.keytab",
-                "owner": {
-                  "name": "${hadoop-env/hdfs_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "hdfs-site/dfs.namenode.keytab.file"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "hdfs-site/dfs.namenode.kerberos.internal.spnego.principal"
-              }
-            },
-            {
-              "name": "/HDFS/NAMENODE/namenode_nn",
-              "principal": {
-                "configuration": "ranger-hdfs-audit/xasecure.audit.jaas.Client.option.principal"
-              },
-              "keytab": {
-                "configuration": "ranger-hdfs-audit/xasecure.audit.jaas.Client.option.keyTab"
-              }
-            }
-          ],
-          "configurations": [
-            {
-              "hdfs-site": {
-                "dfs.block.access.token.enable": "true"
-              }
-            }
-          ]
-        },
-        {
-          "name": "DATANODE",
-          "identities": [
-            {
-              "name": "datanode_dn",
-              "principal": {
-                "value": "dn/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "hdfs-site/dfs.datanode.kerberos.principal",
-                "local_username" : "${hadoop-env/hdfs_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/dn.service.keytab",
-                "owner": {
-                  "name": "${hadoop-env/hdfs_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "hdfs-site/dfs.datanode.keytab.file"
-              }
-            }
-          ],
-          "configurations" : [
-            {
-              "hdfs-site" : {
-                "dfs.datanode.address" : "0.0.0.0:1019",
-                "dfs.datanode.http.address": "0.0.0.0:1022"
-              }
-            }
-          ]
-        },
-        {
-          "name": "SECONDARY_NAMENODE",
-          "identities": [
-            {
-              "name": "secondary_namenode_nn",
-              "principal": {
-                "value": "nn/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.principal",
-                "local_username" : "${hadoop-env/hdfs_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/nn.service.keytab",
-                "owner": {
-                  "name": "${hadoop-env/hdfs_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "hdfs-site/dfs.secondary.namenode.keytab.file"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.internal.spnego.principal"
-              }
-            }
-          ]
-        },
-        {
-          "name": "NFS_GATEWAY",
-          "identities": [
-            {
-              "name": "nfsgateway",
-              "principal": {
-                "value": "nfs/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "hdfs-site/nfs.kerberos.principal",
-                "local_username" : "${hadoop-env/hdfs_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/nfs.service.keytab",
-                "owner": {
-                  "name": "${hadoop-env/hdfs_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "hdfs-site/nfs.keytab.file"
-              }
-            }
-          ]
-        },
-        {
-          "name": "JOURNALNODE",
-          "identities": [
-            {
-              "name": "journalnode_jn",
-              "principal": {
-                "value": "jn/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "hdfs-site/dfs.journalnode.kerberos.principal",
-                "local_username" : "${hadoop-env/hdfs_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/jn.service.keytab",
-                "owner": {
-                  "name": "${hadoop-env/hdfs_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "hdfs-site/dfs.journalnode.keytab.file"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "hdfs-site/dfs.journalnode.kerberos.internal.spnego.principal"
-              }
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/metainfo.xml
deleted file mode 100644
index 967c974..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/metainfo.xml
+++ /dev/null
@@ -1,405 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HDFS</name>
-      <displayName>HDFS</displayName>
-      <comment>Apache Hadoop Distributed File System</comment>
-      <version>3.0.0.3.0</version>
-
-      <components>
-        <component>
-          <name>NAMENODE</name>
-          <displayName>NameNode</displayName>
-          <category>MASTER</category>
-          <cardinality>1-2</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <reassignAllowed>true</reassignAllowed>
-          <dependencies>
-            <dependency>
-              <name>HDFS/ZKFC</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>false</enabled>
-              </auto-deploy>
-              <conditions>
-                <condition xsi:type="propertyExists">
-                  <configType>hdfs-site</configType>
-                  <property>dfs.nameservices</property>
-                </condition>
-              </conditions>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>false</enabled>
-              </auto-deploy>
-              <conditions>
-                <condition xsi:type="propertyExists">
-                  <configType>hdfs-site</configType>
-                  <property>dfs.nameservices</property>
-                </condition>
-              </conditions>
-            </dependency>
-            <dependency>
-              <name>HDFS/JOURNALNODE</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>false</enabled>
-              </auto-deploy>
-              <conditions>
-                <condition xsi:type="propertyExists">
-                  <configType>hdfs-site</configType>
-                  <property>dfs.nameservices</property>
-                </condition>
-              </conditions>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/namenode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1800</timeout>
-          </commandScript>
-          <logs>
-            <log>
-              <logId>hdfs_namenode</logId>
-              <primary>true</primary>
-            </log>
-            <log>
-              <logId>hdfs_audit</logId>
-            </log>
-          </logs>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/namenode.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-            <customCommand>
-              <name>REBALANCEHDFS</name>
-              <background>true</background>
-              <commandScript>
-                <script>scripts/namenode.py</script>
-                <scriptType>PYTHON</scriptType>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>DATANODE</name>
-          <displayName>DataNode</displayName>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <decommissionAllowed>true</decommissionAllowed>
-          <commandScript>
-            <script>scripts/datanode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-          <bulkCommands>
-            <displayName>DataNodes</displayName>
-            <!-- Used by decommission and recommission -->
-            <masterComponent>NAMENODE</masterComponent>
-          </bulkCommands>
-          <logs>
-            <log>
-              <logId>hdfs_datanode</logId>
-              <primary>true</primary>
-            </log>
-          </logs>
-        </component>
-
-        <component>
-          <name>SECONDARY_NAMENODE</name>
-          <displayName>SNameNode</displayName>
-          <!-- TODO:  cardinality is conditional on HA usage -->
-          <cardinality>1</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <reassignAllowed>true</reassignAllowed>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/snamenode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-          <logs>
-            <log>
-              <logId>hdfs_secondarynamenode</logId>
-              <primary>true</primary>
-            </log>
-          </logs>
-        </component>
-
-        <component>
-          <name>HDFS_CLIENT</name>
-          <displayName>HDFS Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <commandScript>
-            <script>scripts/hdfs_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>hdfs-site.xml</fileName>
-              <dictionaryName>hdfs-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>core-site.xml</fileName>
-              <dictionaryName>core-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>log4j.properties</fileName>
-              <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
-            </configFile>                          
-            <configFile>
-              <type>env</type>
-              <fileName>hadoop-env.sh</fileName>
-              <dictionaryName>hadoop-env</dictionaryName>
-            </configFile>
-          </configFiles>
-        </component>
-
-        <component>
-          <name>JOURNALNODE</name>
-          <displayName>JournalNode</displayName>
-          <category>SLAVE</category>
-          <cardinality>0+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <commandScript>
-            <script>scripts/journalnode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-          <logs>
-            <log>
-              <logId>hdfs_journalnode</logId>
-              <primary>true</primary>
-            </log>
-          </logs>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-        </component>
-
-        <component>
-          <name>ZKFC</name>
-          <displayName>ZKFailoverController</displayName>
-          <category>SLAVE</category>
-          <!-- TODO: cardinality is conditional on HA topology -->
-          <cardinality>0+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <commandScript>
-            <script>scripts/zkfc_slave.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-          <logs>
-            <log>
-              <logId>hdfs_zkfc</logId>
-              <primary>true</primary>
-            </log>
-          </logs>
-        </component>
-
-        <component>
-          <name>NFS_GATEWAY</name>
-          <displayName>NFSGateway</displayName>
-          <cardinality>0+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <category>SLAVE</category>
-          <commandScript>
-            <script>scripts/nfsgateway.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hadoop</name>
-            </package>
-            <package>
-              <name>hadoop-lzo</name>
-              <skipUpgrade>true</skipUpgrade>
-              <condition>should_install_lzo</condition>
-            </package>
-          </packages>
-        </osSpecific>
-        
-        <osSpecific>
-          <osFamily>amazon2015,redhat6,redhat7,suse11</osFamily>
-          <packages>
-            <package>
-              <name>hadoop-client</name>
-            </package>
-            <package>
-              <name>snappy</name>
-            </package>
-            <package>
-              <name>snappy-devel</name>
-            </package>
-            <package>
-              <name>lzo</name>
-              <skipUpgrade>true</skipUpgrade>
-              <condition>should_install_lzo</condition>
-            </package>
-            <package>
-              <name>hadoop-lzo-native</name>
-              <skipUpgrade>true</skipUpgrade>
-              <condition>should_install_lzo</condition>
-            </package>
-            <package>
-              <name>hadoop-libhdfs</name>
-            </package>
-          </packages>
-        </osSpecific>
-        
-        <osSpecific>
-          <osFamily>suse12</osFamily>
-          <packages>
-            <package>
-              <name>hadoop-client</name>
-            </package>
-            <package>
-              <name>snappy</name>
-            </package>
-            <package>
-              <name>snappy-devel</name>
-            </package>
-            <package>
-              <name>liblzo2-2</name>
-              <skipUpgrade>true</skipUpgrade>
-              <condition>should_install_lzo</condition>
-            </package>
-            <package>
-              <name>hadoop-lzo-native</name>
-              <skipUpgrade>true</skipUpgrade>
-              <condition>should_install_lzo</condition>
-            </package>
-            <package>
-              <name>hadoop-libhdfs</name>
-            </package>
-          </packages>
-        </osSpecific>
-
-        <osSpecific>
-          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
-          <packages>
-            <package>
-              <name>hadoop-client</name>
-            </package>
-            <package>
-              <name>libsnappy1</name>
-            </package>
-            <package>
-              <name>libsnappy-dev</name>
-            </package>
-            <package>
-              <name>liblzo2-2</name>
-              <skipUpgrade>true</skipUpgrade>
-              <condition>should_install_lzo</condition>
-            </package>
-            <package>
-              <name>hadoop-hdfs</name>
-            </package>
-            <package>
-              <name>libhdfs0</name>
-            </package>
-            <package>
-              <name>libhdfs0-dev</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>ZOOKEEPER</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>core-site</config-type>
-        <config-type>hdfs-site</config-type>
-        <config-type>hadoop-env</config-type>
-        <config-type>hadoop-policy</config-type>
-        <config-type>hdfs-log4j</config-type>
-        <config-type>ranger-hdfs-plugin-properties</config-type>
-        <config-type>ssl-client</config-type>
-        <config-type>ssl-server</config-type>
-        <config-type>ranger-hdfs-audit</config-type>
-        <config-type>ranger-hdfs-policymgr-ssl</config-type>
-        <config-type>ranger-hdfs-security</config-type>
-        <config-type>ams-ssl-client</config-type>
-        <config-type>hadoop-metrics2.properties</config-type>
-      </configuration-dependencies>
-      <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
-
-      <quickLinksConfigurations>
-        <quickLinksConfiguration>
-          <fileName>quicklinks.json</fileName>
-          <default>true</default>
-        </quickLinksConfiguration>
-      </quickLinksConfigurations>
-
-      <themes>
-        <theme>
-          <fileName>theme.json</fileName>
-          <default>true</default>
-        </theme>
-      </themes>
-    </service>
-  </services>
-</metainfo>


[04/19] ambari git commit: AMBARI-19229. Remove HDP-3.0.0 stack definition from Ambari-2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py
deleted file mode 100644
index cdadc80..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py
+++ /dev/null
@@ -1,479 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import os
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.stack_features import get_stack_feature_version
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
-from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions.default import default
-from resource_management.libraries import functions
-from resource_management.libraries.functions import is_empty
-from resource_management.libraries.functions.get_architecture import get_architecture
-
-import status_params
-
-# a map of the Ambari role to the component name
-# for use with <stack-root>/current/<component>
-MAPR_SERVER_ROLE_DIRECTORY_MAP = {
-  'HISTORYSERVER' : 'hadoop-mapreduce-historyserver',
-  'MAPREDUCE2_CLIENT' : 'hadoop-mapreduce-client',
-}
-
-YARN_SERVER_ROLE_DIRECTORY_MAP = {
-  'APP_TIMELINE_SERVER' : 'hadoop-yarn-timelineserver',
-  'NODEMANAGER' : 'hadoop-yarn-nodemanager',
-  'RESOURCEMANAGER' : 'hadoop-yarn-resourcemanager',
-  'YARN_CLIENT' : 'hadoop-yarn-client'
-}
-
-# server configurations
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-architecture = get_architecture()
-
-stack_name = status_params.stack_name
-stack_root = Script.get_stack_root()
-tarball_map = default("/configurations/cluster-env/tarball_map", None)
-
-config_path = os.path.join(stack_root, "current/hadoop-client/conf")
-config_dir = os.path.realpath(config_path)
-
-# This is expected to be of the form #.#.#.#
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted_major = format_stack_version(stack_version_unformatted)
-stack_version_formatted = functions.get_stack_version('hadoop-yarn-resourcemanager')
-
-stack_supports_ru = stack_version_formatted_major and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted_major)
-stack_supports_timeline_state_store = stack_version_formatted_major and check_stack_feature(StackFeature.TIMELINE_STATE_STORE, stack_version_formatted_major)
-
-# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade.
-# It cannot be used during the initial Cluster Install because the version is not yet known.
-version = default("/commandParams/version", None)
-
-# get the correct version to use for checking stack features
-version_for_stack_feature_checks = get_stack_feature_version(config)
-
-stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
-stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
-
-hostname = config['hostname']
-
-# hadoop default parameters
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
-hadoop_bin = stack_select.get_hadoop_dir("sbin")
-hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
-hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hadoop_yarn_home = '/usr/lib/hadoop-yarn'
-hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
-mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
-yarn_bin = "/usr/lib/hadoop-yarn/sbin"
-yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
-hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
-
-# hadoop parameters for stacks supporting rolling_upgrade
-if stack_supports_ru:
-  # MapR directory root
-  mapred_role_root = "hadoop-mapreduce-client"
-  command_role = default("/role", "")
-  if command_role in MAPR_SERVER_ROLE_DIRECTORY_MAP:
-    mapred_role_root = MAPR_SERVER_ROLE_DIRECTORY_MAP[command_role]
-
-  # YARN directory root
-  yarn_role_root = "hadoop-yarn-client"
-  if command_role in YARN_SERVER_ROLE_DIRECTORY_MAP:
-    yarn_role_root = YARN_SERVER_ROLE_DIRECTORY_MAP[command_role]
-
-  hadoop_mapred2_jar_location = format("{stack_root}/current/{mapred_role_root}")
-  mapred_bin = format("{stack_root}/current/{mapred_role_root}/sbin")
-
-  hadoop_yarn_home = format("{stack_root}/current/{yarn_role_root}")
-  yarn_bin = format("{stack_root}/current/{yarn_role_root}/sbin")
-  yarn_container_bin = format("{stack_root}/current/{yarn_role_root}/bin")
-
-if stack_supports_timeline_state_store:
-  # Timeline Service property that was added with the timeline_state_store stack feature
-  ats_leveldb_state_store_dir = config['configurations']['yarn-site']['yarn.timeline-service.leveldb-state-store.path']
-
-# ats 1.5 properties
-entity_groupfs_active_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.active-dir']
-entity_groupfs_active_dir_mode = 01777
-entity_groupfs_store_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.done-dir']
-entity_groupfs_store_dir_mode = 0700
-
-hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
-
-limits_conf_dir = "/etc/security/limits.d"
-yarn_user_nofile_limit = default("/configurations/yarn-env/yarn_user_nofile_limit", "32768")
-yarn_user_nproc_limit = default("/configurations/yarn-env/yarn_user_nproc_limit", "65536")
-
-mapred_user_nofile_limit = default("/configurations/mapred-env/mapred_user_nofile_limit", "32768")
-mapred_user_nproc_limit = default("/configurations/mapred-env/mapred_user_nproc_limit", "65536")
-
-execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir + os.pathsep + yarn_container_bin
-
-ulimit_cmd = "ulimit -c unlimited;"
-
-mapred_user = status_params.mapred_user
-yarn_user = status_params.yarn_user
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
-
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
-smoke_hdfs_user_mode = 0770
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-nm_security_marker_dir = "/var/lib/hadoop-yarn"
-nm_security_marker = format('{nm_security_marker_dir}/nm_security_enabled')
-current_nm_security_state = os.path.isfile(nm_security_marker)
-toggle_nm_security = (current_nm_security_state and not security_enabled) or (not current_nm_security_state and security_enabled)
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-
-yarn_executor_container_group = config['configurations']['yarn-site']['yarn.nodemanager.linux-container-executor.group']
-yarn_nodemanager_container_executor_class =  config['configurations']['yarn-site']['yarn.nodemanager.container-executor.class']
-is_linux_container_executor = (yarn_nodemanager_container_executor_class == 'org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor')
-container_executor_mode = 06050 if is_linux_container_executor else 02050
-kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-yarn_http_policy = config['configurations']['yarn-site']['yarn.http.policy']
-yarn_https_on = (yarn_http_policy.upper() == 'HTTPS_ONLY')
-rm_hosts = config['clusterHostInfo']['rm_host']
-rm_host = rm_hosts[0]
-rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
-rm_https_port = default('/configurations/yarn-site/yarn.resourcemanager.webapp.https.address', ":8090").split(':')[-1]
-# TODO UPGRADE default, update site during upgrade
-rm_nodes_exclude_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-rm_nodes_exclude_dir = os.path.dirname(rm_nodes_exclude_path)
-
-java64_home = config['hostLevelParams']['java_home']
-hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
-
-yarn_heapsize = config['configurations']['yarn-env']['yarn_heapsize']
-resourcemanager_heapsize = config['configurations']['yarn-env']['resourcemanager_heapsize']
-nodemanager_heapsize = config['configurations']['yarn-env']['nodemanager_heapsize']
-apptimelineserver_heapsize = default("/configurations/yarn-env/apptimelineserver_heapsize", 1024)
-ats_leveldb_dir = config['configurations']['yarn-site']['yarn.timeline-service.leveldb-timeline-store.path']
-ats_leveldb_lock_file = os.path.join(ats_leveldb_dir, "leveldb-timeline-store.ldb", "LOCK")
-yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
-yarn_pid_dir_prefix = status_params.yarn_pid_dir_prefix
-mapred_pid_dir_prefix = status_params.mapred_pid_dir_prefix
-mapred_log_dir_prefix = config['configurations']['mapred-env']['mapred_log_dir_prefix']
-mapred_env_sh_template = config['configurations']['mapred-env']['content']
-yarn_env_sh_template = config['configurations']['yarn-env']['content']
-yarn_nodemanager_recovery_dir = default('/configurations/yarn-site/yarn.nodemanager.recovery.dir', None)
-service_check_queue_name = default('/configurations/yarn-env/service_check.queue.name', 'default')
-
-if len(rm_hosts) > 1:
-  additional_rm_host = rm_hosts[1]
-  rm_webui_address = format("{rm_host}:{rm_port},{additional_rm_host}:{rm_port}")
-  rm_webui_https_address = format("{rm_host}:{rm_https_port},{additional_rm_host}:{rm_https_port}")
-else:
-  rm_webui_address = format("{rm_host}:{rm_port}")
-  rm_webui_https_address = format("{rm_host}:{rm_https_port}")
-
-if security_enabled:
-  tc_mode = 0644
-  tc_owner = "root"
-else:
-  tc_mode = None
-  tc_owner = hdfs_user
-
-nm_webui_address = config['configurations']['yarn-site']['yarn.nodemanager.webapp.address']
-hs_webui_address = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address']
-nm_address = config['configurations']['yarn-site']['yarn.nodemanager.address']  # still contains 0.0.0.0
-if hostname and nm_address and nm_address.startswith("0.0.0.0:"):
-  nm_address = nm_address.replace("0.0.0.0", hostname)
-
-# Initialize lists of work directories.
-nm_local_dirs = default("/configurations/yarn-site/yarn.nodemanager.local-dirs", "")
-nm_log_dirs = default("/configurations/yarn-site/yarn.nodemanager.log-dirs", "")
-
-nm_local_dirs_list = nm_local_dirs.split(',')
-nm_log_dirs_list = nm_log_dirs.split(',')
-
-nm_log_dir_to_mount_file = "/var/lib/ambari-agent/data/yarn/yarn_log_dir_mount.hist"
-nm_local_dir_to_mount_file = "/var/lib/ambari-agent/data/yarn/yarn_local_dir_mount.hist"
-
-distrAppJarName = "hadoop-yarn-applications-distributedshell-2.*.jar"
-hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
-
-entity_file_history_directory = "/tmp/entity-file-history/active"
-
-yarn_pid_dir = status_params.yarn_pid_dir
-mapred_pid_dir = status_params.mapred_pid_dir
-
-mapred_log_dir = format("{mapred_log_dir_prefix}/{mapred_user}")
-yarn_log_dir = format("{yarn_log_dir_prefix}/{yarn_user}")
-mapred_job_summary_log = format("{mapred_log_dir_prefix}/{mapred_user}/hadoop-mapreduce.jobsummary.log")
-yarn_job_summary_log = format("{yarn_log_dir_prefix}/{yarn_user}/hadoop-mapreduce.jobsummary.log")
-
-user_group = config['configurations']['cluster-env']['user_group']
-
-#exclude file
-exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
-exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-
-ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
-has_ats = not len(ats_host) == 0
-
-nm_hosts = default("/clusterHostInfo/nm_hosts", [])
-
-# don't use len(nm_hosts) here, because the check can take too much time on large clusters
-number_of_nm = 1
-
-# default kinit commands
-rm_kinit_cmd = ""
-yarn_timelineservice_kinit_cmd = ""
-nodemanager_kinit_cmd = ""
-
-if security_enabled:
-  rm_principal_name = config['configurations']['yarn-site']['yarn.resourcemanager.principal']
-  rm_principal_name = rm_principal_name.replace('_HOST',hostname.lower())
-  rm_keytab = config['configurations']['yarn-site']['yarn.resourcemanager.keytab']
-  rm_kinit_cmd = format("{kinit_path_local} -kt {rm_keytab} {rm_principal_name};")
-
-  # YARN timeline security options
-  if has_ats:
-    _yarn_timelineservice_principal_name = config['configurations']['yarn-site']['yarn.timeline-service.principal']
-    _yarn_timelineservice_principal_name = _yarn_timelineservice_principal_name.replace('_HOST', hostname.lower())
-    _yarn_timelineservice_keytab = config['configurations']['yarn-site']['yarn.timeline-service.keytab']
-    yarn_timelineservice_kinit_cmd = format("{kinit_path_local} -kt {_yarn_timelineservice_keytab} {_yarn_timelineservice_principal_name};")
-
-  if 'yarn.nodemanager.principal' in config['configurations']['yarn-site']:
-    _nodemanager_principal_name = default('/configurations/yarn-site/yarn.nodemanager.principal', None)
-    if _nodemanager_principal_name:
-      _nodemanager_principal_name = _nodemanager_principal_name.replace('_HOST', hostname.lower())
-
-    _nodemanager_keytab = config['configurations']['yarn-site']['yarn.nodemanager.keytab']
-    nodemanager_kinit_cmd = format("{kinit_path_local} -kt {_nodemanager_keytab} {_nodemanager_principal_name};")
-
-
-yarn_log_aggregation_enabled = config['configurations']['yarn-site']['yarn.log-aggregation-enable']
-yarn_nm_app_log_dir =  config['configurations']['yarn-site']['yarn.nodemanager.remote-app-log-dir']
-mapreduce_jobhistory_intermediate_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.intermediate-done-dir']
-mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.done-dir']
-jobhistory_heapsize = default("/configurations/mapred-env/jobhistory_heapsize", "900")
-jhs_leveldb_state_store_dir = default('/configurations/mapred-site/mapreduce.jobhistory.recovery.store.leveldb.path', "/hadoop/mapreduce/jhs")
-
-# Tez-related properties
-tez_user = config['configurations']['tez-env']['tez_user']
-
-# Tez jars
-tez_local_api_jars = '/usr/lib/tez/tez*.jar'
-tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
-app_dir_files = {tez_local_api_jars:None}
-
-# Tez libraries
-tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
-
-#for create_hdfs_directory
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-
-
-
-hdfs_site = config['configurations']['hdfs-site']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-is_webhdfs_enabled = hdfs_site['dfs.webhdfs.enabled']
-
-# Path to file that contains list of HDFS resources to be skipped during processing
-hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore"
-
-dfs_type = default("/commandParams/dfs_type", "")
-
-
-import functools
-# create partial functions with common arguments for every HdfsResource call
-# to create/delete an HDFS directory/file or copy from local, call params.HdfsResource in code
-HdfsResource = functools.partial(
-  HdfsResource,
-  user=hdfs_user,
-  hdfs_resource_ignore_file = hdfs_resource_ignore_file,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir,
-  principal_name = hdfs_principal_name,
-  hdfs_site = hdfs_site,
-  default_fs = default_fs,
-  immutable_paths = get_not_managed_resources(),
-  dfs_type = dfs_type
- )
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
-
-mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
-
-#taskcontroller.cfg
-
-mapred_local_dir = "/tmp/hadoop-mapred/mapred/local"
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-min_user_id = config['configurations']['yarn-env']['min_user_id']
-
-# Node labels
-node_labels_dir = default("/configurations/yarn-site/yarn.node-labels.fs-store.root-dir", None)
-node_label_enable = config['configurations']['yarn-site']['yarn.node-labels.enabled']
-
-cgroups_dir = "/cgroups_test/cpu"
-
-# ***********************  RANGER PLUGIN CHANGES ***********************
-# ranger host
-ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
-has_ranger_admin = not len(ranger_admin_hosts) == 0
-xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-# hostname of the active HDFS HA Namenode (only used when HA is enabled)
-dfs_ha_namenode_active = default("/configurations/hadoop-env/dfs_ha_initial_namenode_active", None)
-if dfs_ha_namenode_active is not None: 
-  namenode_hostname = dfs_ha_namenode_active
-else:
-  namenode_hostname = config['clusterHostInfo']['namenode_host'][0]
-
-ranger_admin_log_dir = default("/configurations/ranger-env/ranger_admin_log_dir","/var/log/ranger/admin")
-
-scheme = 'http' if not yarn_https_on else 'https'
-yarn_rm_address = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'] if not yarn_https_on else config['configurations']['yarn-site']['yarn.resourcemanager.webapp.https.address']
-rm_active_port = rm_https_port if yarn_https_on else rm_port
-
-rm_ha_enabled = False
-rm_ha_ids_list = []
-rm_webapp_addresses_list = [yarn_rm_address]
-rm_ha_ids = default("/configurations/yarn-site/yarn.resourcemanager.ha.rm-ids", None)
-
-if rm_ha_ids:
-  rm_ha_ids_list = rm_ha_ids.split(",")
-  if len(rm_ha_ids_list) > 1:
-    rm_ha_enabled = True
-
-if rm_ha_enabled:
-  rm_webapp_addresses_list = []
-  for rm_id in rm_ha_ids_list:
-    rm_webapp_address_property = format('yarn.resourcemanager.webapp.address.{rm_id}') if not yarn_https_on else format('yarn.resourcemanager.webapp.https.address.{rm_id}')
-    rm_webapp_address = config['configurations']['yarn-site'][rm_webapp_address_property]
-    rm_webapp_addresses_list.append(rm_webapp_address)
-
-#ranger yarn properties
-if has_ranger_admin:
-  is_supported_yarn_ranger = config['configurations']['yarn-env']['is_supported_yarn_ranger']
-
-  if is_supported_yarn_ranger:
-    enable_ranger_yarn = (config['configurations']['ranger-yarn-plugin-properties']['ranger-yarn-plugin-enabled'].lower() == 'yes')
-    policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
-    if 'admin-properties' in config['configurations'] and 'policymgr_external_url' in config['configurations']['admin-properties'] and policymgr_mgr_url.endswith('/'):
-      policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
-    xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
-    xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
-    xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
-    xa_audit_db_password = ''
-    if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db:
-      xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
-    xa_db_host = config['configurations']['admin-properties']['db_host']
-    repo_name = str(config['clusterName']) + '_yarn'
-
-    ranger_env = config['configurations']['ranger-env']
-    ranger_plugin_properties = config['configurations']['ranger-yarn-plugin-properties']
-    policy_user = config['configurations']['ranger-yarn-plugin-properties']['policy_user']
-    yarn_rest_url = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address']  
-
-    ranger_plugin_config = {
-      'username' : config['configurations']['ranger-yarn-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
-      'password' : unicode(config['configurations']['ranger-yarn-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']),
-      'yarn.url' : format('{scheme}://{yarn_rest_url}'),
-      'commonNameForCertificate' : config['configurations']['ranger-yarn-plugin-properties']['common.name.for.certificate']
-    }
-
-    yarn_ranger_plugin_repo = {
-      'isEnabled': 'true',
-      'configs': ranger_plugin_config,
-      'description': 'yarn repo',
-      'name': repo_name,
-      'repositoryType': 'yarn',
-      'type': 'yarn',
-      'assetType': '1'
-    }
-
-    if stack_supports_ranger_kerberos:
-      ranger_plugin_config['ambari.service.check.user'] = policy_user
-      ranger_plugin_config['hadoop.security.authentication'] = 'kerberos' if security_enabled else 'simple'
-
-    if stack_supports_ranger_kerberos and security_enabled:
-      ranger_plugin_config['policy.download.auth.users'] = yarn_user
-      ranger_plugin_config['tag.download.auth.users'] = yarn_user
-
-    # For the curl command in the Ranger plugin to get the DB connector
-    jdk_location = config['hostLevelParams']['jdk_location']
-    java_share_dir = '/usr/share/java'
-    previous_jdbc_jar_name = None
-    if stack_supports_ranger_audit_db:
-      if xa_audit_db_flavor and xa_audit_db_flavor == 'mysql':
-        jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
-        audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
-        jdbc_driver = "com.mysql.jdbc.Driver"
-      elif xa_audit_db_flavor and xa_audit_db_flavor == 'oracle':
-        jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
-        colon_count = xa_db_host.count(':')
-        if colon_count == 2 or colon_count == 0:
-          audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
-        else:
-          audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
-        jdbc_driver = "oracle.jdbc.OracleDriver"
-      elif xa_audit_db_flavor and xa_audit_db_flavor == 'postgres':
-        jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
-        audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
-        jdbc_driver = "org.postgresql.Driver"
-      elif xa_audit_db_flavor and xa_audit_db_flavor == 'mssql':
-        jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
-        audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
-        jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
-      elif xa_audit_db_flavor and xa_audit_db_flavor == 'sqla':
-        jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
-        audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
-        jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
-
-    downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-    driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-    driver_curl_target = format("{hadoop_yarn_home}/lib/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-    previous_jdbc_jar = format("{hadoop_yarn_home}/lib/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-
-    xa_audit_db_is_enabled = False
-    ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
-    if xml_configurations_supported and stack_supports_ranger_audit_db:
-      xa_audit_db_is_enabled = config['configurations']['ranger-yarn-audit']['xasecure.audit.destination.db']
-    xa_audit_hdfs_is_enabled = config['configurations']['ranger-yarn-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
-    ssl_keystore_password = unicode(config['configurations']['ranger-yarn-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
-    ssl_truststore_password = unicode(config['configurations']['ranger-yarn-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
-    credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
-
-    #For SQLA explicitly disable audit to DB for Ranger
-    if xa_audit_db_flavor == 'sqla':
-      xa_audit_db_is_enabled = False

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_windows.py
deleted file mode 100644
index 52918d2e..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_windows.py
+++ /dev/null
@@ -1,62 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions.format import format
-from resource_management.libraries import functions
-from resource_management.libraries.functions import is_empty
-import os
-from status_params import *
-
-# server configurations
-config = Script.get_config()
-
-hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
-yarn_user = hadoop_user
-hdfs_user = hadoop_user
-smokeuser = hadoop_user
-config_dir = os.environ["HADOOP_CONF_DIR"]
-hadoop_home = os.environ["HADOOP_HOME"]
-
-yarn_home = os.environ["HADOOP_YARN_HOME"]
-
-hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-rm_host = config['clusterHostInfo']['rm_host'][0]
-rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
-rm_https_port = "8090"
-rm_webui_address = format("{rm_host}:{rm_port}")
-rm_webui_https_address = format("{rm_host}:{rm_https_port}")
-
-hs_host = config['clusterHostInfo']['hs_host'][0]
-hs_port = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address'].split(':')[-1]
-hs_webui_address = format("{hs_host}:{hs_port}")
-
-hadoop_mapred2_jar_location = os.path.join(os.environ["HADOOP_COMMON_HOME"], "share", "hadoop", "mapreduce")
-hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
-
-exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
-exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
deleted file mode 100644
index e053fe6..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
+++ /dev/null
@@ -1,293 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.security_commons import build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
-  FILE_TYPE_XML
-from resource_management.libraries.functions.decorator import retry
-from resource_management.core.resources.system import File, Execute
-from resource_management.core.source import Template
-from resource_management.core.logger import Logger
-from resource_management.core.exceptions import Fail
-from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
-from resource_management.libraries.providers.hdfs_resource import HdfsResourceProvider
-from resource_management import is_empty
-from resource_management import shell
-
-
-from yarn import yarn
-from service import service
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-from setup_ranger_yarn import setup_ranger_yarn
-
-
-class Resourcemanager(Script):
-  def install(self, env):
-    self.install_packages(env)
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    service('resourcemanager', action='stop')
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn(name='resourcemanager')
-
-  def refreshqueues(self, env):
-    pass
-
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class ResourcemanagerWindows(Resourcemanager):
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env)
-    service('resourcemanager', action='start')
-
-  def status(self, env):
-    service('resourcemanager', action='status')
-
-  def decommission(self, env):
-    import params
-
-    env.set_params(params)
-    yarn_user = params.yarn_user
-
-    yarn_refresh_cmd = format("cmd /c yarn rmadmin -refreshNodes")
-
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=yarn_user,
-         mode="f"
-    )
-
-    if params.update_exclude_file_only == False:
-      Execute(yarn_refresh_cmd, user=yarn_user)
-
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class ResourcemanagerDefault(Resourcemanager):
-  def get_component_name(self):
-    return "hadoop-yarn-resourcemanager"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing Stack Upgrade post-restart")
-    import params
-    env.set_params(params)
-
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-yarn-resourcemanager", params.version)
-
-  def start(self, env, upgrade_type=None):
-    import params
-
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    if params.has_ranger_admin and params.is_supported_yarn_ranger:
-      setup_ranger_yarn() #Ranger Yarn Plugin related calls
-
-    # wait for active-dir and done-dir to be created by ATS if needed
-    if params.has_ats:
-      Logger.info("Verifying DFS directories where ATS stores time line data for active and completed applications.")
-      self.wait_for_dfs_directories_created(params.entity_groupfs_store_dir, params.entity_groupfs_active_dir)
-
-    service('resourcemanager', action='start')
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.resourcemanager_pid_file)
-    pass
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
-                           "yarn.acl.enable": "true"}
-      props_empty_check = ["yarn.resourcemanager.principal",
-                           "yarn.resourcemanager.keytab",
-                           "yarn.resourcemanager.webapp.spnego-principal",
-                           "yarn.resourcemanager.webapp.spnego-keytab-file"]
-
-      props_read_check = ["yarn.resourcemanager.keytab",
-                          "yarn.resourcemanager.webapp.spnego-keytab-file"]
-      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
-                                           props_read_check)
-
-      yarn_expectations ={}
-      yarn_expectations.update(yarn_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'yarn-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, yarn_site_props)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'yarn-site' not in security_params
-               or 'yarn.resourcemanager.keytab' not in security_params['yarn-site']
-               or 'yarn.resourcemanager.principal' not in security_params['yarn-site']) \
-            or 'yarn.resourcemanager.webapp.spnego-keytab-file' not in security_params['yarn-site'] \
-            or 'yarn.resourcemanager.webapp.spnego-principal' not in security_params['yarn-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal is not set properly."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.resourcemanager.keytab'],
-                                security_params['yarn-site']['yarn.resourcemanager.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-keytab-file'],
-                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-  def refreshqueues(self, env):
-    import params
-
-    self.configure(env)
-    env.set_params(params)
-
-    service('resourcemanager',
-            action='refreshQueues'
-    )
-
-  def decommission(self, env):
-    import params
-
-    env.set_params(params)
-    rm_kinit_cmd = params.rm_kinit_cmd
-    yarn_user = params.yarn_user
-    conf_dir = params.hadoop_conf_dir
-    user_group = params.user_group
-
-    yarn_refresh_cmd = format("{rm_kinit_cmd} yarn --config {conf_dir} rmadmin -refreshNodes")
-
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=yarn_user,
-         group=user_group
-    )
-
-    if not params.update_exclude_file_only:
-      Execute(yarn_refresh_cmd,
-              environment={'PATH': params.execute_path},
-              user=yarn_user)
-
-  def wait_for_dfs_directories_created(self, *dirs):
-    import params
-
-    ignored_dfs_dirs = HdfsResourceProvider.get_ignored_resources_list(params.hdfs_resource_ignore_file)
-
-    if params.security_enabled:
-      Execute(params.rm_kinit_cmd,
-              user=params.yarn_user
-      )
-      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
-        user=params.hdfs_user
-      )
-
-    for dir_path in dirs:
-      self.wait_for_dfs_directory_created(dir_path, ignored_dfs_dirs)
-
-
-  @retry(times=8, sleep_time=20, backoff_factor=1, err_class=Fail)
-  def wait_for_dfs_directory_created(self, dir_path, ignored_dfs_dirs):
-    import params
-
-
-    if not is_empty(dir_path):
-      dir_path = HdfsResourceProvider.parse_path(dir_path)
-
-      if dir_path in ignored_dfs_dirs:
-        Logger.info("Skipping DFS directory '" + dir_path + "' as it's marked to be ignored.")
-        return
-
-      Logger.info("Verifying if DFS directory '" + dir_path + "' exists.")
-
-      dir_exists = None
-
-      if WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
-        # checking via WebHDFS is much faster than executing hdfs dfs -test
-        util = WebHDFSUtil(params.hdfs_site, params.hdfs_user, params.security_enabled)
-        list_status = util.run_command(dir_path, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
-        dir_exists = ('FileStatus' in list_status)
-      else:
-        # fall back to the time-expensive "hdfs dfs -test -d" check
-        dfs_ret_code = shell.call(format("hdfs --config {hadoop_conf_dir} dfs -test -d " + dir_path), user=params.yarn_user)[0]
-        dir_exists = not dfs_ret_code  # dfs -test -d returns 0 if the directory exists
-
-      if not dir_exists:
-        raise Fail("DFS directory '" + dir_path + "' does not exist!")
-      else:
-        Logger.info("DFS directory '" + dir_path + "' exists.")
-
-  def get_log_folder(self):
-    import params
-    return params.yarn_log_dir
-  
-  def get_user(self):
-    import params
-    return params.yarn_user
-
-  def get_pid_files(self):
-    import status_params
-    return [status_params.resourcemanager_pid_file]
-  
-if __name__ == "__main__":
-  Resourcemanager().execute()
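
A note on the directory check in wait_for_dfs_directory_created above: it prefers a WebHDFS GETFILESTATUS call (treating a 404 as "missing") over spawning "hdfs dfs -test -d", since an HTTP request avoids JVM startup cost. As a rough standalone sketch of the same idea, assuming an unsecured cluster, a reachable NameNode HTTP address, and plain urllib2 instead of Ambari's WebHDFSUtil, the check could look like:

# Sketch only: directory-existence check via the WebHDFS REST API, mirroring the
# GETFILESTATUS call above. Assumes WebHDFS is enabled and security is off; the
# Ambari code uses its WebHDFSUtil wrapper plus kinit handling instead.
import json
import urllib2

def dfs_directory_exists(namenode_http_address, dir_path):
    url = "http://%s/webhdfs/v1%s?op=GETFILESTATUS" % (namenode_http_address, dir_path)
    try:
        status = json.load(urllib2.urlopen(url, timeout=10))
    except urllib2.HTTPError as e:
        if e.code == 404:
            return False  # the NameNode reports FileNotFoundException as HTTP 404
        raise
    return status.get("FileStatus", {}).get("type") == "DIRECTORY"

# Example (hypothetical host): dfs_directory_exists("nn-host.example.com:50070", "/ats/done")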

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/service.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/service.py
deleted file mode 100644
index 78b2428..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/service.py
+++ /dev/null
@@ -1,106 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-from ambari_commons import OSConst
-from resource_management.core.shell import as_user, as_sudo
-from resource_management.libraries.functions.show_logs import show_logs
-from resource_management.libraries.functions.format import format
-from resource_management.core.resources.system import Execute, File
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def service(componentName, action='start', serviceName='yarn'):
-  import status_params
-  if componentName in status_params.service_map:
-    service_name = status_params.service_map[componentName]
-    if action == 'start' or action == 'stop':
-      Service(service_name, action=action)
-    elif action == 'status':
-      check_windows_service_status(service_name)
-
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def service(componentName, action='start', serviceName='yarn'):
-  import params
-
-  if serviceName == 'mapreduce' and componentName == 'historyserver':
-    delete_pid_file = True
-    daemon = format("{mapred_bin}/mr-jobhistory-daemon.sh")
-    pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-{componentName}.pid")
-    usr = params.mapred_user
-    log_dir = params.mapred_log_dir
-  else:
-    # !!! yarn-daemon.sh deletes the PID for us; if we remove it the script
-    # may not work correctly when stopping the service
-    delete_pid_file = False
-    daemon = format("{yarn_bin}/yarn-daemon.sh")
-    pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-{componentName}.pid")
-    usr = params.yarn_user
-    log_dir = params.yarn_log_dir
-
-  cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {hadoop_conf_dir}")
-
-  if action == 'start':
-    daemon_cmd = format("{ulimit_cmd} {cmd} start {componentName}")
-    check_process = as_sudo(["test", "-f", pid_file]) + " && " + as_sudo(["pgrep", "-F", pid_file])
-
-    # Remove the pid file if its corresponding process is not running.
-    File(pid_file, action = "delete", not_if = check_process)
-
-    if componentName == 'timelineserver' and serviceName == 'yarn':
-      File(params.ats_leveldb_lock_file,
-         action = "delete",
-         only_if = format("ls {params.ats_leveldb_lock_file}"),
-         not_if = check_process,
-         ignore_failures = True
-      )
-
-    try:
-      # Attempt to start the process. Internally, this is skipped if the process is already running.
-      Execute(daemon_cmd, user = usr, not_if = check_process)
-  
-      # Ensure that the process with the expected PID exists.
-      Execute(check_process,
-              not_if = check_process,
-              tries=5,
-              try_sleep=1,
-      )
-    except:
-      show_logs(log_dir, usr)
-      raise
-
-  elif action == 'stop':
-    daemon_cmd = format("{cmd} stop {componentName}")
-    try:
-      Execute(daemon_cmd, user=usr)
-    except:
-      show_logs(log_dir, usr)
-      raise
-
-    # !!! yarn-daemon doesn't need us to delete PIDs
-    if delete_pid_file is True:
-      File(pid_file, action="delete")
-
-
-  elif action == 'refreshQueues':
-    rm_kinit_cmd = params.rm_kinit_cmd
-    refresh_cmd = format("{rm_kinit_cmd} export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {yarn_container_bin}/yarn rmadmin -refreshQueues")
-    Execute(refresh_cmd, user=usr)
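
The start branch in service.py above follows an idempotent pattern: delete a stale PID file, launch the daemon only if no process matching the PID file is running, then poll until the PID exists. A minimal sketch of that pattern outside of Ambari's Execute/File resources follows; the path and command shown are hypothetical placeholders, not Ambari parameters.

# Minimal sketch of the idempotent start pattern above: remove a stale PID file,
# then start the daemon only if no process listed in the PID file is running.
import os
import subprocess

def start_daemon_if_needed(pid_file, start_cmd):
    def pid_running():
        return os.path.isfile(pid_file) and subprocess.call(["pgrep", "-F", pid_file]) == 0

    if pid_running():
        return  # equivalent to Execute(daemon_cmd, not_if=check_process)
    if os.path.isfile(pid_file):
        os.remove(pid_file)  # mirrors File(pid_file, action="delete", not_if=check_process)
    subprocess.check_call(start_cmd, shell=True)

# Example (hypothetical values):
# start_daemon_if_needed("/var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid",
#                        "yarn-daemon.sh --config /etc/hadoop/conf start resourcemanager")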

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/service_check.py
deleted file mode 100644
index b934767..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/service_check.py
+++ /dev/null
@@ -1,185 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import os
-import sys
-import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
-import re
-import subprocess
-from ambari_commons import os_utils
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.constants import StackFeature
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.get_user_call_output import get_user_call_output
-from resource_management.core.exceptions import Fail
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Execute, File
-from resource_management.core.source import StaticFile
-from resource_management.core import shell
-
-CURL_CONNECTION_TIMEOUT = '5'
-
-class ServiceCheck(Script):
-  def service_check(self, env):
-    pass
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class ServiceCheckWindows(ServiceCheck):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    yarn_exe = os_utils.quote_path(os.path.join(params.yarn_home, "bin", "yarn.cmd"))
-
-    run_yarn_check_cmd = "cmd /C %s node -list" % yarn_exe
-
-    component_type = 'rm'
-    if params.hadoop_ssl_enabled:
-      component_address = params.rm_webui_https_address
-    else:
-      component_address = params.rm_webui_address
-
-    #temp_dir = os.path.abspath(os.path.join(params.hadoop_home, os.pardir)), "/tmp"
-    temp_dir = os.path.join(os.path.dirname(params.hadoop_home), "temp")
-    validateStatusFileName = "validateYarnComponentStatusWindows.py"
-    validateStatusFilePath = os.path.join(temp_dir, validateStatusFileName)
-    python_executable = sys.executable
-    validateStatusCmd = "%s %s %s -p %s -s %s" % (python_executable, validateStatusFilePath, component_type, component_address, params.hadoop_ssl_enabled)
-
-    if params.security_enabled:
-      kinit_cmd = "%s -kt %s %s;" % (params.kinit_path_local, params.smoke_user_keytab, params.smokeuser)
-      smoke_cmd = kinit_cmd + ' ' + validateStatusCmd
-    else:
-      smoke_cmd = validateStatusCmd
-
-    File(validateStatusFilePath,
-         content=StaticFile(validateStatusFileName)
-    )
-
-    Execute(smoke_cmd,
-            tries=3,
-            try_sleep=5,
-            logoutput=True
-    )
-
-    Execute(run_yarn_check_cmd, logoutput=True)
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class ServiceCheckDefault(ServiceCheck):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    params.HdfsResource(format("/user/{smokeuser}"),
-                        type="directory",
-                        action="create_on_execute",
-                        owner=params.smokeuser,
-                        mode=params.smoke_hdfs_user_mode,
-                        )
-
-    if params.stack_version_formatted_major and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version_formatted_major):
-      path_to_distributed_shell_jar = format("{stack_root}/current/hadoop-yarn-client/hadoop-yarn-applications-distributedshell.jar")
-    else:
-      path_to_distributed_shell_jar = "/usr/lib/hadoop-yarn/hadoop-yarn-applications-distributedshell*.jar"
-
-    yarn_distributed_shell_check_params = ["yarn org.apache.hadoop.yarn.applications.distributedshell.Client",
-                                           "-shell_command", "ls", "-num_containers", "{number_of_nm}",
-                                           "-jar", "{path_to_distributed_shell_jar}", "-timeout", "300000",
-                                           "--queue", "{service_check_queue_name}"]
-    yarn_distributed_shell_check_cmd = format(" ".join(yarn_distributed_shell_check_params))
-
-    if params.security_enabled:
-      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
-      smoke_cmd = format("{kinit_cmd} {yarn_distributed_shell_check_cmd}")
-    else:
-      smoke_cmd = yarn_distributed_shell_check_cmd
-
-    return_code, out = shell.checked_call(smoke_cmd,
-                                          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-                                          user=params.smokeuser,
-                                          )
-
-    m = re.search("appTrackingUrl=(.*),\s", out)
-    if m is None:
-      raise Fail("Could not find appTrackingUrl in the distributed shell output.")
-    app_url = m.group(1)
-
-    application_name = None
-    for item in str(app_url).split('/'):
-      if "application" in item:
-        application_name = item
-    if application_name is None:
-      raise Fail("Could not determine the application id from " + app_url)
-
-    # Find the active RM from the RM list.
-    # An exception is raised if the active RM cannot be determined.
-    active_rm_webapp_address = self.get_active_rm_webapp_address()
-    Logger.info("Active Resource Manager web app address is: " + active_rm_webapp_address)
-
-    # Verify job state from active resource manager via rest api
-    info_app_url = params.scheme + "://" + active_rm_webapp_address + "/ws/v1/cluster/apps/" + application_name
-    get_app_info_cmd = "curl --negotiate -u : -ks --location-trusted --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + info_app_url
-
-    return_code, stdout, _ = get_user_call_output(get_app_info_cmd,
-                                                  user=params.smokeuser,
-                                                  path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-                                                  )
-
-    try:
-      json_response = json.loads(stdout)
-    except Exception as e:
-      raise Fail(format("Response from YARN API was not a valid JSON. Response: {stdout}"))
-
-    if json_response is None or 'app' not in json_response or \
-            'state' not in json_response['app'] or 'finalStatus' not in json_response['app']:
-      raise Fail("Application " + app_url + " returns invalid data.")
-
-    if json_response['app']['state'] != "FINISHED" or json_response['app']['finalStatus'] != "SUCCEEDED":
-      raise Fail("Application " + app_url + " state/status is not valid. Should be FINISHED/SUCCEEDED.")
-
-  def get_active_rm_webapp_address(self):
-    import params
-    active_rm_webapp_address = None
-    rm_webapp_addresses = params.rm_webapp_addresses_list
-    if rm_webapp_addresses is not None and len(rm_webapp_addresses) > 0:
-      for rm_webapp_address in rm_webapp_addresses:
-        rm_state_url = params.scheme + "://" + rm_webapp_address + "/ws/v1/cluster/info"
-        get_cluster_info_cmd = "curl --negotiate -u : -ks --location-trusted --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + rm_state_url
-        try:
-          return_code, stdout, _ = get_user_call_output(get_cluster_info_cmd,
-                                                        user=params.smokeuser,
-                                                        path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-                                                        )
-          json_response = json.loads(stdout)
-          if json_response is not None and 'clusterInfo' in json_response \
-            and json_response['clusterInfo']['haState'] == "ACTIVE":
-              active_rm_webapp_address = rm_webapp_address
-              break
-        except Exception as e:
-          Logger.warning(format("Cluster info is not available from calling {get_cluster_info_cmd}"))
-
-    if active_rm_webapp_address is None:
-      raise Fail('Resource Manager state is not available. Failed to determine the active Resource Manager web application address from {0}'.format(','.join(rm_webapp_addresses)))
-    return active_rm_webapp_address
-
-if __name__ == "__main__":
-  ServiceCheck().execute()
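
The default service check above drives two ResourceManager REST endpoints: /ws/v1/cluster/info to find the RM whose haState is ACTIVE, and /ws/v1/cluster/apps/<app-id> to confirm the distributed shell job finished successfully. A stripped-down sketch of those two calls, assuming an unsecured cluster and plain urllib2 rather than the curl --negotiate command used above, could be:

# Sketch of the two ResourceManager REST calls above, using urllib2 instead of
# curl + get_user_call_output; host addresses are whatever the caller passes in.
import json
import urllib2

def find_active_rm(rm_webapp_addresses, scheme="http"):
    for address in rm_webapp_addresses:
        try:
            info = json.load(urllib2.urlopen("%s://%s/ws/v1/cluster/info" % (scheme, address), timeout=5))
        except Exception:
            continue  # this RM is unreachable; try the next one
        if info.get("clusterInfo", {}).get("haState") == "ACTIVE":
            return address
    raise RuntimeError("No active ResourceManager found among: " + ", ".join(rm_webapp_addresses))

def app_succeeded(active_rm, application_id, scheme="http"):
    url = "%s://%s/ws/v1/cluster/apps/%s" % (scheme, active_rm, application_id)
    app = json.load(urllib2.urlopen(url, timeout=5))["app"]
    return app["state"] == "FINISHED" and app["finalStatus"] == "SUCCEEDED"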

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/setup_ranger_yarn.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/setup_ranger_yarn.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/setup_ranger_yarn.py
deleted file mode 100644
index 6ea7f82..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/setup_ranger_yarn.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-    http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-from resource_management.core.logger import Logger
-
-def setup_ranger_yarn():
-  import params
-
-  if params.has_ranger_admin:
-
-    from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
-
-    if params.retryAble:
-      Logger.info("YARN: Setup Ranger: command retry is enabled, so the command is retried if Ranger admin is down.")
-    else:
-      Logger.info("YARN: Setup Ranger: command retry is not enabled, so the command is skipped if Ranger admin is down.")
-
-    if params.xml_configurations_supported and params.enable_ranger_yarn and params.xa_audit_hdfs_is_enabled:
-      params.HdfsResource("/ranger/audit",
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.hdfs_user,
-                         group=params.hdfs_user,
-                         mode=0755,
-                         recursive_chmod=True
-      )
-      params.HdfsResource("/ranger/audit/yarn",
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.yarn_user,
-                         group=params.yarn_user,
-                         mode=0700,
-                         recursive_chmod=True
-      )
-      params.HdfsResource(None, action="execute")
-
-    setup_ranger_plugin('hadoop-yarn-resourcemanager', 'yarn', params.previous_jdbc_jar,
-                        params.downloaded_custom_connector, params.driver_curl_source,
-                        params.driver_curl_target, params.java64_home,
-                        params.repo_name, params.yarn_ranger_plugin_repo,
-                        params.ranger_env, params.ranger_plugin_properties,
-                        params.policy_user, params.policymgr_mgr_url,
-                        params.enable_ranger_yarn, conf_dict=params.hadoop_conf_dir,
-                        component_user=params.yarn_user, component_group=params.user_group, cache_service_list=['yarn'],
-                        plugin_audit_properties=params.config['configurations']['ranger-yarn-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-yarn-audit'],
-                        plugin_security_properties=params.config['configurations']['ranger-yarn-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-yarn-security'],
-                        plugin_policymgr_ssl_properties=params.config['configurations']['ranger-yarn-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-yarn-policymgr-ssl'],
-                        component_list=['hadoop-yarn-resourcemanager'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
-                        credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password, 
-                        ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
-                        api_version = 'v2', skip_if_rangeradmin_down= not params.retryAble,
-                        is_security_enabled = params.security_enabled,
-                        is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
-                        component_user_principal=params.rm_principal_name if params.security_enabled else None,
-                        component_user_keytab=params.rm_keytab if params.security_enabled else None
-      )
-  else:
-    Logger.info('Ranger admin not installed')
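
For reference, the two HdfsResource calls above amount to creating and locking down the Ranger audit directories in HDFS. A rough shell-level equivalent is sketched below; it assumes the hdfs CLI is on PATH and the caller already holds HDFS superuser credentials, whereas the real code batches these operations through HdfsResource and runs them as the correct user.

# Illustration only: shell-level equivalent of the HdfsResource calls above.
import subprocess

def create_ranger_audit_dirs(hdfs_user="hdfs", yarn_user="yarn"):
    subprocess.check_call(["hdfs", "dfs", "-mkdir", "-p", "/ranger/audit/yarn"])
    subprocess.check_call(["hdfs", "dfs", "-chown", "-R", "%s:%s" % (hdfs_user, hdfs_user), "/ranger/audit"])
    subprocess.check_call(["hdfs", "dfs", "-chown", "-R", "%s:%s" % (yarn_user, yarn_user), "/ranger/audit/yarn"])
    subprocess.check_call(["hdfs", "dfs", "-chmod", "-R", "755", "/ranger/audit"])
    subprocess.check_call(["hdfs", "dfs", "-chmod", "-R", "700", "/ranger/audit/yarn"])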

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/status_params.py
deleted file mode 100644
index c2e9d92..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/status_params.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management.libraries.script.script import Script
-from resource_management.libraries import functions
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.default import default
-from ambari_commons import OSCheck
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-if OSCheck.is_windows_family():
-  resourcemanager_win_service_name = 'resourcemanager'
-  nodemanager_win_service_name = 'nodemanager'
-  historyserver_win_service_name = 'historyserver'
-  timelineserver_win_service_name = 'timelineserver'
-
-  service_map = {
-    'resourcemanager' : resourcemanager_win_service_name,
-    'nodemanager' : nodemanager_win_service_name,
-    'historyserver' : historyserver_win_service_name,
-    'timelineserver' : timelineserver_win_service_name
-  }
-else:
-  mapred_user = config['configurations']['mapred-env']['mapred_user']
-  yarn_user = config['configurations']['yarn-env']['yarn_user']
-  yarn_pid_dir_prefix = config['configurations']['yarn-env']['yarn_pid_dir_prefix']
-  mapred_pid_dir_prefix = config['configurations']['mapred-env']['mapred_pid_dir_prefix']
-  yarn_pid_dir = format("{yarn_pid_dir_prefix}/{yarn_user}")
-  mapred_pid_dir = format("{mapred_pid_dir_prefix}/{mapred_user}")
-
-  resourcemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-resourcemanager.pid")
-  nodemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-nodemanager.pid")
-  yarn_historyserver_pid_file_old = format("{yarn_pid_dir}/yarn-{yarn_user}-historyserver.pid")
-  yarn_historyserver_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-timelineserver.pid")  # *-historyserver.pid is deprecated
-  mapred_historyserver_pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-historyserver.pid")
-
-  hadoop_conf_dir = functions.conf_select.get_hadoop_conf_dir()
-
-  hostname = config['hostname']
-  kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-  security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-stack_name = default("/hostLevelParams/stack_name", None)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn.py
deleted file mode 100644
index 70ed5b3..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn.py
+++ /dev/null
@@ -1,498 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-# Python Imports
-import os
-
-# Ambari Common and Resource Management Imports
-from resource_management.libraries.script.script import Script
-from resource_management.core.resources.service import ServiceConfig
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.is_empty import is_empty
-from resource_management.core.resources.system import Directory
-from resource_management.core.resources.system import File
-from resource_management.libraries.resources.xml_config import XmlConfig
-from resource_management.core.source import InlineTemplate, Template
-from resource_management.core.logger import Logger
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-from ambari_commons import OSConst
-
-from resource_management.libraries.functions.mounted_dirs_helper import handle_mounted_dirs
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def yarn(name=None, config_dir=None):
-  """
-  :param name: Component name, apptimelineserver, nodemanager, resourcemanager, or None (defaults for client)
-  :param config_dir: Which config directory to write configs to, which could be different during rolling upgrade.
-  """
-  import params
-
-  if name == 'resourcemanager':
-    setup_resourcemanager()
-  elif name == 'nodemanager':
-    setup_nodemanager()
-  elif name == 'apptimelineserver':
-    setup_ats()
-  elif name == 'historyserver':
-    setup_historyserver()
-
-  if config_dir is None:
-    config_dir = params.hadoop_conf_dir
-
-  if params.yarn_nodemanager_recovery_dir:
-    Directory(InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
-              owner=params.yarn_user,
-              group=params.user_group,
-              create_parents = True,
-              mode=0755,
-              cd_access = 'a',
-    )
-
-  Directory([params.yarn_pid_dir_prefix, params.yarn_pid_dir, params.yarn_log_dir],
-            owner=params.yarn_user,
-            group=params.user_group,
-            create_parents = True,
-            cd_access = 'a',
-  )
-
-  Directory([params.mapred_pid_dir_prefix, params.mapred_pid_dir, params.mapred_log_dir_prefix, params.mapred_log_dir],
-            owner=params.mapred_user,
-            group=params.user_group,
-            create_parents = True,
-            cd_access = 'a',
-  )
-  Directory([params.yarn_log_dir_prefix],
-            owner=params.yarn_user,
-            group=params.user_group,
-            create_parents = True,
-            ignore_failures=True,
-            cd_access = 'a',
-  )
-
-  XmlConfig("core-site.xml",
-            conf_dir=config_dir,
-            configurations=params.config['configurations']['core-site'],
-            configuration_attributes=params.config['configuration_attributes']['core-site'],
-            owner=params.hdfs_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  # During RU, Core Masters and Slaves need hdfs-site.xml
-  # TODO, instead of specifying individual configs, which is susceptible to breaking when new configs are added,
-  # RU should rely on all available in <stack-root>/<version>/hadoop/conf
-  XmlConfig("hdfs-site.xml",
-            conf_dir=config_dir,
-            configurations=params.config['configurations']['hdfs-site'],
-            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
-            owner=params.hdfs_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  XmlConfig("mapred-site.xml",
-            conf_dir=config_dir,
-            configurations=params.config['configurations']['mapred-site'],
-            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
-            owner=params.yarn_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  XmlConfig("yarn-site.xml",
-            conf_dir=config_dir,
-            configurations=params.config['configurations']['yarn-site'],
-            configuration_attributes=params.config['configuration_attributes']['yarn-site'],
-            owner=params.yarn_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  XmlConfig("capacity-scheduler.xml",
-            conf_dir=config_dir,
-            configurations=params.config['configurations']['capacity-scheduler'],
-            configuration_attributes=params.config['configuration_attributes']['capacity-scheduler'],
-            owner=params.yarn_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  File(format("{limits_conf_dir}/yarn.conf"),
-       mode=0644,
-       content=Template('yarn.conf.j2')
-  )
-
-  File(format("{limits_conf_dir}/mapreduce.conf"),
-       mode=0644,
-       content=Template('mapreduce.conf.j2')
-  )
-
-  File(os.path.join(config_dir, "yarn-env.sh"),
-       owner=params.yarn_user,
-       group=params.user_group,
-       mode=0755,
-       content=InlineTemplate(params.yarn_env_sh_template)
-  )
-
-  File(format("{yarn_container_bin}/container-executor"),
-      group=params.yarn_executor_container_group,
-      mode=params.container_executor_mode
-  )
-
-  File(os.path.join(config_dir, "container-executor.cfg"),
-      group=params.user_group,
-      mode=0644,
-      content=Template('container-executor.cfg.j2')
-  )
-
-  Directory(params.cgroups_dir,
-            group=params.user_group,
-            create_parents = True,
-            mode=0755,
-            cd_access="a")
-
-  File(os.path.join(config_dir, "mapred-env.sh"),
-       owner=params.tc_owner,
-       mode=0755,
-       content=InlineTemplate(params.mapred_env_sh_template)
-  )
-
-  if params.security_enabled:
-    File(os.path.join(params.hadoop_bin, "task-controller"),
-         owner="root",
-         group=params.mapred_tt_group,
-         mode=06050
-    )
-    File(os.path.join(config_dir, 'taskcontroller.cfg'),
-         owner = params.tc_owner,
-         mode = params.tc_mode,
-         group = params.mapred_tt_group,
-         content=Template("taskcontroller.cfg.j2")
-    )
-  else:
-    File(os.path.join(config_dir, 'taskcontroller.cfg'),
-         owner=params.tc_owner,
-         content=Template("taskcontroller.cfg.j2")
-    )
-
-  XmlConfig("mapred-site.xml",
-            conf_dir=config_dir,
-            configurations=params.config['configurations']['mapred-site'],
-            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
-            owner=params.mapred_user,
-            group=params.user_group
-  )
-
-  XmlConfig("capacity-scheduler.xml",
-            conf_dir=config_dir,
-            configurations=params.config['configurations'][
-              'capacity-scheduler'],
-            configuration_attributes=params.config['configuration_attributes']['capacity-scheduler'],
-            owner=params.hdfs_user,
-            group=params.user_group
-  )
-
-  if "ssl-client" in params.config['configurations']:
-    XmlConfig("ssl-client.xml",
-              conf_dir=config_dir,
-              configurations=params.config['configurations']['ssl-client'],
-              configuration_attributes=params.config['configuration_attributes']['ssl-client'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-    Directory(params.hadoop_conf_secure_dir,
-              create_parents = True,
-              owner='root',
-              group=params.user_group,
-              cd_access='a',
-              )
-
-    XmlConfig("ssl-client.xml",
-              conf_dir=params.hadoop_conf_secure_dir,
-              configurations=params.config['configurations']['ssl-client'],
-              configuration_attributes=params.config['configuration_attributes']['ssl-client'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-  if "ssl-server" in params.config['configurations']:
-    XmlConfig("ssl-server.xml",
-              conf_dir=config_dir,
-              configurations=params.config['configurations']['ssl-server'],
-              configuration_attributes=params.config['configuration_attributes']['ssl-server'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-  if os.path.exists(os.path.join(config_dir, 'fair-scheduler.xml')):
-    File(os.path.join(config_dir, 'fair-scheduler.xml'),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-
-  if os.path.exists(
-    os.path.join(config_dir, 'ssl-client.xml.example')):
-    File(os.path.join(config_dir, 'ssl-client.xml.example'),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-
-  if os.path.exists(
-    os.path.join(config_dir, 'ssl-server.xml.example')):
-    File(os.path.join(config_dir, 'ssl-server.xml.example'),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-
-def setup_historyserver():
-  import params
-
-  if params.yarn_log_aggregation_enabled:
-    params.HdfsResource(params.yarn_nm_app_log_dir,
-                         action="create_on_execute",
-                         type="directory",
-                         owner=params.yarn_user,
-                         group=params.user_group,
-                         mode=01777,
-                         recursive_chmod=True
-    )
-
-  # create the /tmp folder with proper permissions if it doesn't exist yet
-  if params.entity_file_history_directory.startswith('/tmp'):
-      params.HdfsResource(params.hdfs_tmp_dir,
-                          action="create_on_execute",
-                          type="directory",
-                          owner=params.hdfs_user,
-                          mode=0777,
-      )
-
-  params.HdfsResource(params.entity_file_history_directory,
-                         action="create_on_execute",
-                         type="directory",
-                         owner=params.yarn_user,
-                         group=params.user_group
-  )
-  params.HdfsResource("/mapred",
-                       type="directory",
-                       action="create_on_execute",
-                       owner=params.mapred_user
-  )
-  params.HdfsResource("/mapred/system",
-                       type="directory",
-                       action="create_on_execute",
-                       owner=params.hdfs_user
-  )
-  params.HdfsResource(params.mapreduce_jobhistory_done_dir,
-                       type="directory",
-                       action="create_on_execute",
-                       owner=params.mapred_user,
-                       group=params.user_group,
-                       change_permissions_for_parents=True,
-                       mode=0777
-  )
-  params.HdfsResource(None, action="execute")
-  Directory(params.jhs_leveldb_state_store_dir,
-            owner=params.mapred_user,
-            group=params.user_group,
-            create_parents = True,
-            cd_access="a",
-            recursive_ownership = True,
-            )
-
-def setup_nodemanager():
-  import params
-
-  # First start after enabling/disabling security
-  if params.toggle_nm_security:
-    Directory(params.nm_local_dirs_list + params.nm_log_dirs_list,
-              action='delete'
-    )
-
-    # If yarn.nodemanager.recovery.dir exists, remove this dir
-    if params.yarn_nodemanager_recovery_dir:
-      Directory(InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
-                action='delete'
-      )
-
-    # Setting NM marker file
-    if params.security_enabled:
-      Directory(params.nm_security_marker_dir)
-      File(params.nm_security_marker,
-           content="Marker file to track the first start after enabling/disabling security. "
-                   "During the first start, YARN local and log dirs are removed and recreated."
-           )
-    elif not params.security_enabled:
-      File(params.nm_security_marker, action="delete")
-
-
-  if not params.security_enabled or params.toggle_nm_security:
-    # handle_mounted_dirs ensures that we don't create dirs which are temporarily unavailable (unmounted) and intended to reside on a different mount.
-    nm_log_dir_to_mount_file_content = handle_mounted_dirs(create_log_dir, params.nm_log_dirs, params.nm_log_dir_to_mount_file, params)
-    # create a history file used by handle_mounted_dirs
-    File(params.nm_log_dir_to_mount_file,
-         owner=params.hdfs_user,
-         group=params.user_group,
-         mode=0644,
-         content=nm_log_dir_to_mount_file_content
-    )
-    nm_local_dir_to_mount_file_content = handle_mounted_dirs(create_local_dir, params.nm_local_dirs, params.nm_local_dir_to_mount_file, params)
-    File(params.nm_local_dir_to_mount_file,
-         owner=params.hdfs_user,
-         group=params.user_group,
-         mode=0644,
-         content=nm_local_dir_to_mount_file_content
-    )
-
-def setup_resourcemanager():
-  import params
-
-  Directory(params.rm_nodes_exclude_dir,
-       mode=0755,
-       create_parents=True,
-       cd_access='a',
-  )
-  File(params.rm_nodes_exclude_path,
-       owner=params.yarn_user,
-       group=params.user_group
-  )
-  File(params.yarn_job_summary_log,
-     owner=params.yarn_user,
-     group=params.user_group
-  )
-  if (not is_empty(params.node_label_enable) and params.node_label_enable) or (is_empty(params.node_label_enable) and params.node_labels_dir):
-    params.HdfsResource(params.node_labels_dir,
-                         type="directory",
-                         action="create_on_execute",
-                         change_permissions_for_parents=True,
-                         owner=params.yarn_user,
-                         group=params.user_group,
-                         mode=0700
-    )
-    params.HdfsResource(None, action="execute")
-
-def setup_ats():
-  import params
-
-  Directory(params.ats_leveldb_dir,
-     owner=params.yarn_user,
-     group=params.user_group,
-     create_parents = True,
-     cd_access="a",
-  )
-
-  # if the stack supports the application timeline-service state store property (timeline_state_store stack feature)
-  if params.stack_supports_timeline_state_store:
-    Directory(params.ats_leveldb_state_store_dir,
-     owner=params.yarn_user,
-     group=params.user_group,
-     create_parents = True,
-     cd_access="a",
-    )
-  # app timeline server 1.5 directories
-  if not is_empty(params.entity_groupfs_store_dir):
-    parent_path = os.path.dirname(params.entity_groupfs_store_dir)
-    params.HdfsResource(parent_path,
-                        type="directory",
-                        action="create_on_execute",
-                        change_permissions_for_parents=True,
-                        owner=params.yarn_user,
-                        group=params.user_group,
-                        mode=0755
-                        )
-    params.HdfsResource(params.entity_groupfs_store_dir,
-                        type="directory",
-                        action="create_on_execute",
-                        owner=params.yarn_user,
-                        group=params.user_group,
-                        mode=params.entity_groupfs_store_dir_mode
-                        )
-  if not is_empty(params.entity_groupfs_active_dir):
-    parent_path = os.path.dirname(params.entity_groupfs_active_dir)
-    params.HdfsResource(parent_path,
-                        type="directory",
-                        action="create_on_execute",
-                        change_permissions_for_parents=True,
-                        owner=params.yarn_user,
-                        group=params.user_group,
-                        mode=0755
-                        )
-    params.HdfsResource(params.entity_groupfs_active_dir,
-                        type="directory",
-                        action="create_on_execute",
-                        owner=params.yarn_user,
-                        group=params.user_group,
-                        mode=params.entity_groupfs_active_dir_mode
-                        )
-  params.HdfsResource(None, action="execute")
-
-def create_log_dir(dir_name):
-  import params
-  Directory(dir_name,
-            create_parents = True,
-            cd_access="a",
-            mode=0775,
-            owner=params.yarn_user,
-            group=params.user_group,
-            ignore_failures=True,
-  )
-
-def create_local_dir(dir_name):
-  import params
-  Directory(dir_name,
-            create_parents = True,
-            cd_access="a",
-            mode=0755,
-            owner=params.yarn_user,
-            group=params.user_group,
-            ignore_failures=True,
-            recursive_mode_flags = {'f': 'a+rw', 'd': 'a+rwx'},
-  )
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def yarn(name = None):
-  import params
-  XmlConfig("mapred-site.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['mapred-site'],
-            owner=params.yarn_user,
-            mode='f'
-  )
-  XmlConfig("yarn-site.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['yarn-site'],
-            owner=params.yarn_user,
-            mode='f',
-            configuration_attributes=params.config['configuration_attributes']['yarn-site']
-  )
-  XmlConfig("capacity-scheduler.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['capacity-scheduler'],
-            owner=params.yarn_user,
-            mode='f'
-  )
-
-  if name in params.service_map:
-    service_name = params.service_map[name]
-
-    ServiceConfig(service_name,
-                  action="change_user",
-                  username = params.yarn_user,
-                  password = Script.get_password(params.yarn_user))
\ No newline at end of file
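
Each XmlConfig(...) call above materializes one Hadoop *-site.xml from the dict of properties held in params.config['configurations']. As a conceptual sketch only (ownership, modes, and configuration_attributes are handled by the real resource), rendering such a file from a plain dict could be done like this:

# Conceptual sketch of what an XmlConfig call produces: a Hadoop *-site.xml in the
# standard <configuration><property><name/><value/></property></configuration> form.
from xml.sax.saxutils import escape

def render_hadoop_site(properties):
    lines = ['<?xml version="1.0"?>', "<configuration>"]
    for name, value in sorted(properties.items()):
        lines.append("  <property>")
        lines.append("    <name>%s</name>" % escape(str(name)))
        lines.append("    <value>%s</value>" % escape(str(value)))
        lines.append("  </property>")
    lines.append("</configuration>")
    return "\n".join(lines) + "\n"

# Example (hypothetical property and path):
# open("/etc/hadoop/conf/yarn-site.xml", "w").write(
#     render_hadoop_site({"yarn.resourcemanager.hostname": "rm-host.example.com"}))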

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn_client.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn_client.py
deleted file mode 100644
index beea8b9..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn_client.py
+++ /dev/null
@@ -1,67 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import conf_select, stack_select
-from resource_management.libraries.functions.constants import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.core.exceptions import ClientComponentHasNoStatus
-from yarn import yarn
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-
-
-class YarnClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn()
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class YarnClientWindows(YarnClient):
-  pass
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class YarnClientDefault(YarnClient):
-  def get_component_name(self):
-    return "hadoop-client"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-client", params.version)
-
-
-if __name__ == "__main__":
-  YarnClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/container-executor.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/container-executor.cfg.j2 b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/container-executor.cfg.j2
deleted file mode 100644
index c6f1ff6..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/container-executor.cfg.j2
+++ /dev/null
@@ -1,40 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-yarn.nodemanager.local-dirs={{nm_local_dirs}}
-yarn.nodemanager.log-dirs={{nm_log_dirs}}
-yarn.nodemanager.linux-container-executor.group={{yarn_executor_container_group}}
-banned.users=hdfs,yarn,mapred,bin
-min.user.id={{min_user_id}}
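
To make the placeholders above concrete, here is a sketch of rendering this template with the stock jinja2 library and hypothetical values for the placeholders; Ambari itself renders it through its Template() resource with values taken from params.

# Sketch: render the container-executor.cfg template with hypothetical values.
from jinja2 import Template

TEMPLATE = """\
yarn.nodemanager.local-dirs={{nm_local_dirs}}
yarn.nodemanager.log-dirs={{nm_log_dirs}}
yarn.nodemanager.linux-container-executor.group={{yarn_executor_container_group}}
banned.users=hdfs,yarn,mapred,bin
min.user.id={{min_user_id}}
"""

print(Template(TEMPLATE).render(
    nm_local_dirs="/hadoop/yarn/local",         # hypothetical value
    nm_log_dirs="/hadoop/yarn/log",             # hypothetical value
    yarn_executor_container_group="hadoop",     # hypothetical value
    min_user_id="1000"))                        # hypothetical value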


http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/service_check.py
deleted file mode 100644
index 981f002..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/service_check.py
+++ /dev/null
@@ -1,152 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-from resource_management.libraries.script.script import Script
-from resource_management.core.shell import as_user
-from ambari_commons.os_family_impl import OsFamilyImpl
-from ambari_commons import OSConst
-from resource_management.libraries.functions.curl_krb_request import curl_krb_request
-from resource_management.libraries import functions
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.resources.execute_hadoop import ExecuteHadoop
-from resource_management.core.logger import Logger
-from resource_management.core.source import StaticFile
-from resource_management.core.resources.system import Execute, File
-
-
-class HdfsServiceCheck(Script):
-  pass
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class HdfsServiceCheckDefault(HdfsServiceCheck):
-  def service_check(self, env):
-    import params
-
-    env.set_params(params)
-    unique = functions.get_unique_id_and_date()
-    dir = params.hdfs_tmp_dir
-    tmp_file = format("{dir}/{unique}")
-
-    safemode_command = format("dfsadmin -fs {namenode_address} -safemode get | grep OFF")
-
-    if params.security_enabled:
-      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
-        user=params.hdfs_user
-      )
-    ExecuteHadoop(safemode_command,
-                  user=params.hdfs_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=20,
-                  bin_dir=params.hadoop_bin_dir
-    )
-    params.HdfsResource(dir,
-                        type="directory",
-                        action="create_on_execute",
-                        mode=0777
-    )
-    params.HdfsResource(tmp_file,
-                        type="file",
-                        action="delete_on_execute",
-    )
-
-    params.HdfsResource(tmp_file,
-                        type="file",
-                        source="/etc/passwd",
-                        action="create_on_execute"
-    )
-    params.HdfsResource(None, action="execute")
-
-    if params.has_journalnode_hosts:
-      if params.security_enabled:
-        for host in params.journalnode_hosts:
-          if params.https_only:
-            uri = format("https://{host}:{journalnode_port}")
-          else:
-            uri = format("http://{host}:{journalnode_port}")
-          response, errmsg, time_millis = curl_krb_request(params.tmp_dir, params.smoke_user_keytab,
-                                                           params.smokeuser_principal, uri, "jn_service_check",
-                                                           params.kinit_path_local, False, None, params.smoke_user)
-          if not response:
-            Logger.error("Cannot access WEB UI on: {0}. Error : {1}", uri, errmsg)
-            return 1
-      else:
-        journalnode_port = params.journalnode_port
-        checkWebUIFileName = "checkWebUI.py"
-        checkWebUIFilePath = format("{tmp_dir}/{checkWebUIFileName}")
-        comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
-        checkWebUICmd = format("ambari-python-wrap {checkWebUIFilePath} -m {comma_sep_jn_hosts} -p {journalnode_port} -s {https_only}")
-        File(checkWebUIFilePath,
-             content=StaticFile(checkWebUIFileName),
-             mode=0775)
-
-        Execute(checkWebUICmd,
-                logoutput=True,
-                try_sleep=3,
-                tries=5,
-                user=params.smoke_user
-        )
-
-    if params.is_namenode_master:
-      if params.has_zkfc_hosts:
-        pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-        pid_file = format("{pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
-        check_zkfc_process_cmd = as_user(format(
-          "ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.hdfs_user)
-        Execute(check_zkfc_process_cmd,
-                logoutput=True,
-                try_sleep=3,
-                tries=5
-        )
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class HdfsServiceCheckWindows(HdfsServiceCheck):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    unique = functions.get_unique_id_and_date()
-
-    #Hadoop uses POSIX-style paths, separator is always /
-    dir = params.hdfs_tmp_dir
-    tmp_file = dir + '/' + unique
-
-    #commands for execution
-    hadoop_cmd = "cmd /C %s" % (os.path.join(params.hadoop_home, "bin", "hadoop.cmd"))
-    create_dir_cmd = "%s fs -mkdir %s" % (hadoop_cmd, dir)
-    own_dir = "%s fs -chmod 777 %s" % (hadoop_cmd, dir)
-    test_dir_exists = "%s fs -test -e %s" % (hadoop_cmd, dir)
-    cleanup_cmd = "%s fs -rm %s" % (hadoop_cmd, tmp_file)
-    create_file_cmd = "%s fs -put %s %s" % (hadoop_cmd, os.path.join(params.hadoop_conf_dir, "core-site.xml"), tmp_file)
-    test_cmd = "%s fs -test -e %s" % (hadoop_cmd, tmp_file)
-
-    hdfs_cmd = "cmd /C %s" % (os.path.join(params.hadoop_home, "bin", "hdfs.cmd"))
-    safemode_command = "%s dfsadmin -safemode get | %s OFF" % (hdfs_cmd, params.grep_exe)
-
-    Execute(safemode_command, logoutput=True, try_sleep=3, tries=20)
-    Execute(create_dir_cmd, user=params.hdfs_user,logoutput=True, ignore_failures=True)
-    Execute(own_dir, user=params.hdfs_user,logoutput=True)
-    Execute(test_dir_exists, user=params.hdfs_user,logoutput=True)
-    Execute(create_file_cmd, user=params.hdfs_user,logoutput=True)
-    Execute(test_cmd, user=params.hdfs_user,logoutput=True)
-    Execute(cleanup_cmd, user=params.hdfs_user,logoutput=True)
-
-if __name__ == "__main__":
-  HdfsServiceCheck().execute()
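
The default HDFS service check above boils down to: kinit if secured, wait for the NameNode to leave safemode, then create, verify, and delete a small file under the HDFS tmp directory. A condensed sketch of that flow for an unsecured cluster follows; it assumes the hdfs CLI is on PATH and Python 2.7 (for subprocess.check_output), whereas the real check goes through ExecuteHadoop and HdfsResource.

# Sketch only: minimal HDFS smoke test on an unsecured cluster.
import subprocess
import time

def hdfs_smoke_test(tmp_path="/tmp/ambari_smoke_test"):
    # Wait for the NameNode to leave safemode, mirroring the dfsadmin check above.
    for _ in range(20):
        out = subprocess.check_output(["hdfs", "dfsadmin", "-safemode", "get"])
        if "OFF" in out:
            break
        time.sleep(3)
    else:
        raise RuntimeError("NameNode did not leave safemode")
    # Round-trip a small file through HDFS, then clean it up.
    subprocess.check_call(["hdfs", "dfs", "-mkdir", "-p", "/tmp"])
    subprocess.check_call(["hdfs", "dfs", "-put", "-f", "/etc/passwd", tmp_path])
    subprocess.check_call(["hdfs", "dfs", "-test", "-e", tmp_path])
    subprocess.check_call(["hdfs", "dfs", "-rm", "-skipTrash", tmp_path])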

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/setup_ranger_hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/setup_ranger_hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/setup_ranger_hdfs.py
deleted file mode 100644
index e3aff9d..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/setup_ranger_hdfs.py
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Execute
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.constants import Direction
-from resource_management.libraries.functions.format import format
-
-
-def setup_ranger_hdfs(upgrade_type=None):
-  import params
-
-  if params.has_ranger_admin:
-
-
-    stack_version = None
-
-    if upgrade_type is not None:
-      stack_version = params.version
-
-    if params.retryAble:
-      Logger.info("HDFS: Setup ranger: command retry enables thus retrying if ranger admin is down !")
-    else:
-      Logger.info("HDFS: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
-
-
-    if params.xml_configurations_supported:
-        from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
-        api_version=None
-        if params.stack_supports_ranger_kerberos:
-          api_version='v2'
-        setup_ranger_plugin('hadoop-client', 'hdfs', params.previous_jdbc_jar,
-                             params.downloaded_custom_connector, params.driver_curl_source,
-                             params.driver_curl_target, params.java_home,
-                             params.repo_name, params.hdfs_ranger_plugin_repo,
-                             params.ranger_env, params.ranger_plugin_properties,
-                             params.policy_user, params.policymgr_mgr_url,
-                             params.enable_ranger_hdfs, conf_dict=params.hadoop_conf_dir,
-                             component_user=params.hdfs_user, component_group=params.user_group, cache_service_list=['hdfs'],
-                             plugin_audit_properties=params.config['configurations']['ranger-hdfs-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-hdfs-audit'],
-                             plugin_security_properties=params.config['configurations']['ranger-hdfs-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-hdfs-security'],
-                             plugin_policymgr_ssl_properties=params.config['configurations']['ranger-hdfs-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-hdfs-policymgr-ssl'],
-                             component_list=['hadoop-client'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
-                             credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
-                             ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
-                             api_version=api_version ,stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble,
-                             is_security_enabled = params.security_enabled,
-                             is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
-                             component_user_principal=params.nn_principal_name if params.security_enabled else None,
-                             component_user_keytab=params.nn_keytab if params.security_enabled else None)
-    else:
-        from resource_management.libraries.functions.setup_ranger_plugin import setup_ranger_plugin
-
-        setup_ranger_plugin('hadoop-client', 'hdfs', params.previous_jdbc_jar,
-                            params.downloaded_custom_connector, params.driver_curl_source,
-                            params.driver_curl_target, params.java_home,
-                            params.repo_name, params.hdfs_ranger_plugin_repo,
-                            params.ranger_env, params.ranger_plugin_properties,
-                            params.policy_user, params.policymgr_mgr_url,
-                            params.enable_ranger_hdfs, conf_dict=params.hadoop_conf_dir,
-                            component_user=params.hdfs_user, component_group=params.user_group, cache_service_list=['hdfs'],
-                            plugin_audit_properties=params.config['configurations']['ranger-hdfs-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-hdfs-audit'],
-                            plugin_security_properties=params.config['configurations']['ranger-hdfs-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-hdfs-security'],
-                            plugin_policymgr_ssl_properties=params.config['configurations']['ranger-hdfs-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-hdfs-policymgr-ssl'],
-                            component_list=['hadoop-client'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
-                            credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
-                            ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
-                            stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
-
-    if stack_version and params.upgrade_direction == Direction.UPGRADE:
-      # when upgrading to a stack that has the REMOVE_RANGER_HDFS_PLUGIN_ENV feature, this env file must be removed
-      if check_stack_feature(StackFeature.REMOVE_RANGER_HDFS_PLUGIN_ENV, stack_version):
-        source_file = os.path.join(params.hadoop_conf_dir, 'set-hdfs-plugin-env.sh')
-        target_file = source_file + ".bak"
-        Execute(("mv", source_file, target_file), sudo=True, only_if=format("test -f {source_file}"))
-  else:
-    Logger.info('Ranger admin not installed')
-
-def create_ranger_audit_hdfs_directories():
-  import params
-
-  if params.has_ranger_admin:
-    if params.xml_configurations_supported and params.enable_ranger_hdfs and params.xa_audit_hdfs_is_enabled:
-      params.HdfsResource("/ranger/audit",
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.hdfs_user,
-                         group=params.hdfs_user,
-                         mode=0755,
-                         recursive_chmod=True,
-      )
-      params.HdfsResource("/ranger/audit/hdfs",
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.hdfs_user,
-                         group=params.hdfs_user,
-                         mode=0700,
-                         recursive_chmod=True,
-      )
-      params.HdfsResource(None, action="execute")
-  else:
-    Logger.info('Ranger admin not installed')
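
As context for the upgrade step above that retires set-hdfs-plugin-env.sh, here is a minimal standalone sketch of the same move-aside-with-.bak pattern in plain Python; the conf directory path is an assumption for illustration, the real script uses params.hadoop_conf_dir.

import os

# Sketch only: move set-hdfs-plugin-env.sh aside if it exists, mirroring the
# Execute(("mv", source_file, target_file), ...) call in the upgrade branch above.
hadoop_conf_dir = "/etc/hadoop/conf"  # assumed example path
source_file = os.path.join(hadoop_conf_dir, "set-hdfs-plugin-env.sh")

if os.path.isfile(source_file):
    os.rename(source_file, source_file + ".bak")
    print("Retired %s" % source_file)
else:
    print("Nothing to do: %s is not present" % source_file)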

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py
deleted file mode 100644
index 0f1f438..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py
+++ /dev/null
@@ -1,155 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import conf_select, stack_select
-from resource_management.libraries.functions.constants import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.security_commons import build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
-  FILE_TYPE_XML
-
-from hdfs_snamenode import snamenode
-from hdfs import hdfs
-from ambari_commons.os_family_impl import OsFamilyImpl
-from ambari_commons import OSConst
-
-from resource_management.core.logger import Logger
-
-class SNameNode(Script):
-  def install(self, env):
-    import params
-    env.set_params(params)
-    self.install_packages(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    hdfs("secondarynamenode")
-    snamenode(action="configure")
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    self.configure(env)
-    snamenode(action="start")
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    snamenode(action="stop")
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    snamenode(action="status")
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class SNameNodeDefault(SNameNode):
-
-  def get_component_name(self):
-    return "hadoop-hdfs-secondarynamenode"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing Stack Upgrade pre-restart")
-    import params
-    env.set_params(params)
-
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-hdfs-secondarynamenode", params.version)
-
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    props_value_check = None
-    props_empty_check = ['dfs.secondary.namenode.kerberos.internal.spnego.principal',
-                         'dfs.secondary.namenode.keytab.file',
-                         'dfs.secondary.namenode.kerberos.principal']
-    props_read_check = ['dfs.secondary.namenode.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-    hdfs_expectations.update(hdfs_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML,
-                                                  'hdfs-site.xml': FILE_TYPE_XML})
-
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('hdfs-site' not in security_params or
-                  'dfs.secondary.namenode.keytab.file' not in security_params['hdfs-site'] or
-                  'dfs.secondary.namenode.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['dfs.secondary.namenode.keytab.file'],
-                                security_params['hdfs-site'][
-                                  'dfs.secondary.namenode.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-      
-  def get_log_folder(self):
-    import params
-    return params.hdfs_log_dir
-  
-  def get_user(self):
-    import params
-    return params.hdfs_user
-
-  def get_pid_files(self):
-    import status_params
-    return [status_params.snamenode_pid_file]
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class SNameNodeWindows(SNameNode):
-  pass
-
-if __name__ == "__main__":
-  SNameNode().execute()
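
The security_status() method above boils down to comparing core-site/hdfs-site values against a small expectations structure built by build_expectations(). A self-contained sketch of that check follows; the sample core-site values are illustrative, not taken from a real cluster.

# Sketch of the expectation-check pattern used by security_status() above:
# exact-value checks plus must-not-be-empty checks over a config dict.
props_value_check = {"hadoop.security.authentication": "kerberos",
                     "hadoop.security.authorization": "true"}
props_empty_check = ["hadoop.security.auth_to_local"]

core_site = {  # illustrative values only
    "hadoop.security.authentication": "kerberos",
    "hadoop.security.authorization": "true",
    "hadoop.security.auth_to_local": "DEFAULT",
}

issues = [k for k, expected in props_value_check.items() if core_site.get(k) != expected]
issues += [k for k in props_empty_check if not core_site.get(k)]
print("validation issues: %s" % (issues if issues else "none"))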

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/status_params.py
deleted file mode 100644
index 153f9a6..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/status_params.py
+++ /dev/null
@@ -1,58 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from ambari_commons import OSCheck
-
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.script.script import Script
-
-config = Script.get_config()
-
-if OSCheck.is_windows_family():
-  namenode_win_service_name = "namenode"
-  datanode_win_service_name = "datanode"
-  snamenode_win_service_name = "secondarynamenode"
-  journalnode_win_service_name = "journalnode"
-  zkfc_win_service_name = "zkfc"
-else:
-  hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-  hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-  hadoop_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-  datanode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
-  namenode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
-  snamenode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
-  journalnode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
-  zkfc_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
-  nfsgateway_pid_file = format("{hadoop_pid_dir_prefix}/root/hadoop_privileged_nfs3.pid")
-
-  # Security related/required params
-  hostname = config['hostname']
-  security_enabled = config['configurations']['cluster-env']['security_enabled']
-  hdfs_user_principal = config['configurations']['hadoop-env']['hdfs_principal_name']
-  hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-
-  hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-
-  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-  tmp_dir = Script.get_tmp_dir()
-
-stack_name = default("/hostLevelParams/stack_name", None)
\ No newline at end of file
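
For reference, the pid-file layout computed by status_params.py above can be reproduced with plain string formatting. The prefix and user below are assumed sample values, not cluster defaults read from hadoop-env.

# Illustrative sketch of the pid-file naming convention above.
hadoop_pid_dir_prefix = "/var/run/hadoop"   # hadoop-env/hadoop_pid_dir_prefix (assumed)
hdfs_user = "hdfs"                          # hadoop-env/hdfs_user (assumed)

hadoop_pid_dir = "%s/%s" % (hadoop_pid_dir_prefix, hdfs_user)
pid_files = dict(
    (component, "%s/hadoop-%s-%s.pid" % (hadoop_pid_dir, hdfs_user, component))
    for component in ("namenode", "datanode", "secondarynamenode", "journalnode", "zkfc"))

# The NFS gateway runs privileged, so its pid file lives under the root user's dir.
pid_files["nfs3"] = "%s/root/hadoop_privileged_nfs3.pid" % hadoop_pid_dir_prefix

for component in sorted(pid_files):
    print("%s -> %s" % (component, pid_files[component]))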

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/utils.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/utils.py
deleted file mode 100644
index f76935a..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/utils.py
+++ /dev/null
@@ -1,384 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-import re
-import urllib2
-import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
-
-from resource_management.core.resources.system import Directory, File, Execute
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions import check_process_status
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.core import shell
-from resource_management.core.shell import as_user, as_sudo
-from resource_management.core.exceptions import ComponentIsNotRunning
-from resource_management.core.logger import Logger
-from resource_management.libraries.functions.curl_krb_request import curl_krb_request
-from resource_management.core.exceptions import Fail
-from resource_management.libraries.functions.namenode_ha_utils import get_namenode_states
-from resource_management.libraries.functions.show_logs import show_logs
-from resource_management.libraries.script.script import Script
-from ambari_commons.inet_utils import ensure_ssl_using_protocol
-from zkfc_slave import ZkfcSlaveDefault
-
-ensure_ssl_using_protocol(Script.get_force_https_protocol())
-
-def safe_zkfc_op(action, env):
-  """
-  Idempotent operation on the zkfc process to either start or stop it.
-  :param action: start or stop
-  :param env: environment
-  """
-  Logger.info("Performing action {0} on zkfc.".format(action))
-  zkfc = None
-  if action == "start":
-    try:
-      ZkfcSlaveDefault.status_static(env)
-    except ComponentIsNotRunning:
-      ZkfcSlaveDefault.start_static(env)
-
-  if action == "stop":
-    try:
-      ZkfcSlaveDefault.status_static(env)
-    except ComponentIsNotRunning:
-      pass
-    else:
-      ZkfcSlaveDefault.stop_static(env)
-
-def initiate_safe_zkfc_failover():
-  """
-  If this is the active namenode, initiate a safe failover and wait for it to become the standby.
-
-  If an error occurs, force a failover to happen by killing zkfc on this host. In this case, ZKFC will also
-  have to be started manually during the Restart.
-  """
-  import params
-
-  # Must kinit before running the HDFS command
-  if params.security_enabled:
-    Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
-            user = params.hdfs_user)
-
-  active_namenode_id = None
-  standby_namenode_id = None
-  active_namenodes, standby_namenodes, unknown_namenodes = get_namenode_states(params.hdfs_site, params.security_enabled, params.hdfs_user)
-  if active_namenodes:
-    active_namenode_id = active_namenodes[0][0]
-  if standby_namenodes:
-    standby_namenode_id = standby_namenodes[0][0]
-
-  if active_namenode_id:
-    Logger.info(format("Active NameNode id: {active_namenode_id}"))
-  if standby_namenode_id:
-    Logger.info(format("Standby NameNode id: {standby_namenode_id}"))
-  if unknown_namenodes:
-    for unknown_namenode in unknown_namenodes:
-      Logger.info("NameNode HA state for {0} is unknown".format(unknown_namenode[0]))
-
-  if params.namenode_id == active_namenode_id and params.other_namenode_id == standby_namenode_id:
-    # Failover if this NameNode is active and other NameNode is up and in standby (i.e. ready to become active on failover)
-    Logger.info(format("NameNode {namenode_id} is active and NameNode {other_namenode_id} is in standby"))
-
-    failover_command = format("hdfs haadmin -ns {dfs_ha_nameservices} -failover {namenode_id} {other_namenode_id}")
-    check_standby_cmd = format("hdfs haadmin -ns {dfs_ha_nameservices} -getServiceState {namenode_id} | grep standby")
-
-    msg = "Rolling Upgrade - Initiating a ZKFC failover on active NameNode host {0}.".format(params.hostname)
-    Logger.info(msg)
-    code, out = shell.call(failover_command, user=params.hdfs_user, logoutput=True)
-    Logger.info(format("Rolling Upgrade - failover command returned {code}"))
-    wait_for_standby = False
-
-    if code == 0:
-      wait_for_standby = True
-    else:
-      # Try to kill ZKFC manually
-      was_zkfc_killed = kill_zkfc(params.hdfs_user)
-      code, out = shell.call(check_standby_cmd, user=params.hdfs_user, logoutput=True)
-      Logger.info(format("Rolling Upgrade - check for standby returned {code}"))
-      if code == 255 and out:
-        Logger.info("Rolling Upgrade - NameNode is already down.")
-      else:
-        if was_zkfc_killed:
-          # Only mandate that this be the standby namenode if ZKFC was indeed killed to initiate a failover.
-          wait_for_standby = True
-
-    if wait_for_standby:
-      Logger.info("Waiting for this NameNode to become the standby one.")
-      Execute(check_standby_cmd,
-              user=params.hdfs_user,
-              tries=50,
-              try_sleep=6,
-              logoutput=True)
-  else:
-    msg = "Rolling Upgrade - Skipping ZKFC failover on NameNode host {0}.".format(params.hostname)
-    Logger.info(msg)
-
-def kill_zkfc(zkfc_user):
-  """
-  There are two potential methods for failing over the namenode, especially during a Rolling Upgrade.
-  Option 1. Kill zkfc on the active namenode, provided that the standby is up and has zkfc running on it.
-  Option 2. Silent failover
-  :param zkfc_user: User that started the ZKFC process.
-  :return: Return True if ZKFC was killed, otherwise, false.
-  """
-  import params
-  if params.dfs_ha_enabled:
-    if params.zkfc_pid_file:
-      check_process = as_user(format("ls {zkfc_pid_file} > /dev/null 2>&1 && ps -p `cat {zkfc_pid_file}` > /dev/null 2>&1"), user=zkfc_user)
-      code, out = shell.call(check_process)
-      if code == 0:
-        Logger.debug("ZKFC is running and will be killed.")
-        kill_command = format("kill -15 `cat {zkfc_pid_file}`")
-        Execute(kill_command,
-                user=zkfc_user
-        )
-        File(params.zkfc_pid_file,
-             action = "delete",
-             )
-        return True
-  return False
-
-def service(action=None, name=None, user=None, options="", create_pid_dir=False,
-            create_log_dir=False):
-  """
-  :param action: Either "start" or "stop"
-  :param name: Component name, e.g., "namenode", "datanode", "secondarynamenode", "zkfc"
-  :param user: User to run the command as
-  :param options: Additional options to pass to command as a string
-  :param create_pid_dir: Create PID directory
-  :param create_log_dir: Create log file directory
-  """
-  import params
-
-  options = options if options else ""
-  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
-  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
-  hadoop_env_exports = {
-    'HADOOP_LIBEXEC_DIR': params.hadoop_libexec_dir
-  }
-  log_dir = format("{hdfs_log_dir_prefix}/{user}")
-
-  # NFS GATEWAY is always started by root using jsvc due to rpcbind bugs
-  # on Linux distributions such as CentOS 6.2. https://bugzilla.redhat.com/show_bug.cgi?id=731542
-  if name == "nfs3" :
-    pid_file = format("{pid_dir}/hadoop_privileged_nfs3.pid")
-    custom_export = {
-      'HADOOP_PRIVILEGED_NFS_USER': params.hdfs_user,
-      'HADOOP_PRIVILEGED_NFS_PID_DIR': pid_dir,
-      'HADOOP_PRIVILEGED_NFS_LOG_DIR': log_dir
-    }
-    hadoop_env_exports.update(custom_export)
-
-  process_id_exists_command = as_sudo(["test", "-f", pid_file]) + " && " + as_sudo(["pgrep", "-F", pid_file])
-
-  # on STOP, directories shouldn't be created, since stop still uses the old
-  # dirs that were created during the previous start
-  if action != "stop":
-    if name == "nfs3":
-      Directory(params.hadoop_pid_dir_prefix,
-                mode=0755,
-                owner=params.root_user,
-                group=params.root_group
-      )
-    else:
-      Directory(params.hadoop_pid_dir_prefix,
-                  mode=0755,
-                  owner=params.hdfs_user,
-                  group=params.user_group
-      )
-    if create_pid_dir:
-      Directory(pid_dir,
-                owner=user,
-                group=params.user_group,
-                create_parents = True)
-    if create_log_dir:
-      if name == "nfs3":
-        Directory(log_dir,
-                  mode=0775,
-                  owner=params.root_user,
-                  group=params.user_group)
-      else:
-        Directory(log_dir,
-                  owner=user,
-                  group=params.user_group,
-                  create_parents = True)
-
-  if params.security_enabled and name == "datanode":
-    ## The directory where pid files are stored in the secure data environment.
-    hadoop_secure_dn_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-    hadoop_secure_dn_pid_file = format("{hadoop_secure_dn_pid_dir}/hadoop_secure_dn.pid")
-
-    # From the stack version that supports DATANODE_NON_ROOT onwards, we may start the datanode as a non-root user even in a secure cluster
-    if not (params.stack_version_formatted and check_stack_feature(StackFeature.DATANODE_NON_ROOT, params.stack_version_formatted)) or params.secure_dn_ports_are_in_use:
-      user = "root"
-      pid_file = format(
-        "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
-
-    if action == 'stop' and (params.stack_version_formatted and check_stack_feature(StackFeature.DATANODE_NON_ROOT, params.stack_version_formatted)) and \
-      os.path.isfile(hadoop_secure_dn_pid_file):
-        # Special handling for the case where a non-root secure DN is
-        # configured and then restarted to pick up new configs. Without
-        # this we would not be able to stop the instance that is still
-        # running as root.
-        user = "root"
-        
-        try:
-          check_process_status(hadoop_secure_dn_pid_file)
-          
-          custom_export = {
-            'HADOOP_SECURE_DN_USER': params.hdfs_user
-          }
-          hadoop_env_exports.update(custom_export)
-          
-        except ComponentIsNotRunning:
-          pass
-
-  hadoop_daemon = format("{hadoop_bin}/hadoop-daemon.sh")
-
-  if user == "root":
-    cmd = [hadoop_daemon, "--config", params.hadoop_conf_dir, action, name]
-    if options:
-      cmd += [options, ]
-    daemon_cmd = as_sudo(cmd)
-  else:
-    cmd = format("{ulimit_cmd} {hadoop_daemon} --config {hadoop_conf_dir} {action} {name}")
-    if options:
-      cmd += " " + options
-    daemon_cmd = as_user(cmd, user)
-     
-  if action == "start":
-    # remove pid file from dead process
-    File(pid_file, action="delete", not_if=process_id_exists_command)
-    
-    try:
-      Execute(daemon_cmd, not_if=process_id_exists_command, environment=hadoop_env_exports)
-    except:
-      show_logs(log_dir, user)
-      raise
-  elif action == "stop":
-    try:
-      Execute(daemon_cmd, only_if=process_id_exists_command, environment=hadoop_env_exports)
-    except:
-      show_logs(log_dir, user)
-      raise
-    File(pid_file, action="delete")
-
-def get_jmx_data(nn_address, modeler_type, metric, encrypted=False, security_enabled=False):
-  """
-  :param nn_address: Namenode Address, e.g., host:port, ** MAY ** be preceded with "http://" or "https://" already.
-  If no scheme is present, the encrypted param determines which one to use.
-  :param modeler_type: Modeler type to query using startswith function
-  :param metric: Metric to return
-  :return: Return an object representation of the metric, or None if it does not exist
-  """
-  if not nn_address or not modeler_type or not metric:
-    return None
-
-  nn_address = nn_address.strip()
-  if not nn_address.startswith("http"):
-    nn_address = ("https://" if encrypted else "http://") + nn_address
-  if not nn_address.endswith("/"):
-    nn_address = nn_address + "/"
-
-  nn_address = nn_address + "jmx"
-  Logger.info("Retrieve modeler: %s, metric: %s from JMX endpoint %s" % (modeler_type, metric, nn_address))
-
-  if security_enabled:
-    import params
-    data, error_msg, time_millis = curl_krb_request(params.tmp_dir, params.smoke_user_keytab, params.smokeuser_principal, nn_address,
-                            "jn_upgrade", params.kinit_path_local, False, None, params.smoke_user)
-  else:
-    data = urllib2.urlopen(nn_address).read()
-  my_data = None
-  if data:
-    data_dict = json.loads(data)
-    if data_dict:
-      for el in data_dict['beans']:
-        if el is not None and el['modelerType'] is not None and el['modelerType'].startswith(modeler_type):
-          if metric in el:
-            my_data = el[metric]
-            if my_data:
-              my_data = json.loads(str(my_data))
-              break
-  return my_data
-
-def get_port(address):
-  """
-  Extracts port from the address like 0.0.0.0:1019
-  """
-  if address is None:
-    return None
-  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
-  if m is not None and len(m.groups()) >= 2:
-    return int(m.group(2))
-  else:
-    return None
-
-
-def is_secure_port(port):
-  """
-  Returns True if port is root-owned at *nix systems
-  """
-  if port is not None:
-    return port < 1024
-  else:
-    return False
-
-def is_previous_fs_image():
-  """
-  Return true if there's a previous folder in the HDFS namenode directories.
-  """
-  import params
-  if params.dfs_name_dir:
-    nn_name_dirs = params.dfs_name_dir.split(',')
-    for nn_dir in nn_name_dirs:
-      prev_dir = os.path.join(nn_dir, "previous")
-      if os.path.isdir(prev_dir):
-        return True
-  return False
-
-def get_hdfs_binary(distro_component_name):
-  """
-  Get the hdfs binary to use depending on the stack and version.
-  :param distro_component_name: e.g., hadoop-hdfs-namenode, hadoop-hdfs-datanode
-  :return: The hdfs binary to use
-  """
-  import params
-  hdfs_binary = "hdfs"
-  if params.stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version_formatted):
-    hdfs_binary = "{0}/current/{1}/bin/hdfs".format(params.stack_root, distro_component_name)
-
-  return hdfs_binary
-
-def get_dfsadmin_base_command(hdfs_binary, use_specific_namenode = False):
-  """
-  Get the dfsadmin base command constructed using hdfs_binary path and passing namenode address as explicit -fs argument
-  :param hdfs_binary: path to hdfs binary to use
-  :param use_specific_namenode: if set and NameNode HA is enabled, the dfsadmin command will use the
-  current namenode's address
-  :return: the constructed dfsadmin base command
-  """
-  import params
-  dfsadmin_base_command = ""
-  if params.dfs_ha_enabled and use_specific_namenode:
-    dfsadmin_base_command = format("{hdfs_binary} dfsadmin -fs hdfs://{params.namenode_rpc}")
-  else:
-    dfsadmin_base_command = format("{hdfs_binary} dfsadmin -fs {params.namenode_address}")
-  return dfsadmin_base_command
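
The address parsing in get_port() and is_secure_port() above is self-contained, so it can be checked in isolation with the same regex; the sample addresses below are made up for illustration.

import re

# Standalone check of the address parsing used by get_port()/is_secure_port() above.
PORT_PATTERN = r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})'

for address in ("0.0.0.0:1019", "https://nn1.example.com:50470", "no-port-here"):
    m = re.search(PORT_PATTERN, address)
    port = int(m.group(2)) if m else None
    secure = port is not None and port < 1024   # root-owned ports on *nix
    print("%s -> port=%s, secure=%s" % (address, port, secure))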

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/zkfc_slave.py
deleted file mode 100644
index f1891a5..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/zkfc_slave.py
+++ /dev/null
@@ -1,225 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-# this is needed to avoid a circular dependency since utils.py calls this class
-import utils
-from hdfs import hdfs
-
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-from resource_management.core.logger import Logger
-from resource_management.core.exceptions import Fail
-from resource_management.core.resources.system import Directory
-from resource_management.core.resources.service import Service
-from resource_management.core import shell
-from resource_management.libraries.functions import conf_select, stack_select
-from resource_management.libraries.functions.constants import StackFeature
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions.security_commons import build_expectations
-from resource_management.libraries.functions.security_commons import cached_kinit_executor
-from resource_management.libraries.functions.security_commons import get_params_from_filesystem
-from resource_management.libraries.functions.security_commons import validate_security_config_properties
-from resource_management.libraries.functions.security_commons import FILE_TYPE_XML
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.libraries.script import Script
-from resource_management.libraries.functions.version_select_util import get_component_version
-
-class ZkfcSlave(Script):
-  def get_component_name(self):
-    import params
-    if params.version_for_stack_feature_checks and check_stack_feature(StackFeature.ZKFC_VERSION_ADVERTISED, params.version_for_stack_feature_checks):
-      # params.version is not defined when installing cluster from blueprint
-      return "hadoop-hdfs-zkfc"
-    pass
-
-  def install(self, env):
-    import params
-    env.set_params(params)
-    self.install_packages(env)
-    
-  def configure(self, env):
-    ZkfcSlave.configure_static(env)
-    
-  @staticmethod
-  def configure_static(env):
-    import params
-    env.set_params(params)
-    hdfs("zkfc_slave")
-    pass
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class ZkfcSlaveDefault(ZkfcSlave):
-
-  def start(self, env, upgrade_type=None):
-    ZkfcSlaveDefault.start_static(env, upgrade_type)
-    
-  @staticmethod
-  def start_static(env, upgrade_type=None):
-    import params
-
-    env.set_params(params)
-    ZkfcSlave.configure_static(env)
-    Directory(params.hadoop_pid_dir_prefix,
-              mode=0755,
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-    # format the znode for this HA setup
-    # only run this format command if the active namenode hostname is set
-    # The Ambari UI HA Wizard prompts the user to run this command
-    # manually, so this guarantees it is only run in the Blueprints case
-    if params.dfs_ha_enabled and \
-       params.dfs_ha_namenode_active is not None:
-      success =  initialize_ha_zookeeper(params)
-      if not success:
-        raise Fail("Could not initialize HA state in zookeeper")
-
-    utils.service(
-      action="start", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
-      create_log_dir=True
-    )
-  
-  def stop(self, env, upgrade_type=None):
-    ZkfcSlaveDefault.stop_static(env, upgrade_type)
-
-  @staticmethod
-  def stop_static(env, upgrade_type=None):
-    import params
-
-    env.set_params(params)
-    utils.service(
-      action="stop", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
-      create_log_dir=True
-    )
-
-
-  def status(self, env):
-    ZkfcSlaveDefault.status_static(env)
-    
-  @staticmethod
-  def status_static(env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.zkfc_pid_file)
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'core-site.xml': FILE_TYPE_XML})
-    result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      if not result_issues:  # If all validations passed successfully
-        if status_params.hdfs_user_principal or status_params.hdfs_user_keytab:
-          try:
-            cached_kinit_executor(status_params.kinit_path_local,
-                                  status_params.hdfs_user,
-                                  status_params.hdfs_user_keytab,
-                                  status_params.hdfs_user_principal,
-                                  status_params.hostname,
-                                  status_params.tmp_dir)
-            self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-          except Exception as e:
-            self.put_structured_out({"securityState": "ERROR"})
-            self.put_structured_out({"securityStateErrorInfo": str(e)})
-        else:
-          self.put_structured_out(
-            {"securityIssuesFound": "hdfs principal and/or keytab file is not specified"})
-          self.put_structured_out({"securityState": "UNSECURED"})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-      
-  def get_log_folder(self):
-    import params
-    return params.hdfs_log_dir
-  
-  def get_user(self):
-    import params
-    return params.hdfs_user
-
-  def get_pid_files(self):
-    import status_params
-    return [status_params.zkfc_pid_file]
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing Stack Upgrade pre-restart")
-    import params
-    env.set_params(params)
-    if params.version and check_stack_feature(StackFeature.ZKFC_VERSION_ADVERTISED, params.version) \
-        and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-hdfs-zkfc", params.version)
-
-def initialize_ha_zookeeper(params):
-  try:
-    iterations = 10
-    formatZK_cmd = "hdfs zkfc -formatZK -nonInteractive"
-    Logger.info("Initialize HA state in ZooKeeper: %s" % (formatZK_cmd))
-    for i in range(iterations):
-      Logger.info('Try %d out of %d' % (i+1, iterations))
-      code, out = shell.call(formatZK_cmd, logoutput=False, user=params.hdfs_user)
-      if code == 0:
-        Logger.info("HA state initialized in ZooKeeper successfully")
-        return True
-      elif code == 2:
-        Logger.info("HA state already initialized in ZooKeeper")
-        return True
-      else:
-        Logger.warning('HA state initialization in ZooKeeper failed with error code %d. Will retry' % (code))
-  except Exception as ex:
-    Logger.error('HA state initialization in ZooKeeper threw an exception. Reason %s' %(str(ex)))
-  return False
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class ZkfcSlaveWindows(ZkfcSlave):
-  def start(self, env):
-    import params
-    self.configure(env)
-    Service(params.zkfc_win_service_name, action="start")
-
-  def stop(self, env):
-    import params
-    Service(params.zkfc_win_service_name, action="stop")
-
-  def status(self, env):
-    import status_params
-    from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
-
-    env.set_params(status_params)
-    check_windows_service_status(status_params.zkfc_win_service_name)
-
-if __name__ == "__main__":
-  ZkfcSlave().execute()
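
The retry loop in initialize_ha_zookeeper() above relies on an exit-code convention: 0 means the znode was formatted, 2 means it was already formatted, anything else is retried. A minimal standalone sketch of that convention follows; run_format_zk is a stub standing in for the shell.call() that runs "hdfs zkfc -formatZK -nonInteractive".

# Sketch of the exit-code handling in initialize_ha_zookeeper().
def initialize_ha_state(run_format_zk, iterations=10):
    for attempt in range(1, iterations + 1):
        code = run_format_zk()
        if code in (0, 2):   # 0 = formatted, 2 = already formatted
            return True
        print("Attempt %d of %d failed with code %d, retrying" % (attempt, iterations, code))
    return False

# Example run: fail twice, then report "already formatted".
codes = iter([1, 1, 2])
print(initialize_ha_state(lambda: next(codes)))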

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/templates/exclude_hosts_list.j2
deleted file mode 100644
index a92cdc1..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in hdfs_exclude_file %}
-{{host}}
-{% endfor %}
\ No newline at end of file
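
A quick way to see how the exclude-hosts template above renders is to feed it a host list directly; this assumes the jinja2 package is available, and the host names are made-up examples.

from jinja2 import Template

template_src = "{% for host in hdfs_exclude_file %}\n{{host}}\n{% endfor %}"
print(Template(template_src).render(hdfs_exclude_file=["dn1.example.com", "dn2.example.com"]))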

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/templates/hdfs.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/templates/hdfs.conf.j2 b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/templates/hdfs.conf.j2
deleted file mode 100644
index fad5621..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/templates/hdfs.conf.j2
+++ /dev/null
@@ -1,35 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-{{hdfs_user}}   - nofile {{hdfs_user_nofile_limit}}
-{{hdfs_user}}   - nproc  {{hdfs_user_nproc_limit}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/templates/slaves.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/templates/slaves.j2 b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/templates/slaves.j2
deleted file mode 100644
index 4a9e713..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/templates/slaves.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in slave_hosts %}
-{{host}}
-{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/quicklinks/quicklinks.json
deleted file mode 100644
index 5318ba0..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/quicklinks/quicklinks.json
+++ /dev/null
@@ -1,80 +0,0 @@
-{
-  "name": "default",
-  "description": "default quick links configuration",
-  "configuration": {
-    "protocol":
-    {
-      "type":"https",
-      "checks":[
-        {
-          "property":"dfs.http.policy",
-          "desired":"HTTPS_ONLY",
-          "site":"hdfs-site"
-        }
-      ]
-    },
-
-    "links": [
-      {
-        "name": "namenode_ui",
-        "label": "NameNode UI",
-        "component_name": "NAMENODE",
-        "url":"%@://%@:%@",
-        "requires_user_name": "false",
-        "port":{
-          "http_property": "dfs.namenode.http-address",
-          "http_default_port": "50070",
-          "https_property": "dfs.namenode.https-address",
-          "https_default_port": "50470",
-          "regex": "\\w*:(\\d+)",
-          "site": "hdfs-site"
-        }
-      },
-      {
-        "name": "namenode_logs",
-        "label": "NameNode Logs",
-        "component_name": "NAMENODE",
-        "url":"%@://%@:%@/logs",
-        "requires_user_name": "false",
-        "port":{
-          "http_property": "dfs.namenode.http-address",
-          "http_default_port": "50070",
-          "https_property": "dfs.namenode.https-address",
-          "https_default_port": "50470",
-          "regex": "\\w*:(\\d+)",
-          "site": "hdfs-site"
-        }
-      },
-      {
-        "name": "namenode_jmx",
-        "label": "NameNode JMX",
-        "component_name": "NAMENODE",
-        "url":"%@://%@:%@/jmx",
-        "requires_user_name": "false",
-        "port":{
-          "http_property": "dfs.namenode.http-address",
-          "http_default_port": "50070",
-          "https_property": "dfs.namenode.https-address",
-          "https_default_port": "50470",
-          "regex": "\\w*:(\\d+)",
-          "site": "hdfs-site"
-        }
-      },
-      {
-        "name": "Thread Stacks",
-        "label": "Thread Stacks",
-        "component_name": "NAMENODE",
-        "url":"%@://%@:%@/stacks",
-        "requires_user_name": "false",
-        "port":{
-          "http_property": "dfs.namenode.http-address",
-          "http_default_port": "50070",
-          "https_property": "dfs.namenode.https-address",
-          "https_default_port": "50470",
-          "regex": "\\w*:(\\d+)",
-          "site": "hdfs-site"
-        }
-      }
-    ]
-  }
-}
\ No newline at end of file
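
The "regex" entries in the quicklinks configuration above are what pull the UI port out of the hdfs-site address properties. A small standalone check of that extraction follows; the address value is an illustrative sample, not a required default.

import re

port_regex = r"\w*:(\d+)"                # same pattern as in the quicklinks JSON
http_address = "nn1.example.com:50070"   # dfs.namenode.http-address (sample value)

m = re.search(port_regex, http_address)
print(m.group(1) if m else "50070")      # 50070 is the http_default_port fallback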

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/themes/theme.json b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/themes/theme.json
deleted file mode 100644
index 6f2b797..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/themes/theme.json
+++ /dev/null
@@ -1,179 +0,0 @@
-{
-  "name": "default",
-  "description": "Default theme for HDFS service",
-  "configuration": {
-    "layouts": [
-      {
-        "name": "default",
-        "tabs": [
-          {
-            "name": "settings",
-            "display-name": "Settings",
-            "layout": {
-              "tab-columns": "2",
-              "tab-rows": "1",
-              "sections": [
-                {
-                  "name": "section-namenode",
-                  "display-name": "NameNode",
-                  "row-index": "0",
-                  "column-index": "0",
-                  "row-span": "1",
-                  "column-span": "1",
-                  "section-columns": "1",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-namenode-col1",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                },
-                {
-                  "name": "section-datanode",
-                  "display-name": "DataNode",
-                  "row-index": "0",
-                  "column-index": "1",
-                  "row-span": "1",
-                  "column-span": "1",
-                  "section-columns": "1",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-datanode-col1",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                }
-              ]
-            }
-          }
-        ]
-      }
-    ],
-    "placement": {
-      "configuration-layout": "default",
-      "configs": [
-        {
-          "config": "hdfs-site/dfs.namenode.name.dir",
-          "subsection-name": "subsection-namenode-col1"
-        },
-        {
-          "config": "hadoop-env/namenode_heapsize",
-          "subsection-name": "subsection-namenode-col1"
-        },
-        {
-          "config": "hdfs-site/dfs.namenode.handler.count",
-          "subsection-name": "subsection-namenode-col1"
-        },
-        {
-          "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
-          "subsection-name": "subsection-namenode-col1"
-        },
-        {
-          "config": "hdfs-site/dfs.datanode.data.dir",
-          "subsection-name": "subsection-datanode-col1"
-        },
-        {
-          "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
-          "subsection-name": "subsection-datanode-col1"
-        },
-        {
-          "config": "hadoop-env/dtnode_heapsize",
-          "subsection-name": "subsection-datanode-col1"
-        },
-        {
-          "config": "hdfs-site/dfs.datanode.max.transfer.threads",
-          "subsection-name": "subsection-datanode-col1"
-        }
-      ]
-    },
-    "widgets": [
-      {
-        "config": "hdfs-site/dfs.namenode.name.dir",
-        "widget": {
-          "type": "directories"
-        }
-      },
-      {
-        "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "percent"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hdfs-site/dfs.namenode.handler.count",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "int"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hadoop-env/namenode_heapsize",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "GB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "int"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hdfs-site/dfs.datanode.data.dir",
-        "widget": {
-          "type": "directories"
-        }
-      },
-      {
-        "config": "hadoop-env/dtnode_heapsize",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "GB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "hdfs-site/dfs.datanode.max.transfer.threads",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "int"
-            }
-          ]
-        }
-      }
-    ]
-  }
-}
-
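Note: the theme removed above is what drives the HDFS "Settings" tab in the Ambari web UI: "placement" binds config keys such as hdfs-site/dfs.namenode.name.dir to subsections, and "widgets" chooses the control (directories, slider) for each key. A minimal sketch of inspecting such a theme offline with plain Python; the theme.json path is a placeholder and not part of the commit.

# Sketch: summarize an Ambari UI theme (placement + widget type per config key).
# Assumes a local copy of the removed theme saved as "theme.json" (placeholder path).
import json
from collections import defaultdict

def summarize_theme(path="theme.json"):
    with open(path) as f:
        cfg = json.load(f)["configuration"]
    widgets = {w["config"]: w["widget"]["type"] for w in cfg["widgets"]}
    by_subsection = defaultdict(list)
    for item in cfg["placement"]["configs"]:
        by_subsection[item["subsection-name"]].append(item["config"])
    for subsection, keys in sorted(by_subsection.items()):
        print(subsection)
        for key in keys:
            print("  %-55s %s" % (key, widgets.get(key, "default")))

if __name__ == "__main__":
    summarize_theme()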


[06/19] ambari git commit: AMBARI-19229. Remove HDP-3.0.0 stack definition from Ambari-2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/ranger-yarn-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/ranger-yarn-policymgr-ssl.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/ranger-yarn-policymgr-ssl.xml
deleted file mode 100644
index ad6cf4f..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/ranger-yarn-policymgr-ssl.xml
+++ /dev/null
@@ -1,66 +0,0 @@
-<?xml version="1.0"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>xasecure.policymgr.clientssl.keystore</name>
-    <value>{{stack_root}}/current/hadoop-client/conf/ranger-yarn-plugin-keystore.jks</value>
-    <description>Java Keystore files</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.keystore.password</name>
-    <value>myKeyFilePassword</value>
-    <property-type>PASSWORD</property-type>
-    <description>password for keystore</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.truststore</name>
-    <value>{{stack_root}}/current/hadoop-client/conf/ranger-yarn-plugin-truststore.jks</value>
-    <description>java truststore file</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.truststore.password</name>
-    <value>changeit</value>
-    <property-type>PASSWORD</property-type>
-    <description>java truststore password</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
-    <value>jceks://file{{credential_file}}</value>
-    <description>java keystore credential file</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
-    <value>jceks://file{{credential_file}}</value>
-    <description>java truststore credential file</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
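The xasecure.policymgr.clientssl.* properties above point the Ranger YARN plugin at its keystore and truststore; the {{stack_root}} and {{credential_file}} tokens are filled in by Ambari when the file is rendered on a host. A minimal sketch, assuming a rendered copy at a hypothetical path, of reading such a Hadoop-style configuration XML into a dict:

# Sketch: load name/value pairs from a Hadoop-style configuration XML.
# The file path below is an assumption for illustration, not taken from the commit.
import xml.etree.ElementTree as ET

def load_hadoop_config(path):
    props = {}
    for prop in ET.parse(path).getroot().findall("property"):
        props[prop.findtext("name")] = prop.findtext("value") or ""
    return props

conf = load_hadoop_config("/etc/hadoop/conf/ranger-policymgr-ssl-yarn.xml")
print(conf.get("xasecure.policymgr.clientssl.keystore"))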

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/ranger-yarn-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/ranger-yarn-security.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/ranger-yarn-security.xml
deleted file mode 100644
index 5f69962..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/ranger-yarn-security.xml
+++ /dev/null
@@ -1,58 +0,0 @@
-<?xml version="1.0"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>ranger.plugin.yarn.service.name</name>
-    <value>{{repo_name}}</value>
-    <description>Name of the Ranger service containing policies for this Yarn instance</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.yarn.policy.source.impl</name>
-    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
-    <description>Class to retrieve policies from the source</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.yarn.policy.rest.url</name>
-    <value>{{policymgr_mgr_url}}</value>
-    <description>URL to Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.yarn.policy.rest.ssl.config.file</name>
-    <value>/etc/hadoop/conf/ranger-policymgr-ssl-yarn.xml</value>
-    <description>Path to the file containing SSL details to contact Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.yarn.policy.pollIntervalMs</name>
-    <value>30000</value>
-    <description>How often to poll for changes in policies?</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.yarn.policy.cache.dir</name>
-    <value>/etc/ranger/{{repo_name}}/policycache</value>
-    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
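Values such as {{repo_name}} and {{policymgr_mgr_url}} above are template placeholders that Ambari substitutes when it generates ranger-yarn-security.xml for a host. A simplified sketch of that substitution using plain jinja2; Ambari uses its own resource_management templating, and the parameter values below are made up:

# Sketch: filling in the {{...}} placeholders with jinja2; values are hypothetical.
from jinja2 import Template

params = {
    "repo_name": "c1_yarn",                                       # assumed repo name
    "policymgr_mgr_url": "http://ranger-admin.example.com:6080",  # assumed Ranger Admin URL
}

for raw in ("/etc/ranger/{{repo_name}}/policycache", "{{policymgr_mgr_url}}"):
    print(Template(raw).render(**params))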

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-env.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-env.xml
deleted file mode 100644
index d8531b1..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-env.xml
+++ /dev/null
@@ -1,306 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_adding_forbidden="true">
-  <property>
-    <name>yarn_log_dir_prefix</name>
-    <value>/var/log/hadoop-yarn</value>
-    <display-name>YARN Log Dir Prefix</display-name>
-    <description>YARN Log Dir Prefix</description>
-    <value-attributes>
-      <type>directory</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn_pid_dir_prefix</name>
-    <value>/var/run/hadoop-yarn</value>
-    <display-name>YARN PID Dir Prefix</display-name>
-    <description>YARN PID Dir Prefix</description>
-    <value-attributes>
-      <type>directory</type>
-      <overridable>false</overridable>
-      <editable-only-at-install>true</editable-only-at-install>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn_user</name>
-    <display-name>Yarn User</display-name>
-    <value>yarn</value>
-    <property-type>USER</property-type>
-    <description>YARN User</description>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn_heapsize</name>
-    <value>1024</value>
-    <display-name>YARN Java heap size</display-name>
-    <description>Max heapsize for all YARN components using a numerical value in the scale of MB</description>
-    <value-attributes>
-      <type>int</type>
-      <unit>MB</unit>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>resourcemanager_heapsize</name>
-    <value>1024</value>
-    <display-name>ResourceManager Java heap size</display-name>
-    <description>Max heapsize for ResourceManager using a numerical value in the scale of MB</description>
-    <value-attributes>
-      <type>int</type>
-      <overridable>false</overridable>
-      <unit>MB</unit>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>nodemanager_heapsize</name>
-    <value>1024</value>
-    <display-name>NodeManager Java heap size</display-name>
-    <description>Max heapsize for NodeManager using a numerical value in the scale of MB</description>
-    <value-attributes>
-      <type>int</type>
-      <unit>MB</unit>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>min_user_id</name>
-    <value>1000</value>
-    <display-name>Minimum user ID for submitting job</display-name>
-    <description>Set to 0 to disallow root from submitting jobs. Set to 1000 to disallow all superusers from submitting jobs</description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>is_supported_yarn_ranger</name>
-    <value>true</value>
-    <description>Set to false by default; needs to be set to true in stacks that use the Ranger YARN plugin</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn_user_nofile_limit</name>
-    <value>32768</value>
-    <description>Max open files limit setting for YARN user.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn_user_nproc_limit</name>
-    <value>65536</value>
-    <description>Max number of processes limit setting for YARN user.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- These properties were inherited from HDP 2.1 -->
-  <property>
-    <name>apptimelineserver_heapsize</name>
-    <value>1024</value>
-    <display-name>AppTimelineServer Java heap size</display-name>
-    <description>Max heapsize for AppTimelineServer using a numerical value in the scale of MB</description>
-    <value-attributes>
-      <overridable>false</overridable>
-      <unit>MB</unit>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- These properties were inherited from HDP 2.2 -->
-  <property>
-    <name>yarn_cgroups_enabled</name>
-    <value>false</value>
-    <description>You can use CGroups to isolate CPU-heavy processes in a Hadoop cluster.</description>
-    <display-name>CPU Isolation</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>Enabled</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>Disabled</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- yarn-env.sh -->
-  <property>
-    <name>content</name>
-    <display-name>yarn-env template</display-name>
-    <description>This is the jinja template for yarn-env.sh file</description>
-    <value>
-      export HADOOP_YARN_HOME={{hadoop_yarn_home}}
-      export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
-      export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
-      export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
-      export JAVA_HOME={{java64_home}}
-      export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
-
-      # We need to add the EWMA appender for the yarn daemons only;
-      # however, YARN_ROOT_LOGGER is shared by the yarn client and the
-      # daemons. This is to restrict the EWMA appender to daemons only.
-      INVOKER="${0##*/}"
-      if [ "$INVOKER" == "yarn-daemon.sh" ]; then
-      export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,EWMA,RFA}
-      fi
-
-      # User for YARN daemons
-      export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
-
-      # resolve links - $0 may be a softlink
-      export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
-
-      # some Java parameters
-      # export JAVA_HOME=/home/y/libexec/jdk1.6.0/
-      if [ "$JAVA_HOME" != "" ]; then
-      #echo "run java in $JAVA_HOME"
-      JAVA_HOME=$JAVA_HOME
-      fi
-
-      if [ "$JAVA_HOME" = "" ]; then
-      echo "Error: JAVA_HOME is not set."
-      exit 1
-      fi
-
-      JAVA=$JAVA_HOME/bin/java
-      JAVA_HEAP_MAX=-Xmx1000m
-
-      # For setting YARN specific HEAP sizes please use this
-      # Parameter and set appropriately
-      YARN_HEAPSIZE={{yarn_heapsize}}
-
-      # check envvars which might override default args
-      if [ "$YARN_HEAPSIZE" != "" ]; then
-      JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
-      fi
-
-      # Resource Manager specific parameters
-
-      # Specify the max Heapsize for the ResourceManager using a numerical value
-      # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
-      # the value to 1000.
-      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
-      # and/or YARN_RESOURCEMANAGER_OPTS.
-      # If not specified, the default value will be picked from either YARN_HEAPMAX
-      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-      export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
-
-      # Specify the JVM options to be used when starting the ResourceManager.
-      # These options will be appended to the options specified as YARN_OPTS
-      # and therefore may override any similar flags set in YARN_OPTS
-      #export YARN_RESOURCEMANAGER_OPTS=
-
-      # Node Manager specific parameters
-
-      # Specify the max Heapsize for the NodeManager using a numerical value
-      # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
-      # the value to 1000.
-      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
-      # and/or YARN_NODEMANAGER_OPTS.
-      # If not specified, the default value will be picked from either YARN_HEAPMAX
-      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-      export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
-
-      # Specify the max Heapsize for the timeline server using a numerical value
-      # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
-      # the value to 1024.
-      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
-      # and/or YARN_TIMELINESERVER_OPTS.
-      # If not specified, the default value will be picked from either YARN_HEAPMAX
-      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-      export YARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}}
-
-      # Specify the JVM options to be used when starting the NodeManager.
-      # These options will be appended to the options specified as YARN_OPTS
-      # and therefore may override any similar flags set in YARN_OPTS
-      #export YARN_NODEMANAGER_OPTS=
-
-      # so that filenames w/ spaces are handled correctly in loops below
-      IFS=
-
-
-      # default log directory and file
-      if [ "$YARN_LOG_DIR" = "" ]; then
-      YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
-      fi
-      if [ "$YARN_LOGFILE" = "" ]; then
-      YARN_LOGFILE='yarn.log'
-      fi
-
-      # default policy file for service-level authorization
-      if [ "$YARN_POLICYFILE" = "" ]; then
-      YARN_POLICYFILE="hadoop-policy.xml"
-      fi
-
-      # restore ordinary behaviour
-      unset IFS
-
-
-      YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
-      YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
-      YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
-      YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
-      YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
-      YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
-      YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-      YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-      export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT"
-      export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT"
-      if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
-      YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
-      fi
-      YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
-      YARN_OPTS="$YARN_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}"
-    </value>
-    <value-attributes>
-      <type>content</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>service_check.queue.name</name>
-    <value>default</value>
-    <description>
-      The queue used by the service check.
-    </description>
-    <depends-on>
-      <property>
-        <type>capacity-scheduler</type>
-        <name>yarn.scheduler.capacity.root.queues</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
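The "content" property above is a jinja template for yarn-env.sh; the yarn_heapsize, resourcemanager_heapsize, nodemanager_heapsize and apptimelineserver_heapsize properties defined earlier in this file feed its export lines. A minimal sketch of that substitution with plain jinja2, using the 1024 MB defaults from this file (Ambari itself renders the full template through its own machinery):

# Sketch: render a small slice of the yarn-env.sh template with the defaults above.
from jinja2 import Template

snippet = (
    "export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}\n"
    "export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}\n"
    "export YARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}}\n"
)
print(Template(snippet).render(
    resourcemanager_heapsize=1024,
    nodemanager_heapsize=1024,
    apptimelineserver_heapsize=1024,
))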

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml
deleted file mode 100644
index 1d828ee..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml
+++ /dev/null
@@ -1,103 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>content</name>
-    <display-name>yarn-log4j template</display-name>
-    <description>Custom log4j.properties</description>
-    <value>
-      #Relative to Yarn Log Dir Prefix
-      yarn.log.dir=.
-      #
-      # Job Summary Appender
-      #
-      # Use following logger to send summary to separate file defined by
-      # hadoop.mapreduce.jobsummary.log.file rolled daily:
-      # hadoop.mapreduce.jobsummary.logger=INFO,JSA
-      #
-      hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-      hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-      log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
-      # Set the ResourceManager summary log filename
-      yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
-      # Set the ResourceManager summary log level and appender
-      yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
-      #yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
-
-      # To enable AppSummaryLogging for the RM,
-      # set yarn.server.resourcemanager.appsummary.logger to
-      # LEVEL,RMSUMMARY in hadoop-env.sh
-
-      # Appender for ResourceManager Application Summary Log
-      # Requires the following properties to be set
-      #    - hadoop.log.dir (Hadoop Log directory)
-      #    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
-      #    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
-      log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
-      log4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
-      log4j.appender.RMSUMMARY.MaxFileSize=256MB
-      log4j.appender.RMSUMMARY.MaxBackupIndex=20
-      log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
-      log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-      log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-      log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-      log4j.appender.JSA.DatePattern=.yyyy-MM-dd
-      log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-      log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
-      log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
-
-      # Appender for viewing information for errors and warnings
-      yarn.ewma.cleanupInterval=300
-      yarn.ewma.messageAgeLimitSeconds=86400
-      yarn.ewma.maxUniqueMessages=250
-      log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
-      log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
-      log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
-      log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
-
-      # Audit logging for ResourceManager
-      rm.audit.logger=${hadoop.root.logger}
-      log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger}
-      log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false
-      log4j.appender.RMAUDIT=org.apache.log4j.DailyRollingFileAppender
-      log4j.appender.RMAUDIT.File=${yarn.log.dir}/rm-audit.log
-      log4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout
-      log4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-      log4j.appender.RMAUDIT.DatePattern=.yyyy-MM-dd
-
-      # Audit logging for NodeManager
-      nm.audit.logger=${hadoop.root.logger}
-      log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger}
-      log4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false
-      log4j.appender.NMAUDIT=org.apache.log4j.DailyRollingFileAppender
-      log4j.appender.NMAUDIT.File=${yarn.log.dir}/nm-audit.log
-      log4j.appender.NMAUDIT.layout=org.apache.log4j.PatternLayout
-      log4j.appender.NMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-      log4j.appender.NMAUDIT.DatePattern=.yyyy-MM-dd
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
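Among the appenders configured above, RMSUMMARY is size-based (RollingFileAppender, MaxFileSize=256MB, MaxBackupIndex=20) while JSA, RMAUDIT and NMAUDIT roll daily. A quick back-of-the-envelope check of the worst-case disk footprint of RMSUMMARY (current file plus backups):

# Sketch: worst-case disk usage of the RMSUMMARY appender as configured above.
max_file_mb = 256        # MaxFileSize
backups = 20             # MaxBackupIndex
total_mb = max_file_mb * (backups + 1)
print("~%d MB (~%.1f GB)" % (total_mb, total_mb / 1024.0))   # ~5376 MB (~5.2 GB)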

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-logsearch-conf.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-logsearch-conf.xml
deleted file mode 100644
index 95cf0c9..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-logsearch-conf.xml
+++ /dev/null
@@ -1,104 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>YARN</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>RESOURCEMANAGER:yarn_resourcemanager,yarn_historyserver,yarn_jobsummary;NODEMANAGER:yarn_nodemanager;APP_TIMELINE_SERVER:yarn_timelineserver</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder Config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-      "type":"yarn_nodemanager",
-      "rowtype":"service",
-      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/yarn-{{default('configurations/yarn-env/yarn_user', 'yarn')}}-nodemanager-*.log"
-    },
-    {
-      "type":"yarn_resourcemanager",
-      "rowtype":"service",
-      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/yarn-{{default('configurations/yarn-env/yarn_user', 'yarn')}}-resourcemanager-*.log"
-    },
-    {
-      "type":"yarn_timelineserver",
-      "rowtype":"service",
-      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/yarn-{{default('configurations/yarn-env/yarn_user', 'yarn')}}-timelineserver-*.log"
-    },
-    {
-      "type":"yarn_historyserver",
-      "rowtype":"service",
-      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/yarn-{{default('configurations/yarn-env/yarn_user', 'yarn')}}-historyserver-*.log"
-    },
-    {
-      "type":"yarn_jobsummary",
-      "rowtype":"service",
-      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/hadoop-mapreduce.jobsummary.log"
-    }
-   ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "yarn_historyserver",
-            "yarn_jobsummary",
-            "yarn_nodemanager",
-            "yarn_resourcemanager",
-            "yarn_timelineserver"
-          ]
-         }
-       },
-      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     }
-   ]
-}
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
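The Logfeeder metadata above tails the YARN daemon logs and splits each line with the grok message_pattern, which mirrors the log4j layout %d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n. A rough plain-regex equivalent in Python, applied to a made-up sample line (field names follow the grok pattern; the sample text is invented):

# Sketch: plain-regex equivalent of the grok message_pattern; the sample line is made up.
import re

pattern = re.compile(
    r"^(?P<logtime>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})\s+"
    r"(?P<level>[A-Z]+)\s+"
    r"(?P<logger_name>[\w.$]+)\s+"
    r"\((?P<file>[\w.]+):(?P<method>[\w<>]+)\((?P<line_number>\d+)\)\)\s+-\s+"
    r"(?P<log_message>.*)$"
)

sample = ("2016-12-19 17:48:23,123 INFO  resourcemanager.ResourceManager "
          "(ResourceManager.java:serviceStart(599)) - Transitioned to active state")
m = pattern.match(sample)
print(m.groupdict() if m else "no match")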

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
deleted file mode 100644
index 01c3b47..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
+++ /dev/null
@@ -1,1151 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<!-- Put site-specific property overrides in this file. -->
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
-  <!-- ResourceManager -->
-  <property>
-    <name>yarn.resourcemanager.hostname</name>
-    <value>localhost</value>
-    <description>The hostname of the RM.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.resource-tracker.address</name>
-    <value>localhost:8025</value>
-    <description>The address of the ResourceManager.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.scheduler.address</name>
-    <value>localhost:8030</value>
-    <description>The address of the scheduler interface.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.address</name>
-    <value>localhost:8050</value>
-    <description>
-      The address of the applications manager interface in the
-      RM.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.admin.address</name>
-    <value>localhost:8141</value>
-    <description>The address of the RM admin interface.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.scheduler.class</name>
-    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
-    <description>The class to use as the resource scheduler.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.minimum-allocation-mb</name>
-    <value>512</value>
-    <description>
-      The minimum allocation for every container request at the RM,
-      in MBs. Memory requests lower than this won't take effect,
-      and the specified value will get allocated at minimum.
-    </description>
-    <display-name>Minimum Container Size (Memory)</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>5120</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.nodemanager.resource.memory-mb</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.maximum-allocation-mb</name>
-    <value>5120</value>
-    <description>
-      The maximum allocation for every container request at the RM,
-      in MBs. Memory requests higher than this won't take effect,
-      and will get capped to this value.
-    </description>
-    <display-name>Maximum Container Size (Memory)</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>5120</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.nodemanager.resource.memory-mb</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.acl.enable</name>
-    <value>false</value>
-    <description>Whether ACLs are enabled.</description>
-    <depends-on>
-      <property>
-        <type>ranger-yarn-plugin-properties</type>
-        <name>ranger-yarn-plugin-enabled</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.admin.acl</name>
-    <value>yarn</value>
-    <description> ACL of who can be admin of the YARN cluster. </description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- NodeManager -->
-  <property>
-    <name>yarn.nodemanager.address</name>
-    <value>0.0.0.0:45454</value>
-    <description>The address of the container manager in the NM.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.resource.memory-mb</name>
-    <value>5120</value>
-    <description>Amount of physical memory, in MB, that can be allocated
-      for containers.</description>
-    <display-name>Memory allocated for all YARN containers on a node</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>268435456</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.application.classpath</name>
-    <value>$HADOOP_CONF_DIR,{{stack_root}}/current/hadoop-client/*,{{stack_root}}/current/hadoop-client/lib/*,{{stack_root}}/current/hadoop-hdfs-client/*,{{stack_root}}/current/hadoop-hdfs-client/lib/*,{{stack_root}}/current/hadoop-yarn-client/*,{{stack_root}}/current/hadoop-yarn-client/lib/*</value>
-    <description>Classpath for typical applications.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.vmem-pmem-ratio</name>
-    <value>2.1</value>
-    <description>Ratio of virtual memory to physical memory when
-      setting memory limits for containers. Container allocations are
-      expressed in terms of physical memory, and virtual memory usage
-      is allowed to exceed this allocation by this ratio.
-    </description>
-    <display-name>Virtual Memory Ratio</display-name>
-    <value-attributes>
-      <type>float</type>
-      <minimum>0.1</minimum>
-      <maximum>5.0</maximum>
-      <increment-step>0.1</increment-step>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.container-executor.class</name>
-    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
-    <description>ContainerExecutor for launching containers</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.group</name>
-    <value>hadoop</value>
-    <description>Unix group of the NodeManager</description>
-    <depends-on>
-      <property>
-        <type>yarn-env</type>
-        <name>yarn_cgroups_enabled</name>
-      </property>
-      <property>
-        <type>cluster-env</type>
-        <name>user_group</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.aux-services</name>
-    <value>mapreduce_shuffle,spark_shuffle,spark2_shuffle</value>
-    <description>Auxiliary services of the NodeManager. A valid service name should only contain a-zA-Z0-9_ and cannot start with numbers</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
-    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
-    <description>The auxiliary service class to use </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.log-dirs</name>
-    <value>/hadoop/yarn/log</value>
-    <description>
-      Where to store container logs. An application's localized log directory
-      will be found in ${yarn.nodemanager.log-dirs}/application_${appid}.
-      Individual containers' log directories will be below this, in directories
-      named container_{$contid}. Each container directory will contain the files
-      stderr, stdin, and syslog generated by that container.
-    </description>
-    <value-attributes>
-      <type>directories</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.local-dirs</name>
-    <value>/hadoop/yarn/local</value>
-    <description>
-      List of directories to store localized files in. An
-      application's localized file directory will be found in:
-      ${yarn.nodemanager.local-dirs}/usercache/${user}/appcache/application_${appid}.
-      Individual containers' work directories, called container_${contid}, will
-      be subdirectories of this.
-    </description>
-    <value-attributes>
-      <type>directories</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.container-monitor.interval-ms</name>
-    <value>3000</value>
-    <description>
-      The interval, in milliseconds, for which the node manager
-      waits between two cycles of monitoring its containers' memory usage.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.health-checker.interval-ms</name>
-    <value>135000</value>
-    <description>Frequency of running node health script.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.health-checker.script.timeout-ms</name>
-    <value>60000</value>
-    <description>Script time out period.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.log.retain-seconds</name>
-    <value>604800</value>
-    <description>
-      Time in seconds to retain user logs. Only applicable if
-      log aggregation is disabled.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.log-aggregation-enable</name>
-    <value>true</value>
-    <description>Whether to enable log aggregation. </description>
-    <display-name>Enable Log Aggregation</display-name>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.remote-app-log-dir</name>
-    <value>/app-logs</value>
-    <description>Location to aggregate logs to. </description>
-    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
-    <value>logs</value>
-    <description>
-      The remote log dir will be created at
-      {yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam}.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.log-aggregation.compression-type</name>
-    <value>gz</value>
-    <description>
-      T-file compression types used to compress aggregated logs.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.delete.debug-delay-sec</name>
-    <value>0</value>
-    <description>
-      Number of seconds after an application finishes before the nodemanager's
-      DeletionService will delete the application's localized file directory
-      and log directory.
-
-      To diagnose Yarn application problems, set this property's value large
-      enough (for example, to 600 = 10 minutes) to permit examination of these
-      directories. After changing the property's value, you must restart the
-      nodemanager in order for it to have an effect.
-
-      The roots of Yarn applications' work directories is configurable with
-      the yarn.nodemanager.local-dirs property (see below), and the roots
-      of the Yarn applications' log directories is configurable with the
-      yarn.nodemanager.log-dirs property (see also below).
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.log-aggregation.retain-seconds</name>
-    <value>2592000</value>
-    <description>
-      How long to keep aggregation logs before deleting them. -1 disables.
-      Be careful: if you set this too small, you will spam the name node.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.admin-env</name>
-    <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value>
-    <description>
-      Environment variables that should be forwarded from the NodeManager's
-      environment to the container's.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name>
-    <value>0.25</value>
-    <description>
-      The minimum fraction of number of disks to be healthy for the nodemanager
-      to launch new containers. This corresponds to both
-      yarn.nodemanager.local-dirs and yarn.nodemanager.log-dirs, i.e.
-      if fewer healthy local-dirs (or log-dirs) are available,
-      then new containers will not be launched on this node.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.am.max-attempts</name>
-    <value>2</value>
-    <description>
-      The maximum number of application attempts. It's a global
-      setting for all application masters. Each application master can specify
-      its individual maximum number of application attempts via the API, but the
-      individual number cannot be more than the global upper bound. If it is,
-      the resourcemanager will override it. The default number is set to 2, to
-      allow at least one retry for AM.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.webapp.address</name>
-    <value>localhost:8088</value>
-    <description>
-      The address of the RM web application.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.webapp.https.address</name>
-    <value>localhost:8090</value>
-    <description>
-      The https address of the RM web application.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.vmem-check-enabled</name>
-    <value>false</value>
-    <description>
-      Whether virtual memory limits will be enforced for containers.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.log.server.url</name>
-    <value>http://localhost:19888/jobhistory/logs</value>
-    <description>
-      URI for the HistoryServer's log resource
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.nodes.exclude-path</name>
-    <value>/etc/hadoop/conf/yarn.exclude</value>
-    <description>
-      Names a file that contains a list of hosts that are
-      not permitted to connect to the resource manager.  The full pathname of the
-      file must be specified.  If the value is empty, no hosts are
-      excluded.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.http.policy</name>
-    <value>HTTP_ONLY</value>
-    <description>
-      This configures the HTTP endpoint for YARN daemons. The following values are supported: HTTP_ONLY (service is provided only on http) and HTTPS_ONLY (service is provided only on https).
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- These configs were inherited from HDP 2.1 -->
-  <property>
-    <name>yarn.timeline-service.enabled</name>
-    <value>true</value>
-    <description>Indicate to clients whether timeline service is enabled or not.
-      If enabled, clients will put entities and events to the timeline server.
-    </description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.generic-application-history.store-class</name>
-    <value>org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore</value>
-    <description>
-      Store class name for history store, defaulting to file system store
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.webapp.address</name>
-    <value>localhost:8188</value>
-    <description>
-      The http address of the timeline service web application.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.webapp.https.address</name>
-    <value>localhost:8190</value>
-    <description>
-      The https address of the timeline service web application.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.address</name>
-    <value>localhost:10200</value>
-    <description>
-      This is default address for the timeline server to start
-      the RPC server.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.ttl-ms</name>
-    <description>Time to live for timeline store data in milliseconds.</description>
-    <value>2678400000</value>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms</name>
-    <description>Length of time to wait between deletion cycles of leveldb timeline store in milliseconds.</description>
-    <value>300000</value>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- These properties were inherited from HDP 2.2 -->
-  <property>
-    <name>hadoop.registry.rm.enabled</name>
-    <value>false</value>
-    <description>
-      Is the registry enabled? Does the RM start it up, create the user and system paths, and purge service records when containers, application attempts and applications complete?
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>hadoop.registry.zk.quorum</name>
-    <value>localhost:2181</value>
-    <description>
-      List of hostname:port pairs defining the zookeeper quorum binding for the registry
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.recovery.enabled</name>
-    <value>true</value>
-    <description>Enable the node manager to recover after starting</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.recovery.dir</name>
-    <value>{{yarn_log_dir_prefix}}/nodemanager/recovery-state</value>
-    <description>
-      The local filesystem directory in which the node manager will store
-      state when recovery is enabled.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.client.nodemanager-connect.retry-interval-ms</name>
-    <value>10000</value>
-    <description>Time interval between each attempt to connect to NM</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.client.nodemanager-connect.max-wait-ms</name>
-    <value>60000</value>
-    <description>Max time to wait to establish a connection to NM</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.recovery.enabled</name>
-    <value>true</value>
-    <description>
-      Enable RM to recover state after starting.
-      If true, then yarn.resourcemanager.store.class must be specified.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.work-preserving-recovery.enabled</name>
-    <value>true</value>
-    <description>
-      Enable RM work-preserving recovery. This configuration is private to YARN, for experimenting with the feature.
-    </description>
-    <display-name>Enable Work Preserving Restart</display-name>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.store.class</name>
-    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
-    <description>
-      The class to use as the persistent store.
-      If org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore is used,
-      the store is implicitly fenced; meaning a single ResourceManager
-      is able to use the store at any point in time.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.zk-address</name>
-    <value>localhost:2181</value>
-    <description>
-      List of Host:Port of the ZooKeeper servers to be used by the RM: comma-separated host:port pairs, each corresponding to a zk server, e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". If the optional chroot suffix is used, the example would look like "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a" where the client would be rooted at "/app/a" and all paths would be relative to this root - i.e. getting/setting/etc. of "/foo/bar" would result in operations being run on "/app/a/foo/bar" (from the server perspective).
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.zk-state-store.parent-path</name>
-    <value>/rmstore</value>
-    <description>Full path of the ZooKeeper znode where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore as the value for yarn.resourcemanager.store.class</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.zk-acl</name>
-    <value>world:anyone:rwcda</value>
-    <description>ACL's to be used for ZooKeeper znodes.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms</name>
-    <value>10000</value>
-    <description>Set the amount of time RM waits before allocating new containers on work-preserving-recovery. Such wait period gives RM a chance to settle down resyncing with NMs in the cluster on recovery, before assigning new containers to applications.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.connect.retry-interval.ms</name>
-    <value>30000</value>
-    <description>How often to try connecting to the ResourceManager.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.connect.max-wait.ms</name>
-    <value>900000</value>
-    <description>Maximum time to wait to establish connection to ResourceManager</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.zk-retry-interval-ms</name>
-    <value>1000</value>
-    <description>"Retry interval in milliseconds when connecting to ZooKeeper.
-      When HA is enabled, the value here is NOT used. It is generated
-      automatically from yarn.resourcemanager.zk-timeout-ms and
-      yarn.resourcemanager.zk-num-retries."
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.zk-num-retries</name>
-    <value>1000</value>
-    <description>Number of times RM tries to connect to ZooKeeper.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.zk-timeout-ms</name>
-    <value>10000</value>
-    <description>ZooKeeper session timeout in milliseconds. Session expiration is managed by the ZooKeeper cluster itself, not by the client. This value is used by the cluster to determine when the client's session expires. Expiration happens when the cluster does not hear from the client within the specified session timeout period (i.e. no heartbeat).</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.state-store.max-completed-applications</name>
-    <value>${yarn.resourcemanager.max-completed-applications}</value>
-    <description>The maximum number of completed applications the RM state store keeps, less than or equal to ${yarn.resourcemanager.max-completed-applications}. By default, it equals ${yarn.resourcemanager.max-completed-applications}. This ensures that the applications kept in the state store are consistent with the applications remembered in RM memory. Any value larger than ${yarn.resourcemanager.max-completed-applications} will be reset to ${yarn.resourcemanager.max-completed-applications}. Note that this value impacts the RM recovery performance. Typically, a smaller value indicates better performance on RM recovery.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.fs.state-store.retry-policy-spec</name>
-    <value>2000, 500</value>
-    <description>HDFS client retry policy specification. HDFS client retry is always enabled. Specified in pairs of sleep-time and number-of-retries as (t0, n0), (t1, n1), ...; the first n0 retries sleep t0 milliseconds on average, the following n1 retries sleep t1 milliseconds on average, and so on.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.fs.state-store.uri</name>
-    <value> </value>
-    <description>URI pointing to the location of the FileSystem path where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore as the value for yarn.resourcemanager.store.class</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.ha.enabled</name>
-    <value>false</value>
-    <description>enable RM HA or not</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
-    <value>org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler</value>
-    <description>Pre-requisite to use CGroups</description>
-    <depends-on>
-      <property>
-        <type>yarn-env</type>
-        <name>yarn_cgroups_enabled</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
-    <value>hadoop-yarn</value>
-    <description>Name of the Cgroups hierarchy under which all YARN jobs will be launched</description>
-    <depends-on>
-      <property>
-        <type>yarn-env</type>
-        <name>yarn_cgroups_enabled</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
-    <value>false</value>
-    <description>If true, YARN will automount the CGroup; however, the directory needs to already exist. Otherwise, the cgroup should be mounted by the admin.</description>
-    <depends-on>
-      <property>
-        <type>yarn-env</type>
-        <name>yarn_cgroups_enabled</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.cgroups.mount-path</name>
-    <value>/cgroup</value>
-    <description>Path used by the LCE to mount cgroups if not found. This path must exist before the NodeManager is launched.</description>
-    <depends-on>
-      <property>
-        <type>yarn-env</type>
-        <name>yarn_cgroups_enabled</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage</name>
-    <value>false</value>
-    <description>Strictly limit CPU resource usage to allocated usage even if spare CPU is available</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.resource.cpu-vcores</name>
-    <value>8</value>
-    <description>Number of vcores that can be allocated
-      for containers. This is used by the RM scheduler when allocating
-      resources for containers. This is not used to limit the number of
-      CPUs used by YARN containers. If it is set to -1 and
-      yarn.nodemanager.resource.detect-hardware-capabilities is true, it is
-      automatically determined from the hardware in case of Windows and Linux.
-      In other cases, number of vcores is 8 by default.
-    </description>
-    <display-name>Number of virtual cores</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>32</maximum>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
-    <value>80</value>
-    <description>The amount of CPU allocated for YARN containers - only effective when used with CGroups</description>
-    <display-name>Percentage of physical CPU allocated for all containers on a node</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>100</maximum>
-      <increment-step>1</increment-step>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.node-labels.fs-store.retry-policy-spec</name>
-    <value>2000, 500</value>
-    <description>
-      Retry policy used for FileSystem node label store. The policy is
-      specified by N pairs of sleep-time in milliseconds and number-of-retries
-      &quot;s1,n1,s2,n2,...&quot;.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb</name>
-    <value>1000</value>
-    <description>This is related to disk size on the machines; admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
-    <value>90</value>
-    <description>This is related to disk size on the machines; admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
-    <description>Defines how often NMs wake up to upload log files. The default value is -1. By default, the logs will be uploaded when the application is finished. By setting this configuration, logs can be uploaded periodically while the application is running. The minimum value that rolling-interval-seconds can be set to is 3600.</description>
-    <value>3600</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.log-aggregation.debug-enabled</name>
-    <value>false</value>
-    <description>
-      This configuration is for debug and test purposes.
-      By setting this configuration to true,
-      we can break the lower bound of yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.log-aggregation.num-log-files-per-app</name>
-    <value>30</value>
-    <description>This is a temporary solution. The configuration will be deleted once we find a more scalable method to write only a single log file per LRS.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.system-metrics-publisher.enabled</name>
-    <value>true</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size</name>
-    <value>10</value>
-    <description>Number of worker threads that send the yarn system metrics data.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.client.max-retries</name>
-    <value>30</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.client.retry-interval-ms</name>
-    <value>1000</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.ttl-enable</name>
-    <value>true</value>
-    <description>
-      Enable age off of timeline store data.
-    </description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.state-store-class</name>
-    <value>org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore</value>
-    <description>Store class name for timeline state store.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.leveldb-state-store.path</name>
-    <value>/hadoop/yarn/timeline</value>
-    <description>Store file name for leveldb state store.</description>
-    <value-attributes>
-      <type>directory</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.leveldb-timeline-store.path</name>
-    <value>/hadoop/yarn/timeline</value>
-    <description>Store file name for leveldb timeline store.</description>
-    <value-attributes>
-      <type>directory</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.leveldb-timeline-store.read-cache-size</name>
-    <value>104857600</value>
-    <description>
-      Size of read cache for uncompressed blocks for leveldb timeline store in bytes.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size</name>
-    <value>10000</value>
-    <description>
-      Size of cache for recently read entity start times for leveldb timeline store in number of entities.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size</name>
-    <value>10000</value>
-    <description>
-      Size of cache for recently written entity start times for leveldb timeline store in number of entities.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.http-authentication.type</name>
-    <value>simple</value>
-    <description>
-      Defines authentication used for the Timeline Server HTTP endpoint.
-      Supported values are: simple | kerberos | $AUTHENTICATION_HANDLER_CLASSNAME
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.http-authentication.simple.anonymous.allowed</name>
-    <value>true</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled</name>
-    <value>false</value>
-    <description>
-      Flag to enable override of the default kerberos authentication filter with
-      the RM authentication filter to allow authentication using delegation
-      tokens(fallback to kerberos if the tokens are missing).
-      Only applicable when the http authentication type is kerberos.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.bind-host</name>
-    <value>0.0.0.0</value>
-    <description>Default value is 0.0.0.0; when this is set, the service will bind on all interfaces. I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.bind-host</name>
-    <value>0.0.0.0</value>
-    <description>Default value is 0.0.0.0; when this is set, the service will bind on all interfaces. I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.bind-host</name>
-    <value>0.0.0.0</value>
-    <description>Default value is 0.0.0.0; when this is set, the service will bind on all interfaces. I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.node-labels.fs-store.root-dir</name>
-    <value>/system/yarn/node-labels</value>
-    <description>
-      URI for NodeLabelManager.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.minimum-allocation-vcores</name>
-    <value>1</value>
-    <description/>
-    <display-name>Minimum Container Size (VCores)</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>8</maximum>
-      <increment-step>1</increment-step>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.nodemanager.resource.cpu-vcores</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.maximum-allocation-vcores</name>
-    <value>8</value>
-    <description/>
-    <display-name>Maximum Container Size (VCores)</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>8</maximum>
-      <increment-step>1</increment-step>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.nodemanager.resource.cpu-vcores</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.node-labels.enabled</name>
-    <value>false</value>
-    <description>
-      Enable node labels to restrict YARN applications so that they run only on cluster nodes that have a specified node label.
-    </description>
-    <display-name>Node Labels</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>Enabled</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>Disabled</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.container-executor.class</name>
-    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
-    <description>ContainerExecutor for launching containers</description>
-    <depends-on>
-      <property>
-        <type>yarn-env</type>
-        <name>yarn_cgroups_enabled</name>
-      </property>
-      <property>
-        <type>core-site</type>
-        <name>hadoop.security.authentication</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.scheduler.monitor.enable</name>
-    <description>
-      Enable a set of periodic monitors (specified in
-      yarn.resourcemanager.scheduler.monitor.policies) that affect the
-      scheduler.
-    </description>
-    <value>false</value>
-    <display-name>Pre-emption</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>Enabled</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>Disabled</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- In HDP 2.3, these properties were deleted:
-yarn.node-labels.manager-class
--->
-
-  <!-- These configs were inherited from HDP 2.3 -->
-  <property>
-    <name>yarn.timeline-service.recovery.enabled</name>
-    <description>
-      Enable timeline server to recover state after starting. If
-      true, then yarn.timeline-service.state-store-class must be specified.
-    </description>
-    <value>true</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.authorization-provider</name>
-    <description> Yarn authorization provider class. </description>
-    <depends-on>
-      <property>
-        <type>ranger-yarn-plugin-properties</type>
-        <name>ranger-yarn-plugin-enabled</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!--ats v1.5 properties-->
-  <property>
-    <name>yarn.timeline-service.version</name>
-    <value>1.5</value>
-    <description>Timeline service version we&#x2019;re currently using.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.store-class</name>
-    <value>org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore</value>
-    <description>Main storage class for YARN timeline server.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.active-dir</name>
-    <value>/ats/active/</value>
-    <description>DFS path to store active application&#x2019;s timeline data</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.done-dir</name>
-    <value>/ats/done/</value>
-    <description>DFS path to store done application&#x2019;s timeline data</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes</name>
-    <value/>
-    <description>Plugins that can translate a timeline entity read request into a list of timeline cache ids, separated by commas. </description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- advanced ats v1.5 properties-->
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.summary-store</name>
-    <description>Summary storage for ATS v1.5</description>
-    <!-- Use rolling leveldb, advanced -->
-    <value>org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.scan-interval-seconds</name>
-    <description>
-      Scan interval for the ATS v1.5 entity group file system storage reader. This
-      value controls how frequently the reader will scan the HDFS active directory
-      for application status.
-    </description>
-    <!-- Default is 60 seconds, advanced -->
-    <value>60</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds</name>
-    <description>
-      Scan interval for the ATS v1.5 entity group file system storage cleaner. This
-      value controls how frequently the cleaner will scan the HDFS done directory
-      for stale application data.
-    </description>
-    <!-- 3600 is default, advanced -->
-    <value>3600</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.retain-seconds</name>
-    <description>
-      How long the ATS v1.5 entity group file system storage will keep an
-      application's data in the done directory.
-    </description>
-    <!-- 7 days is default, advanced -->
-    <value>604800</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- These configs were inherited from HDP 2.4 -->
-  <property>
-    <name>yarn.nodemanager.aux-services.spark_shuffle.class</name>
-    <value>org.apache.spark.network.yarn.YarnShuffleService</value>
-    <description>The auxiliary service class to use for Spark</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- These configs were inherited from HDP 2.5 -->
-  <property>
-    <name>yarn.nodemanager.aux-services</name>
-    <value>mapreduce_shuffle,spark_shuffle,spark2_shuffle</value>
-    <description>Auxiliary services of NodeManager. A valid service name should only contain a-zA-Z0-9_ and cannot start with a number.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.aux-services.spark2_shuffle.class</name>
-    <value>org.apache.spark.network.yarn.YarnShuffleService</value>
-    <description>The auxiliary service class to use for Spark 2</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.container-metrics.unregister-delay-ms</name>
-    <value>60000</value>
-    <description>The delay time in ms to unregister container metrics after completion.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath</name>
-    <value/>
-    <description>Classpath for all plugins defined in yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes.</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
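
A note on the retry-policy-spec format used above (yarn.resourcemanager.fs.state-store.retry-policy-spec and yarn.node-labels.fs-store.retry-policy-spec): the value is a flat, comma-separated list of (sleep-time-ms, number-of-retries) pairs, so "2000, 500" means 500 retries sleeping roughly 2000 ms each. The following is only an illustrative sketch, not part of the stack definition; the yarn-site.xml path and helper names are hypothetical. It reads properties from a Hadoop-style *-site.xml with the Python standard library and expands such a spec:

    import xml.etree.ElementTree as ET

    def read_site_properties(path):
        """Return a dict of property name -> value from a Hadoop-style *-site.xml."""
        props = {}
        for prop in ET.parse(path).getroot().findall('property'):
            name = prop.findtext('name')
            if name:
                props[name] = prop.findtext('value', default='')
        return props

    def expand_retry_policy(spec):
        """Expand a spec such as '2000, 500' into (sleep-ms, retries) pairs."""
        parts = [int(p.strip()) for p in spec.split(',') if p.strip()]
        return list(zip(parts[0::2], parts[1::2]))

    props = read_site_properties('yarn-site.xml')   # hypothetical local copy
    spec = props.get('yarn.resourcemanager.fs.state-store.retry-policy-spec', '2000, 500')
    print(expand_retry_policy(spec))                # -> [(2000, 500)]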

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json
deleted file mode 100644
index e690204..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json
+++ /dev/null
@@ -1,278 +0,0 @@
-{
-  "services": [
-    {
-      "name": "YARN",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "configurations": [
-        {
-          "yarn-site": {
-            "yarn.timeline-service.enabled": "true",
-            "yarn.timeline-service.http-authentication.type": "kerberos",
-            "yarn.acl.enable": "true",
-            "yarn.admin.acl": "${yarn-env/yarn_user},dr.who",
-            "yarn.timeline-service.http-authentication.signature.secret": "",
-            "yarn.timeline-service.http-authentication.signature.secret.file": "",
-            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
-            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
-            "yarn.timeline-service.http-authentication.token.validity": "",
-            "yarn.timeline-service.http-authentication.cookie.domain": "",
-            "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
-            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
-            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
-            "yarn.nodemanager.linux-container-executor.cgroups.mount-path": ""
-          }
-        },
-        {
-          "core-site": {
-            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
-            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
-          }
-        },
-        {
-          "capacity-scheduler": {
-            "yarn.scheduler.capacity.root.acl_administer_queue": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.default.acl_administer_queue": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.acl_administer_jobs": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.default.acl_submit_applications": "${yarn-env/yarn_user}"
-          }
-        },
-        {
-          "ranger-yarn-audit": {
-            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
-            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
-            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
-            "xasecure.audit.jaas.Client.option.storeKey": "false",
-            "xasecure.audit.jaas.Client.option.serviceName": "solr",
-            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
-          }
-        }
-      ],
-      "components": [
-        {
-          "name": "NODEMANAGER",
-          "identities": [
-            {
-              "name": "nodemanager_nm",
-              "principal": {
-                "value": "nm/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.nodemanager.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/nm.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.nodemanager.keytab"
-              }
-            },
-            {
-              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
-              "principal": {
-                "configuration": "hive-interactive-site/hive.llap.daemon.service.principal"
-              },
-              "keytab": {
-                "configuration": "hive-interactive-site/hive.llap.daemon.keytab.file"
-              },
-              "when" : {
-                "contains" : ["services", "HIVE"]
-              }
-            },
-            {
-              "name": "llap_zk_hive",
-              "principal": {
-                "value": "hive/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "hive-interactive-site/hive.llap.zk.sm.principal"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/hive.llap.zk.sm.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": "r"
-                },
-                "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file"
-              },
-              "when" : {
-                "contains" : ["services", "HIVE"]
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
-              }
-            }
-          ],
-          "configurations": [
-            {
-              "yarn-site": {
-                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
-              }
-            }
-          ]
-        },
-        {
-          "name": "RESOURCEMANAGER",
-          "identities": [
-            {
-              "name": "resource_manager_rm",
-              "principal": {
-                "value": "rm/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.resourcemanager.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/rm.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.resourcemanager.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
-              }
-            },
-            {
-              "name": "/YARN/RESOURCEMANAGER/resource_manager_rm",
-              "principal": {
-                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.principal"
-              },
-              "keytab": {
-                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.keyTab"
-              }
-            }
-          ]
-        },
-        {
-          "name": "APP_TIMELINE_SERVER",
-          "identities": [
-            {
-              "name": "app_timeline_server_yarn",
-              "principal": {
-                "value": "yarn/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.timeline-service.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/yarn.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.timeline-service.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
-              }
-            },
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "name": "MAPREDUCE2",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "components": [
-        {
-          "name": "HISTORYSERVER",
-          "identities": [
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            },
-            {
-              "name": "history_server_jhs",
-              "principal": {
-                "value": "jhs/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "mapred-site/mapreduce.jobhistory.principal",
-                "local_username": "${mapred-env/mapred_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/jhs.service.keytab",
-                "owner": {
-                  "name": "${mapred-env/mapred_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
-              }
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}
\ No newline at end of file
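
For reference, the kerberos.json above follows the usual Ambari Kerberos descriptor shape: services carry identities and configurations, and each component lists its own identities with principal/keytab blocks. A minimal sketch (standard json module; the file name is assumed to be a local copy of the descriptor) of walking that structure:

    import json

    with open('kerberos.json') as fh:           # hypothetical local copy of the descriptor
        descriptor = json.load(fh)

    for service in descriptor.get('services', []):
        print(service['name'])
        for component in service.get('components', []):
            for identity in component.get('identities', []):
                # shared identities such as "/spnego" reference a principal defined elsewhere
                principal = identity.get('principal', {}).get('value', '(referenced)')
                print('  %s / %s -> %s' % (component['name'], identity['name'], principal))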


[05/19] ambari git commit: AMBARI-19229. Remove HDP-3.0.0 stack definition from Ambari-2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
deleted file mode 100644
index 48352e8..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
+++ /dev/null
@@ -1,383 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>YARN</name>
-      <displayName>YARN</displayName>
-      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
-      <version>3.0.0.3.0</version>
-      <components>
-
-        <component>
-          <name>APP_TIMELINE_SERVER</name>
-          <displayName>App Timeline Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <reassignAllowed>true</reassignAllowed>
-
-          <commandScript>
-            <script>scripts/application_timeline_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-
-          <dependencies>
-            <dependency>
-              <name>TEZ/TEZ_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>SPARK/SPARK_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-        </component>
-
-        <component>
-          <name>RESOURCEMANAGER</name>
-          <displayName>ResourceManager</displayName>
-          <category>MASTER</category>
-          <cardinality>1-2</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <reassignAllowed>true</reassignAllowed>
-          <commandScript>
-            <script>scripts/resourcemanager.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-          <dependencies>
-            <dependency>
-              <name>TEZ/TEZ_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <logs>
-            <log>
-              <logId>yarn_resourcemanager</logId>
-              <primary>true</primary>
-            </log>
-            <log>
-              <logId>yarn_historyserver</logId>
-            </log>
-            <log>
-              <logId>yarn_jobsummary</logId>
-            </log>
-          </logs>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/resourcemanager.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-            <customCommand>
-              <name>REFRESHQUEUES</name>
-              <commandScript>
-                <script>scripts/resourcemanager.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-          <configuration-dependencies>
-            <config-type>capacity-scheduler</config-type>
-            <config-type>hdfs-site</config-type>
-          </configuration-dependencies>
-        </component>
-
-        <component>
-          <name>NODEMANAGER</name>
-          <displayName>NodeManager</displayName>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <decommissionAllowed>true</decommissionAllowed>
-          <commandScript>
-            <script>scripts/nodemanager.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-           <bulkCommands>
-             <displayName>NodeManagers</displayName>
-             <!-- Used by decommission and recommission -->
-             <masterComponent>RESOURCEMANAGER</masterComponent>
-           </bulkCommands>
-          <logs>
-            <log>
-              <logId>yarn_nodemanager</logId>
-            </log>
-          </logs>
-        </component>
-
-        <component>
-          <name>YARN_CLIENT</name>
-          <displayName>YARN Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <commandScript>
-            <script>scripts/yarn_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>yarn-site.xml</fileName>
-              <dictionaryName>yarn-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>core-site.xml</fileName>
-              <dictionaryName>core-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>yarn-env.sh</fileName>
-              <dictionaryName>yarn-env</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>log4j.properties</fileName>
-              <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>capacity-scheduler.xml</fileName>
-              <dictionaryName>capacity-scheduler</dictionaryName>
-            </configFile>                        
-          </configFiles>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hadoop-yarn</name>
-            </package>
-            <package>
-              <name>hadoop-hdfs</name>
-            </package>
-            <package>
-              <name>hadoop-mapreduce</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>HDFS</service>
-        <service>MAPREDUCE2</service>
-      </requiredServices>
-
-      <themes>
-        <theme>
-          <fileName>theme.json</fileName>
-          <default>true</default>
-        </theme>
-      </themes>
-
-      <quickLinksConfigurations>
-        <quickLinksConfiguration>
-          <fileName>quicklinks.json</fileName>
-          <default>true</default>
-        </quickLinksConfiguration>
-      </quickLinksConfigurations>
-
-      <configuration-dependencies>
-        <config-type>yarn-site</config-type>
-        <config-type>yarn-env</config-type>
-        <config-type>hdfs-site</config-type>
-        <config-type>hadoop-env</config-type>
-        <config-type>core-site</config-type>
-        <config-type>mapred-site</config-type>
-        <config-type>yarn-log4j</config-type>
-        <config-type>ams-ssl-client</config-type>
-        <config-type>ranger-yarn-plugin-properties</config-type>
-        <config-type>ranger-yarn-audit</config-type>
-        <config-type>ranger-yarn-policymgr-ssl</config-type>
-        <config-type>ranger-yarn-security</config-type>
-      </configuration-dependencies>
-
-      <widgetsFileName>YARN_widgets.json</widgetsFileName>
-      <metricsFileName>YARN_metrics.json</metricsFileName>
-    </service>
-
-    <service>
-      <name>MAPREDUCE2</name>
-      <displayName>MapReduce2</displayName>
-      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
-      <version>2.1.0.2.0.6.0</version>
-
-      <components>
-        <component>
-          <name>HISTORYSERVER</name>
-          <displayName>History Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <reassignAllowed>true</reassignAllowed>
-          <auto-deploy>
-            <enabled>true</enabled>
-            <co-locate>YARN/RESOURCEMANAGER</co-locate>
-          </auto-deploy>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>TEZ/TEZ_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>SLIDER/SLIDER</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/historyserver.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-          <logs>
-            <log>
-              <logId>mapred_historyserver</logId>
-              <primary>true</primary>
-            </log>
-          </logs>
-        </component>
-
-        <component>
-          <name>MAPREDUCE2_CLIENT</name>
-          <displayName>MapReduce2 Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <commandScript>
-            <script>scripts/mapreduce2_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>mapred-site.xml</fileName>
-              <dictionaryName>mapred-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>core-site.xml</fileName>
-              <dictionaryName>core-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>mapred-env.sh</fileName>
-              <dictionaryName>mapred-env</dictionaryName>
-            </configFile>
-          </configFiles>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hadoop-mapreduce</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/mapred_service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>YARN</service>
-      </requiredServices>
-
-      <themes-dir>themes-mapred</themes-dir>
-      <themes>
-        <theme>
-          <fileName>theme.json</fileName>
-          <default>true</default>
-        </theme>
-      </themes>
-
-      <quickLinksConfigurations-dir>quicklinks-mapred</quickLinksConfigurations-dir>
-      <quickLinksConfigurations>
-        <quickLinksConfiguration>
-          <fileName>quicklinks.json</fileName>
-          <default>true</default>
-        </quickLinksConfiguration>
-      </quickLinksConfigurations>
-
-      <configuration-dir>configuration-mapred</configuration-dir>
-
-      <configuration-dependencies>
-        <config-type>hdfs-site</config-type>
-        <config-type>hadoop-env</config-type>
-        <config-type>core-site</config-type>
-        <config-type>mapred-site</config-type>
-        <config-type>mapred-env</config-type>
-        <config-type>ssl-client</config-type>
-        <config-type>ssl-server</config-type>
-        <config-type>ams-ssl-client</config-type>
-      </configuration-dependencies>
-      <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
-      <widgetsFileName>MAPREDUCE2_widgets.json</widgetsFileName>
-      <metricsFileName>MAPREDUCE2_metrics.json</metricsFileName>
-    </service>
-  </services>
-</metainfo>
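
As a rough illustration only (not part of the commit; the file name is a hypothetical local copy), the service/component layout of a metainfo.xml like the one above can be listed with the Python standard library:

    import xml.etree.ElementTree as ET

    root = ET.parse('metainfo.xml').getroot()
    for service in root.findall('./services/service'):
        print(service.findtext('name'))
        for component in service.findall('./components/component'):
            print('  %s (%s, cardinality %s)' % (component.findtext('name'),
                                                 component.findtext('category'),
                                                 component.findtext('cardinality')))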

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/alerts/alert_nodemanager_health.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/alerts/alert_nodemanager_health.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/alerts/alert_nodemanager_health.py
deleted file mode 100644
index d7159e4..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/alerts/alert_nodemanager_health.py
+++ /dev/null
@@ -1,209 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
-import socket
-import urllib2
-import logging
-import traceback
-from ambari_commons import OSCheck
-from ambari_commons.inet_utils import resolve_address
-from resource_management.libraries.functions.curl_krb_request import curl_krb_request
-from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
-from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
-from resource_management.core.environment import Environment
-
-RESULT_CODE_OK = 'OK'
-RESULT_CODE_CRITICAL = 'CRITICAL'
-RESULT_CODE_UNKNOWN = 'UNKNOWN'
-
-NODEMANAGER_HTTP_ADDRESS_KEY = '{{yarn-site/yarn.nodemanager.webapp.address}}'
-NODEMANAGER_HTTPS_ADDRESS_KEY = '{{yarn-site/yarn.nodemanager.webapp.https.address}}'
-YARN_HTTP_POLICY_KEY = '{{yarn-site/yarn.http.policy}}'
-
-OK_MESSAGE = 'NodeManager Healthy'
-CRITICAL_CONNECTION_MESSAGE = 'Connection failed to {0} ({1})'
-CRITICAL_HTTP_STATUS_MESSAGE = 'HTTP {0} returned from {1} ({2}) \n{3}'
-CRITICAL_NODEMANAGER_STATUS_MESSAGE = 'NodeManager returned an unexpected status of "{0}"'
-CRITICAL_NODEMANAGER_UNKNOWN_JSON_MESSAGE = 'Unable to determine NodeManager health from unexpected JSON response'
-
-KERBEROS_KEYTAB = '{{yarn-site/yarn.nodemanager.webapp.spnego-keytab-file}}'
-KERBEROS_PRINCIPAL = '{{yarn-site/yarn.nodemanager.webapp.spnego-principal}}'
-SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
-SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
-EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
-
-NODEMANAGER_DEFAULT_PORT = 8042
-
-CONNECTION_TIMEOUT_KEY = 'connection.timeout'
-CONNECTION_TIMEOUT_DEFAULT = 5.0
-
-LOGGER_EXCEPTION_MESSAGE = "[Alert] NodeManager Health on {0} fails:"
-logger = logging.getLogger('ambari_alerts')
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (NODEMANAGER_HTTP_ADDRESS_KEY,NODEMANAGER_HTTPS_ADDRESS_KEY, EXECUTABLE_SEARCH_PATHS,
-  YARN_HTTP_POLICY_KEY, SMOKEUSER_KEY, KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY)
-  
-
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations (dictionary): a mapping of configuration key to value
-  parameters (dictionary): a mapping of script parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-  result_code = RESULT_CODE_UNKNOWN
-
-  if configurations is None:
-    return (result_code, ['There were no configurations supplied to the script.'])
-
-  if host_name is None:
-    host_name = socket.getfqdn()
-
-  scheme = 'http'
-  http_uri = None
-  https_uri = None
-  http_policy = 'HTTP_ONLY'
-
-  if SMOKEUSER_KEY in configurations:
-    smokeuser = configurations[SMOKEUSER_KEY]
-
-  executable_paths = None
-  if EXECUTABLE_SEARCH_PATHS in configurations:
-    executable_paths = configurations[EXECUTABLE_SEARCH_PATHS]
-
-  security_enabled = False
-  if SECURITY_ENABLED_KEY in configurations:
-    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
-
-  kerberos_keytab = None
-  if KERBEROS_KEYTAB in configurations:
-    kerberos_keytab = configurations[KERBEROS_KEYTAB]
-
-  kerberos_principal = None
-  if KERBEROS_PRINCIPAL in configurations:
-    kerberos_principal = configurations[KERBEROS_PRINCIPAL]
-    kerberos_principal = kerberos_principal.replace('_HOST', host_name)
-
-  if NODEMANAGER_HTTP_ADDRESS_KEY in configurations:
-    http_uri = configurations[NODEMANAGER_HTTP_ADDRESS_KEY]
-
-  if NODEMANAGER_HTTPS_ADDRESS_KEY in configurations:
-    https_uri = configurations[NODEMANAGER_HTTPS_ADDRESS_KEY]
-
-  if YARN_HTTP_POLICY_KEY in configurations:
-    http_policy = configurations[YARN_HTTP_POLICY_KEY]
-
-
-  # parse script arguments
-  connection_timeout = CONNECTION_TIMEOUT_DEFAULT
-  if CONNECTION_TIMEOUT_KEY in parameters:
-    connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
-
-
-  # determine the right URI and whether to use SSL
-  host_port = http_uri
-  if http_policy == 'HTTPS_ONLY':
-    scheme = 'https'
-
-    if https_uri is not None:
-      host_port = https_uri
-
-  label = ''
-  url_response = None
-  node_healthy = 'false'
-  total_time = 0
-
-  # replace the hostname with the host fqdn so the check works in all environments
-  if host_port is not None:
-    if ":" in host_port:
-      uri_host, uri_port = host_port.split(':')
-      host_port = '{0}:{1}'.format(host_name, uri_port)
-    else:
-      host_port = host_name
-
-  # some yarn-site structures don't have the web ui address
-  if host_port is None:
-    host_port = '{0}:{1}'.format(host_name, NODEMANAGER_DEFAULT_PORT)
-
-  query = "{0}://{1}/ws/v1/node/info".format(scheme, host_port)
-
-  try:
-    if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
-      env = Environment.get_instance()
-
-      # curl requires an integer timeout
-      curl_connection_timeout = int(connection_timeout)
-
-      kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
-
-      url_response, error_msg, time_millis  = curl_krb_request(env.tmp_dir, kerberos_keytab, kerberos_principal,
-        query, "nm_health_alert", executable_paths, False, "NodeManager Health", smokeuser,
-        connection_timeout=curl_connection_timeout, kinit_timer_ms = kinit_timer_ms)
-
-      json_response = json.loads(url_response)
-    else:
-      # execute the query for the JSON that includes the NodeManager status
-      url_response = urllib2.urlopen(query, timeout=connection_timeout)
-      json_response = json.loads(url_response.read())
-  except urllib2.HTTPError, httpError:
-    label = CRITICAL_HTTP_STATUS_MESSAGE.format(str(httpError.code), query,
-      str(httpError), traceback.format_exc())
-
-    return (RESULT_CODE_CRITICAL, [label])
-  except:
-    label = CRITICAL_CONNECTION_MESSAGE.format(query, traceback.format_exc())
-    return (RESULT_CODE_CRITICAL, [label])
-
-  # URL response received, parse it
-  try:
-    node_healthy = json_response['nodeInfo']['nodeHealthy']
-    node_healthy_report = json_response['nodeInfo']['healthReport']
-
-    # convert boolean to string
-    node_healthy = str(node_healthy)
-  except:
-    return (RESULT_CODE_CRITICAL, [query + "\n" + traceback.format_exc()])
-  finally:
-    if url_response is not None:
-      try:
-        url_response.close()
-      except:
-        pass
-
-  # proper JSON received, compare against known value
-  if node_healthy.lower() == 'true':
-    result_code = RESULT_CODE_OK
-    label = OK_MESSAGE
-  elif node_healthy.lower() == 'false':
-    result_code = RESULT_CODE_CRITICAL
-    label = node_healthy_report
-  else:
-    result_code = RESULT_CODE_CRITICAL
-    label = CRITICAL_NODEMANAGER_STATUS_MESSAGE.format(node_healthy)
-
-  return (result_code, [label])
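
Alert scripts such as the one above expose get_tokens() plus execute(configurations, parameters, host_name) and return a (result_code, [label]) tuple. Purely as a usage sketch under assumptions (the host name, address and values below are made up, and a reachable NodeManager would be needed for an OK result):

    configurations = {
        '{{yarn-site/yarn.nodemanager.webapp.address}}': 'nm-host.example.com:8042',
        '{{yarn-site/yarn.http.policy}}': 'HTTP_ONLY',
        '{{cluster-env/security_enabled}}': 'false',
        '{{cluster-env/smokeuser}}': 'ambari-qa',
    }
    result_code, labels = execute(configurations=configurations,
                                  parameters={'connection.timeout': 5.0},
                                  host_name='nm-host.example.com')
    print('%s: %s' % (result_code, labels[0]))   # e.g. "OK: NodeManager Healthy"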

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/alerts/alert_nodemanagers_summary.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/alerts/alert_nodemanagers_summary.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/alerts/alert_nodemanagers_summary.py
deleted file mode 100644
index adf27ec..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/alerts/alert_nodemanagers_summary.py
+++ /dev/null
@@ -1,219 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import urllib2
-import ambari_simplejson as json # simplejson is much faster than the Python 2.6 json module and has the same function set.
-import logging
-import traceback
-
-from ambari_commons.urllib_handlers import RefreshHeaderProcessor
-from resource_management.libraries.functions.curl_krb_request import curl_krb_request
-from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
-from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
-from resource_management.core.environment import Environment
-
-ERROR_LABEL = '{0} NodeManager{1} {2} unhealthy.'
-OK_LABEL = 'All NodeManagers are healthy'
-
-NODEMANAGER_HTTP_ADDRESS_KEY = '{{yarn-site/yarn.resourcemanager.webapp.address}}'
-NODEMANAGER_HTTPS_ADDRESS_KEY = '{{yarn-site/yarn.resourcemanager.webapp.https.address}}'
-YARN_HTTP_POLICY_KEY = '{{yarn-site/yarn.http.policy}}'
-
-KERBEROS_KEYTAB = '{{yarn-site/yarn.nodemanager.webapp.spnego-keytab-file}}'
-KERBEROS_PRINCIPAL = '{{yarn-site/yarn.nodemanager.webapp.spnego-principal}}'
-SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
-SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
-EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
-
-CONNECTION_TIMEOUT_KEY = 'connection.timeout'
-CONNECTION_TIMEOUT_DEFAULT = 5.0
-
-LOGGER_EXCEPTION_MESSAGE = "[Alert] NodeManager Health Summary on {0} fails:"
-logger = logging.getLogger('ambari_alerts')
-
-QRY = "Hadoop:service=ResourceManager,name=RMNMInfo"
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return NODEMANAGER_HTTP_ADDRESS_KEY, NODEMANAGER_HTTPS_ADDRESS_KEY, EXECUTABLE_SEARCH_PATHS, \
-    YARN_HTTP_POLICY_KEY, SMOKEUSER_KEY, KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY
-
-
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations (dictionary): a mapping of configuration key to value
-  parameters (dictionary): a mapping of script parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-
-  if configurations is None:
-    return ('UNKNOWN', ['There were no configurations supplied to the script.'])
-
-  scheme = 'http'  
-  http_uri = None
-  https_uri = None
-  http_policy = 'HTTP_ONLY'
-
-  security_enabled = False
-  if SECURITY_ENABLED_KEY in configurations:
-    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
-
-  executable_paths = None
-  if EXECUTABLE_SEARCH_PATHS in configurations:
-    executable_paths = configurations[EXECUTABLE_SEARCH_PATHS]
-
-  kerberos_keytab = None
-  if KERBEROS_KEYTAB in configurations:
-    kerberos_keytab = configurations[KERBEROS_KEYTAB]
-
-  kerberos_principal = None
-  if KERBEROS_PRINCIPAL in configurations:
-    kerberos_principal = configurations[KERBEROS_PRINCIPAL]
-    kerberos_principal = kerberos_principal.replace('_HOST', host_name)
-
-  if NODEMANAGER_HTTP_ADDRESS_KEY in configurations:
-    http_uri = configurations[NODEMANAGER_HTTP_ADDRESS_KEY]
-
-  if NODEMANAGER_HTTPS_ADDRESS_KEY in configurations:
-    https_uri = configurations[NODEMANAGER_HTTPS_ADDRESS_KEY]
-
-  if YARN_HTTP_POLICY_KEY in configurations:
-    http_policy = configurations[YARN_HTTP_POLICY_KEY]
-    
-  if SMOKEUSER_KEY in configurations:
-    smokeuser = configurations[SMOKEUSER_KEY]
-
-  # parse script arguments
-  connection_timeout = CONNECTION_TIMEOUT_DEFAULT
-  if CONNECTION_TIMEOUT_KEY in parameters:
-    connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
-
-  kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
-
-  # determine the right URI and whether to use SSL
-  uri = http_uri
-  if http_policy == 'HTTPS_ONLY':
-    scheme = 'https'
-
-    if https_uri is not None:
-      uri = https_uri
-
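-  # substitute this host into the configured web address, keeping only the port portion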
-  uri = str(host_name) + ":" + uri.split(":")[1]
-  live_nodemanagers_qry = "{0}://{1}/jmx?qry={2}".format(scheme, uri, QRY)
-  convert_to_json_failed = False
-  response_code = None
-  try:
-    if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
-      env = Environment.get_instance()
-
-      # curl requires an integer timeout
-      curl_connection_timeout = int(connection_timeout)
-
-      url_response, error_msg, time_millis  = curl_krb_request(env.tmp_dir, kerberos_keytab, kerberos_principal,
-        live_nodemanagers_qry, "nm_health_summary_alert", executable_paths, False,
-        "NodeManager Health Summary", smokeuser, connection_timeout=curl_connection_timeout,
-        kinit_timer_ms = kinit_timer_ms)
-
-      try:
-        url_response_json = json.loads(url_response)
-        live_nodemanagers = json.loads(find_value_in_jmx(url_response_json, "LiveNodeManagers", live_nodemanagers_qry))
-      except ValueError, error:
-        convert_to_json_failed = True
-        logger.exception("[Alert][{0}] Convert response to json failed or json doesn't contain needed data: {1}".
-        format("NodeManager Health Summary", str(error)))
-
-      if convert_to_json_failed:
-        response_code, error_msg, time_millis  = curl_krb_request(env.tmp_dir, kerberos_keytab, kerberos_principal,
-          live_nodemanagers_qry, "nm_health_summary_alert", executable_paths, True,
-          "NodeManager Health Summary", smokeuser, connection_timeout=curl_connection_timeout,
-          kinit_timer_ms = kinit_timer_ms)
-    else:
-      live_nodemanagers = json.loads(get_value_from_jmx(live_nodemanagers_qry,
-        "LiveNodeManagers", connection_timeout))
-
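-    # when the secured request could not be parsed as JSON, report based on the raw HTTP status code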
-    if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
-      if response_code in [200, 307] and convert_to_json_failed:
-        return ('UNKNOWN', ['HTTP {0} response (metrics unavailable)'.format(str(response_code))])
-      elif convert_to_json_failed and response_code not in [200, 307]:
-        raise Exception("[Alert][NodeManager Health Summary] Getting data from {0} failed with http code {1}".format(
-          str(live_nodemanagers_qry), str(response_code)))
-
-    unhealthy_count = 0
-
-    for nodemanager in live_nodemanagers:
-      health_report = nodemanager['State']
-      if health_report == 'UNHEALTHY':
-        unhealthy_count += 1
-
-    if unhealthy_count == 0:
-      result_code = 'OK'
-      label = OK_LABEL
-    else:
-      result_code = 'CRITICAL'
-      if unhealthy_count == 1:
-        label = ERROR_LABEL.format(unhealthy_count, '', 'is')
-      else:
-        label = ERROR_LABEL.format(unhealthy_count, 's', 'are')
-
-  except:
-    label = traceback.format_exc()
-    result_code = 'UNKNOWN'
-
-  return (result_code, [label])
-
-
-def get_value_from_jmx(query, jmx_property, connection_timeout):
-  response = None
-  
-  try:
-    # use a custom header processor that will look for the non-standard
-    # "Refresh" header and attempt to follow the redirect
-    url_opener = urllib2.build_opener(RefreshHeaderProcessor())
-    response = url_opener.open(query, timeout=connection_timeout)
-
-    data = response.read()
-    data_dict = json.loads(data)
-    return find_value_in_jmx(data_dict, jmx_property, query)
-  finally:
-    if response is not None:
-      try:
-        response.close()
-      except:
-        pass
-
-
-def find_value_in_jmx(data_dict, jmx_property, query):
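-  # try the first bean first, then fall back to scanning all beans for the MBean named by QRY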
-  json_data = data_dict["beans"][0]
-
-  if jmx_property not in json_data:
-    beans = data_dict['beans']
-    for jmx_prop_list_item in beans:
-      if "name" in jmx_prop_list_item and jmx_prop_list_item["name"] == QRY:
-        if jmx_property not in jmx_prop_list_item:
-          raise Exception("Unable to find {0} in JSON from {1} ".format(jmx_property, query))
-        json_data = jmx_prop_list_item
-
-  return json_data[jmx_property]
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/files/validateYarnComponentStatusWindows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/files/validateYarnComponentStatusWindows.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/files/validateYarnComponentStatusWindows.py
deleted file mode 100644
index 5e2b4d9..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/files/validateYarnComponentStatusWindows.py
+++ /dev/null
@@ -1,161 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import optparse
-import subprocess
-import ambari_simplejson as json # simplejson is much faster than the Python 2.6 json module and has the same function set.
-import urllib2
-
-RESOURCEMANAGER = 'rm'
-NODEMANAGER = 'nm'
-HISTORYSERVER = 'hs'
-
-STARTED_STATE = 'STARTED'
-RUNNING_STATE = 'RUNNING'
-
-#Return the response for the given path and address
-def getResponse(path, address, ssl_enabled):
-  if ssl_enabled:
-    url = 'https://' + address + path
-  else:
-    url = 'http://' + address + path
-
-  try:
-    handle = urllib2.urlopen(url)
-    output = handle.read()
-    handle.close()
-    response = json.loads(output)
-    if response is None:
-      print 'There is no response for url: ' + str(url)
-      exit(1)
-    return response
-  except Exception as e:
-    print 'Error getting response for url:' + str(url), e
-    exit(1)
-
-#Verify that the REST API is available for the given component
-def validateAvailability(component, path, address, ssl_enabled):
-
-  try:
-    response = getResponse(path, address, ssl_enabled)
-    is_valid = validateAvailabilityResponse(component, response)
-    if not is_valid:
-      exit(1)
-  except Exception as e:
-    print 'Error checking availability status of component', e
-    exit(1)
-
-#Validate component-specific response
-def validateAvailabilityResponse(component, response):
-  try:
-    if component == RESOURCEMANAGER:
-      rm_state = response['clusterInfo']['state']
-      if rm_state == STARTED_STATE:
-        return True
-      else:
-        print 'Resourcemanager is not started'
-        return False
-
-    elif component == NODEMANAGER:
-      node_healthy = bool(response['nodeInfo']['nodeHealthy'])
-      if node_healthy:
-        return True
-      else:
-        return False
-    elif component == HISTORYSERVER:
-      hs_start_time = response['historyInfo']['startedOn']
-      if hs_start_time > 0:
-        return True
-      else:
-        return False
-    else:
-      return False
-  except Exception as e:
-    print 'Error validating the availability response for ' + str(component), e
-    return False
-
-#Verify that the component has the resources it needs to work
-def validateAbility(component, path, address, ssl_enabled):
-
-  try:
-    response = getResponse(path, address, ssl_enabled)
-    is_valid = validateAbilityResponse(component, response)
-    if not is_valid:
-      exit(1)
-  except Exception as e:
-    print 'Error checking ability of component', e
-    exit(1)
-
-#Validate the component-specific response to confirm it has the resources required to work
-def validateAbilityResponse(component, response):
-  try:
-    if component == RESOURCEMANAGER:
-      nodes = []
-      if response.has_key('nodes') and not response['nodes'] == None and response['nodes'].has_key('node'):
-        nodes = response['nodes']['node']
-      connected_nodes_count = len(nodes)
-      if connected_nodes_count == 0:
-        print 'There are no NodeManagers connected to the ResourceManager'
-        return False
-      active_nodes = filter(lambda x: x['state'] == RUNNING_STATE, nodes)
-      active_nodes_count = len(active_nodes)
-
-      if active_nodes_count == 0:
-        print 'There are no active NodeManagers connected to the ResourceManager'
-        return False
-      else:
-        return True
-    else:
-      return False
-  except Exception as e:
-    print 'Error validation of ability response', e
-    return False
-
-#
-# Main.
-#
-def main():
-  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
-  parser.add_option("-p", "--port", dest="address", help="Host:Port for REST API of a desired component")
-  parser.add_option("-s", "--ssl", dest="ssl_enabled", help="Is SSL enabled for UI of component")
-
-  (options, args) = parser.parse_args()
-
-  component = args[0]
-
-  address = options.address
-  ssl_enabled = str(options.ssl_enabled).lower() == 'true'
-  if component == RESOURCEMANAGER:
-    path = '/ws/v1/cluster/info'
-  elif component == NODEMANAGER:
-    path = '/ws/v1/node/info'
-  elif component == HISTORYSERVER:
-    path = '/ws/v1/history/info'
-  else:
-    parser.error("Invalid component")
-
-  validateAvailability(component, path, address, ssl_enabled)
-
-  if component == RESOURCEMANAGER:
-    path = '/ws/v1/cluster/nodes'
-    validateAbility(component, path, address, ssl_enabled)
-
-if __name__ == "__main__":
-  main()

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/__init__.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/__init__.py
deleted file mode 100644
index 35de4bb..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/application_timeline_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/application_timeline_server.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/application_timeline_server.py
deleted file mode 100644
index 03fff21..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/application_timeline_server.py
+++ /dev/null
@@ -1,162 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import conf_select, stack_select
-from resource_management.libraries.functions.constants import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions import check_process_status
-from resource_management.libraries.functions.security_commons import build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties,\
-  FILE_TYPE_XML
-from resource_management.libraries.functions.format import format
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Execute
-
-from yarn import yarn
-from service import service
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-
-
-class ApplicationTimelineServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    service('timelineserver', action='start')
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    service('timelineserver', action='stop')
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn(name='apptimelineserver')
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class ApplicationTimelineServerWindows(ApplicationTimelineServer):
-  def status(self, env):
-    service('timelineserver', action='status')
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class ApplicationTimelineServerDefault(ApplicationTimelineServer):
-  def get_component_name(self):
-    return "hadoop-yarn-timelineserver"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing Stack Upgrade pre-restart")
-    import params
-    env.set_params(params)
-
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-yarn-timelineserver", params.version)
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.yarn_historyserver_pid_file)
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"yarn.timeline-service.enabled": "true",
-                           "yarn.timeline-service.http-authentication.type": "kerberos",
-                           "yarn.acl.enable": "true"}
-      props_empty_check = ["yarn.timeline-service.principal",
-                           "yarn.timeline-service.keytab",
-                           "yarn.timeline-service.http-authentication.kerberos.principal",
-                           "yarn.timeline-service.http-authentication.kerberos.keytab"]
-
-      props_read_check = ["yarn.timeline-service.keytab",
-                          "yarn.timeline-service.http-authentication.kerberos.keytab"]
-      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
-                                                  props_read_check)
-
-      yarn_expectations ={}
-      yarn_expectations.update(yarn_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'yarn-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, yarn_expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'yarn-site' not in security_params
-               or 'yarn.timeline-service.keytab' not in security_params['yarn-site']
-               or 'yarn.timeline-service.principal' not in security_params['yarn-site']) \
-            or 'yarn.timeline-service.http-authentication.kerberos.keytab' not in security_params['yarn-site'] \
-            or 'yarn.timeline-service.http-authentication.kerberos.principal' not in security_params['yarn-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.timeline-service.keytab'],
-                                security_params['yarn-site']['yarn.timeline-service.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.timeline-service.http-authentication.kerberos.keytab'],
-                                security_params['yarn-site']['yarn.timeline-service.http-authentication.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-  def get_log_folder(self):
-    import params
-    return params.yarn_log_dir
-  
-  def get_user(self):
-    import params
-    return params.yarn_user
-
-  def get_pid_files(self):
-    import status_params
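-    # move a PID file left at the old location to the current one before reporting it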
-    Execute(format("mv {status_params.yarn_historyserver_pid_file_old} {status_params.yarn_historyserver_pid_file}"),
-            only_if = format("test -e {status_params.yarn_historyserver_pid_file_old}", user=status_params.yarn_user))
-    return [status_params.yarn_historyserver_pid_file]
-
-if __name__ == "__main__":
-  ApplicationTimelineServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/historyserver.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/historyserver.py
deleted file mode 100644
index 8f5d380..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/historyserver.py
+++ /dev/null
@@ -1,192 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions import conf_select, stack_select
-from resource_management.libraries.functions.constants import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
-from resource_management.libraries.functions.security_commons import build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
-  FILE_TYPE_XML
-from resource_management.core.source import Template
-from resource_management.core.logger import Logger
-
-from install_jars import install_tez_jars
-from yarn import yarn
-from service import service
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-
-
-class HistoryServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    service('historyserver', action='stop', serviceName='mapreduce')
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn(name="historyserver")
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class HistoryserverWindows(HistoryServer):
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env)
-    service('historyserver', action='start', serviceName='mapreduce')
-
-  def status(self, env):
-    service('historyserver', action='status')
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class HistoryServerDefault(HistoryServer):
-  def get_component_name(self):
-    return "hadoop-mapreduce-historyserver"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing Stack Upgrade pre-restart")
-    import params
-    env.set_params(params)
-
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-mapreduce-historyserver", params.version)
-      # MC Hammer said, "Can't touch this"
-      copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
-      copy_to_hdfs("tez", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
-      copy_to_hdfs("slider", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
-      params.HdfsResource(None, action="execute")
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-
-    if params.stack_version_formatted_major and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major):
-      # MC Hammer said, "Can't touch this"
-      resource_created = copy_to_hdfs(
-        "mapreduce",
-        params.user_group,
-        params.hdfs_user,
-        skip=params.sysprep_skip_copy_tarballs_hdfs)
-      resource_created = copy_to_hdfs(
-        "tez",
-        params.user_group,
-        params.hdfs_user,
-        skip=params.sysprep_skip_copy_tarballs_hdfs) or resource_created
-      resource_created = copy_to_hdfs(
-        "slider",
-        params.user_group,
-        params.hdfs_user,
-        skip=params.sysprep_skip_copy_tarballs_hdfs) or resource_created
-      if resource_created:
-        params.HdfsResource(None, action="execute")
-    else:
-      # In stack versions without copy_tarball_to_hdfs support, tez.tar.gz was copied to a different folder in HDFS.
-      install_tez_jars()
-
-    service('historyserver', action='start', serviceName='mapreduce')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.mapred_historyserver_pid_file)
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      expectations = {}
-      expectations.update(build_expectations('mapred-site',
-                                             None,
-                                             [
-                                               'mapreduce.jobhistory.keytab',
-                                               'mapreduce.jobhistory.principal',
-                                               'mapreduce.jobhistory.webapp.spnego-keytab-file',
-                                               'mapreduce.jobhistory.webapp.spnego-principal'
-                                             ],
-                                             None))
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'mapred-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'mapred-site' not in security_params or
-               'mapreduce.jobhistory.keytab' not in security_params['mapred-site'] or
-               'mapreduce.jobhistory.principal' not in security_params['mapred-site'] or
-               'mapreduce.jobhistory.webapp.spnego-keytab-file' not in security_params['mapred-site'] or
-               'mapreduce.jobhistory.webapp.spnego-principal' not in security_params['mapred-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal not set."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.mapred_user,
-                                security_params['mapred-site']['mapreduce.jobhistory.keytab'],
-                                security_params['mapred-site']['mapreduce.jobhistory.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.mapred_user,
-                                security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-keytab-file'],
-                                security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-  def get_log_folder(self):
-    import params
-    return params.mapred_log_dir
-
-  def get_user(self):
-    import params
-    return params.mapred_user
-
-  def get_pid_files(self):
-    import status_params
-    return [status_params.mapred_historyserver_pid_file]
-
-if __name__ == "__main__":
-  HistoryServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/install_jars.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/install_jars.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/install_jars.py
deleted file mode 100644
index 728a014..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/install_jars.py
+++ /dev/null
@@ -1,99 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.functions.format import format
-import os
-import glob
-
-def install_tez_jars():
-  import params
-
-  destination_hdfs_dirs = get_tez_hdfs_dir_paths(params.tez_lib_uris)
-
-  # If tez libraries are to be stored in hdfs
-  if destination_hdfs_dirs:
-    for hdfs_dir in destination_hdfs_dirs:
-      params.HdfsResource(hdfs_dir,
-                           type="directory",
-                           action="create_on_execute",
-                           owner=params.tez_user,
-                           mode=0755
-      )
-
-    app_dir_path = None
-    lib_dir_path = None
-
-    if len(destination_hdfs_dirs) > 0:
-      for path in destination_hdfs_dirs:
-        if 'lib' in path:
-          lib_dir_path = path
-        else:
-          app_dir_path = path
-        pass
-      pass
-    pass
-
-    tez_jars = {}
-    if app_dir_path:
-      tez_jars[params.tez_local_api_jars] = app_dir_path
-    if lib_dir_path:
-      tez_jars[params.tez_local_lib_jars] = lib_dir_path
-
-    for src_file_regex, dest_dir in tez_jars.iteritems():
-      for src_filepath in glob.glob(src_file_regex):
-        src_filename = os.path.basename(src_filepath)
-        params.HdfsResource(format("{dest_dir}/{src_filename}"),
-                            type="file",
-                            action="create_on_execute",
-                            source=src_filepath,
-                            mode=0755,
-                            owner=params.tez_user
-         )
-        
-    for src_file_regex, dest_dir in tez_jars.iteritems():
-      for src_filepath in glob.glob(src_file_regex):
-        src_filename = os.path.basename(src_filepath)
-        params.HdfsResource(format("{dest_dir}/{src_filename}"),
-                            type="file",
-                            action="create_on_execute",
-                            source=src_filepath,
-                            mode=0755,
-                            owner=params.tez_user
-         )
-    params.HdfsResource(None, action="execute")
-
-
-def get_tez_hdfs_dir_paths(tez_lib_uris = None):
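-  # derive the HDFS directory paths referenced by tez.lib.uris, stripping the hdfs:// prefix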
-  hdfs_path_prefix = 'hdfs://'
-  lib_dir_paths = []
-  if tez_lib_uris and tez_lib_uris.strip().find(hdfs_path_prefix, 0) != -1:
-    dir_paths = tez_lib_uris.split(',')
-    for path in dir_paths:
-      if not "tez.tar.gz" in path:
-        lib_dir_path = path.replace(hdfs_path_prefix, '')
-        lib_dir_path = lib_dir_path if lib_dir_path.endswith(os.sep) else lib_dir_path + os.sep
-        lib_dir_paths.append(lib_dir_path)
-      else:
-        lib_dir_path = path.replace(hdfs_path_prefix, '')
-        lib_dir_paths.append(os.path.dirname(lib_dir_path))
-    pass
-  pass
-
-  return lib_dir_paths

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/mapred_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/mapred_service_check.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/mapred_service_check.py
deleted file mode 100644
index 6288ac0..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/mapred_service_check.py
+++ /dev/null
@@ -1,172 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import os
-import sys
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.resources.execute_hadoop import ExecuteHadoop
-from resource_management.libraries.functions.format import format
-from resource_management.core.resources.system import Execute, File
-from resource_management.core.source import StaticFile
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-from resource_management.core.logger import Logger
-
-
-class MapReduce2ServiceCheck(Script):
-  def service_check(self, env):
-    pass
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class MapReduce2ServiceCheckWindows(MapReduce2ServiceCheck):
-  def service_check(self, env):
-    import params
-
-    env.set_params(params)
-
-    component_type = 'hs'
-    if params.hadoop_ssl_enabled:
-      component_address = params.hs_webui_address
-    else:
-      component_address = params.hs_webui_address
-
-    validateStatusFileName = "validateYarnComponentStatusWindows.py"
-    validateStatusFilePath = os.path.join(os.path.dirname(params.hadoop_home), "temp", validateStatusFileName)
-    python_executable = sys.executable
-    validateStatusCmd = "{0} {1} {2} -p {3} -s {4}".format(
-      python_executable, validateStatusFilePath, component_type, component_address, params.hadoop_ssl_enabled)
-
-    if params.security_enabled:
-      kinit_cmd = "{0} -kt {1} {2};".format(params.kinit_path_local, params.smoke_user_keytab, params.smokeuser)
-      smoke_cmd = kinit_cmd + validateStatusCmd
-    else:
-      smoke_cmd = validateStatusCmd
-
-    File(validateStatusFilePath,
-         content=StaticFile(validateStatusFileName)
-    )
-
-    Execute(smoke_cmd,
-            tries=3,
-            try_sleep=5,
-            logoutput=True
-    )
-
-    # hadoop_exe = os.path.join(params.hadoop_home, "bin", "hadoop")
-    #
-    # tested_file = os.path.join(params.hadoop_home, "bin", "hadoop.cmd")
-    # jar_path = os.path.join(params.hadoop_mapred2_jar_location, params.hadoopMapredExamplesJarName)
-    # input_file = format("/user/hadoop/mapredsmokeinput")
-    # output_file = format("/user/hadoop/mapredsmokeoutput")
-    # cleanup_cmd = format("cmd /C {hadoop_exe} fs -rm -r -f {output_file} {input_file}")
-    # create_file_cmd = format("cmd /C {hadoop_exe} fs -put {tested_file} {input_file}")
-    # run_wordcount_job = format("cmd /C {hadoop_exe} jar {jar_path} wordcount {input_file} {output_file}")
-    # test_cmd = format("cmd /C {hadoop_exe} fs -test -e {output_file}")
-    #
-    # if params.security_enabled:
-    #   kinit_cmd = "{0} -kt {1} {2};".format(kinit_path_local, smoke_user_keytab, smokeuser)
-    #   Execute(kinit_cmd)
-    #
-    # Execute(cleanup_cmd,
-    #         tries=1,
-    #         try_sleep=5,
-    #         logoutput=True,
-    #         user=params.hdfs_user
-    # )
-    #
-    # Execute(create_file_cmd,
-    #         tries=1,
-    #         try_sleep=5,
-    #         logoutput=True,
-    #         user=params.hdfs_user
-    # )
-    #
-    # Execute(run_wordcount_job,
-    #         tries=1,
-    #         try_sleep=5,
-    #         logoutput=True,
-    #         user=params.hdfs_user
-    # )
-    #
-    # Execute(test_cmd,
-    #         logoutput=True,
-    #         user=params.hdfs_user
-    # )
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class MapReduce2ServiceCheckDefault(MapReduce2ServiceCheck):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    jar_path = format("{hadoop_mapred2_jar_location}/{hadoopMapredExamplesJarName}")
-    input_file = format("/user/{smokeuser}/mapredsmokeinput")
-    output_file = format("/user/{smokeuser}/mapredsmokeoutput")
-
-    test_cmd = format("fs -test -e {output_file}")
-    run_wordcount_job = format("jar {jar_path} wordcount {input_file} {output_file}")
-
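-    # ensure the smoke user's HDFS home exists, remove any previous output, and stage the input file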
-    params.HdfsResource(format("/user/{smokeuser}"),
-                      type="directory",
-                      action="create_on_execute",
-                      owner=params.smokeuser,
-                      mode=params.smoke_hdfs_user_mode,
-    )
-    params.HdfsResource(output_file,
-                        action = "delete_on_execute",
-                        type = "directory",
-                        dfs_type = params.dfs_type,
-    )
-    params.HdfsResource(input_file,
-                        action = "create_on_execute",
-                        type = "file",
-                        source = "/etc/passwd",
-                        dfs_type = params.dfs_type,
-    )
-    params.HdfsResource(None, action="execute")
-
-    # initialize the ticket
-    if params.security_enabled:
-      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
-      Execute(kinit_cmd, user=params.smokeuser)
-
-    ExecuteHadoop(run_wordcount_job,
-                  tries=1,
-                  try_sleep=5,
-                  user=params.smokeuser,
-                  bin_dir=params.execute_path,
-                  conf_dir=params.hadoop_conf_dir,
-                  logoutput=True)
-
-    # the ticket may have expired, so re-initialize
-    if params.security_enabled:
-      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
-      Execute(kinit_cmd, user=params.smokeuser)
-
-    ExecuteHadoop(test_cmd,
-                  user=params.smokeuser,
-                  bin_dir=params.execute_path,
-                  conf_dir=params.hadoop_conf_dir)
-
-
-if __name__ == "__main__":
-  MapReduce2ServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/mapreduce2_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/mapreduce2_client.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/mapreduce2_client.py
deleted file mode 100644
index 424157b..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/mapreduce2_client.py
+++ /dev/null
@@ -1,98 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-# Python imports
-import os
-import sys
-
-# Local imports
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import conf_select, stack_select
-from resource_management.libraries.functions.constants import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.core.exceptions import ClientComponentHasNoStatus
-from yarn import yarn
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-from resource_management.core.logger import Logger
-
-
-class MapReduce2Client(Script):
-  def install(self, env):
-    import params
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env, config_dir=None, upgrade_type=None):
-    """
-    :param env: Python environment
-    :param config_dir: During rolling upgrade, which config directory to save configs to.
-    """
-    import params
-    env.set_params(params)
-    yarn(config_dir=config_dir)
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-  def stack_upgrade_save_new_config(self, env):
-    """
-    Because this gets called during a Rolling Upgrade, the new mapreduce configs have already been saved, so we must be
-    careful to only call configure() on the directory of the new version.
-    :param env:
-    """
-    import params
-    env.set_params(params)
-
-    conf_select_name = "hadoop"
-    base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-    config_dir = self.get_config_dir_during_stack_upgrade(env, base_dir, conf_select_name)
-
-    if config_dir:
-      Logger.info("stack_upgrade_save_new_config(): Calling conf-select on %s using version %s" % (conf_select_name, str(params.version)))
-
-      # Because this script was called from ru_execute_tasks.py which already enters an Environment with its own basedir,
-      # must change it now so this function can find the Jinja Templates for the service.
-      env.config.basedir = base_dir
-      conf_select.select(params.stack_name, conf_select_name, params.version)
-      self.configure(env, config_dir=config_dir)
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class MapReduce2ClientWindows(MapReduce2Client):
-  pass
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class MapReduce2ClientDefault(MapReduce2Client):
-  def get_component_name(self):
-    return "hadoop-client"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-client", params.version)
-
-
-if __name__ == "__main__":
-  MapReduce2Client().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/nodemanager.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/nodemanager.py
deleted file mode 100644
index 133d2e1..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/nodemanager.py
+++ /dev/null
@@ -1,166 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import nodemanager_upgrade
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import conf_select, stack_select
-from resource_management.libraries.functions.constants import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.security_commons import build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
-  FILE_TYPE_XML
-from resource_management.core.logger import Logger
-from yarn import yarn
-from service import service
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-
-
-class Nodemanager(Script):
-  def install(self, env):
-    self.install_packages(env)
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    service('nodemanager',action='stop')
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    service('nodemanager',action='start')
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn(name="nodemanager")
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class NodemanagerWindows(Nodemanager):
-  def status(self, env):
-    service('nodemanager', action='status')
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class NodemanagerDefault(Nodemanager):
-  def get_component_name(self):
-    return "hadoop-yarn-nodemanager"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing NodeManager Stack Upgrade pre-restart")
-    import params
-    env.set_params(params)
-
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-yarn-nodemanager", params.version)
-
-  def post_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing NodeManager Stack Upgrade post-restart")
-    import params
-    env.set_params(params)
-
-    nodemanager_upgrade.post_upgrade_check()
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.nodemanager_pid_file)
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
-                           "yarn.acl.enable": "true"}
-      props_empty_check = ["yarn.nodemanager.principal",
-                           "yarn.nodemanager.keytab",
-                           "yarn.nodemanager.webapp.spnego-principal",
-                           "yarn.nodemanager.webapp.spnego-keytab-file"]
-
-      props_read_check = ["yarn.nodemanager.keytab",
-                          "yarn.nodemanager.webapp.spnego-keytab-file"]
-      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
-                                           props_read_check)
-
-      yarn_expectations ={}
-      yarn_expectations.update(yarn_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'yarn-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, yarn_site_props)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'yarn-site' not in security_params
-               or 'yarn.nodemanager.keytab' not in security_params['yarn-site']
-               or 'yarn.nodemanager.principal' not in security_params['yarn-site']) \
-            or 'yarn.nodemanager.webapp.spnego-keytab-file' not in security_params['yarn-site'] \
-            or 'yarn.nodemanager.webapp.spnego-principal' not in security_params['yarn-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.nodemanager.keytab'],
-                                security_params['yarn-site']['yarn.nodemanager.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.nodemanager.webapp.spnego-keytab-file'],
-                                security_params['yarn-site']['yarn.nodemanager.webapp.spnego-principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-  def get_log_folder(self):
-    import params
-    return params.yarn_log_dir
-  
-  def get_user(self):
-    import params
-    return params.yarn_user
-
-  def get_pid_files(self):
-    import status_params
-    return [status_params.nodemanager_pid_file]
-
-if __name__ == "__main__":
-  Nodemanager().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/nodemanager_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/nodemanager_upgrade.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/nodemanager_upgrade.py
deleted file mode 100644
index 22cd8cc..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/nodemanager_upgrade.py
+++ /dev/null
@@ -1,74 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import subprocess
-
-from resource_management.core.logger import Logger
-from resource_management.core.exceptions import Fail
-from resource_management.core.resources.system import Execute
-from resource_management.core import shell
-from resource_management.libraries.functions.decorator import retry
-from resource_management.libraries.functions.show_logs import show_logs
-from resource_management.libraries.functions.format import format
-
-
-def post_upgrade_check():
-  '''
-  Checks that the NodeManager has rejoined the cluster.
-  This function will obtain the Kerberos ticket if security is enabled.
-  :return:
-  '''
-  import params
-
-  Logger.info('NodeManager executing "yarn node -list -states=RUNNING" to verify the node has rejoined the cluster...')
-  if params.security_enabled and params.nodemanager_kinit_cmd:
-    Execute(params.nodemanager_kinit_cmd, user=params.yarn_user)
-
-  try:
-    _check_nodemanager_startup()
-  except Fail:
-    show_logs(params.yarn_log_dir, params.yarn_user)
-    raise
-    
-
-@retry(times=30, sleep_time=10, err_class=Fail)
-def _check_nodemanager_startup():
-  '''
-  Checks that a NodeManager is in a RUNNING state in the cluster via the
-  "yarn node -list -states=RUNNING" command. Once the NodeManager is found to be
-  alive, this method returns; otherwise it raises a Fail(...) and retries
-  automatically.
-  :return:
-  '''
-  import params
-  import socket
-
-  command = 'yarn node -list -states=RUNNING'
-  return_code, yarn_output = shell.checked_call(command, user=params.yarn_user)
-  
-  hostname = params.hostname.lower()
-  hostname_ip = socket.gethostbyname(params.hostname.lower())
-  nodemanager_address = params.nm_address.lower()
-  yarn_output = yarn_output.lower()
-
-  if hostname in yarn_output or nodemanager_address in yarn_output or hostname_ip in yarn_output:
-    Logger.info('NodeManager with ID \'{0}\' has rejoined the cluster.'.format(nodemanager_address))
-    return
-  else:
-    raise Fail('NodeManager with ID \'{0}\' was not found in the list of running NodeManagers. \'{1}\' output was:\n{2}'.format(nodemanager_address, command, yarn_output))
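
The deleted _check_nodemanager_startup above relies on resource_management's @retry decorator to poll "yarn node -list -states=RUNNING" until this host shows up. As a rough standalone sketch of that retry-until-success pattern (illustrative only, not the resource_management implementation):

import time
from functools import wraps

def retry(times=30, sleep_time=10, err_class=Exception):
    # Re-invoke the wrapped function until it stops raising err_class,
    # sleeping between attempts and re-raising after the final attempt.
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(times):
                try:
                    return func(*args, **kwargs)
                except err_class:
                    if attempt == times - 1:
                        raise
                    time.sleep(sleep_time)
        return wrapper
    return decorator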

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params.py
deleted file mode 100644
index d0ad6f6..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params.py
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from ambari_commons import OSCheck
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions.copy_tarball import get_sysprep_skip_copy_tarballs_hdfs
-
-if OSCheck.is_windows_family():
-  from params_windows import *
-else:
-  from params_linux import *
-
-sysprep_skip_copy_tarballs_hdfs = get_sysprep_skip_copy_tarballs_hdfs()
-retryAble = default("/commandParams/command_retry_enabled", False)


[03/19] ambari git commit: AMBARI-19229. Remove HDP-3.0.0 stack definition from Ambari-2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/exclude_hosts_list.j2
deleted file mode 100644
index c7ce416..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in exclude_hosts %}
-{{host}}
-{% endfor %}
\ No newline at end of file
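
The exclude_hosts_list.j2 template above simply emits one excluded host per line (Ambari renders it into YARN's node exclude file when decommissioning NodeManagers). A standalone sketch of the rendering, using plain Jinja2 instead of Ambari's own Template resource and made-up hostnames:

from jinja2 import Template

# Template body minus the license header.
template = Template("{% for host in exclude_hosts %}{{host}}\n{% endfor %}")
print(template.render(exclude_hosts=["nm1.example.com", "nm2.example.com"]), end="")
# nm1.example.com
# nm2.example.com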

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/mapreduce.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/mapreduce.conf.j2 b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/mapreduce.conf.j2
deleted file mode 100644
index ae8e6d5..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/mapreduce.conf.j2
+++ /dev/null
@@ -1,35 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-{{mapred_user}}   - nofile {{mapred_user_nofile_limit}}
-{{mapred_user}}   - nproc  {{mapred_user_nproc_limit}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/taskcontroller.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/taskcontroller.cfg.j2 b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/taskcontroller.cfg.j2
deleted file mode 100644
index 3d5f4f2..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/taskcontroller.cfg.j2
+++ /dev/null
@@ -1,38 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-mapred.local.dir={{mapred_local_dir}}
-mapreduce.tasktracker.group={{mapred_tt_group}}
-hadoop.log.dir={{hdfs_log_dir_prefix}}/{{mapred_user}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/yarn.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/yarn.conf.j2 b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/yarn.conf.j2
deleted file mode 100644
index 1063099..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/yarn.conf.j2
+++ /dev/null
@@ -1,35 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-{{yarn_user}}   - nofile {{yarn_user_nofile_limit}}
-{{yarn_user}}   - nproc  {{yarn_user_nproc_limit}}
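
yarn.conf.j2 above, like mapreduce.conf.j2 earlier in this diff, is a ulimit fragment (it ends up under /etc/security/limits.d) that raises the open-file and process limits for the service account. With an illustrative yarn_user of "yarn" and placeholder limit values, the rendered yarn.conf would read:

yarn   - nofile 32768
yarn   - nproc  65536

The real numbers come from yarn_user_nofile_limit and yarn_user_nproc_limit in yarn-env; the values above are examples only.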

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/quicklinks-mapred/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/quicklinks-mapred/quicklinks.json b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/quicklinks-mapred/quicklinks.json
deleted file mode 100644
index 5ffbc07..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/quicklinks-mapred/quicklinks.json
+++ /dev/null
@@ -1,80 +0,0 @@
-{
-  "name": "default",
-  "description": "default quick links configuration",
-  "configuration": {
-    "protocol":
-    {
-      "type":"https",
-      "checks":[
-        {
-          "property":"mapreduce.jobhistory.http.policy",
-          "desired":"HTTPS_ONLY",
-          "site":"mapred-site"
-        }
-      ]
-    },
-
-    "links": [
-      {
-        "name": "jobhistory_ui",
-        "label": "JobHistory UI",
-        "requires_user_name": "false",
-        "component_name": "HISTORYSERVER",
-        "url": "%@://%@:%@",
-        "port":{
-          "http_property": "mapreduce.jobhistory.webapp.address",
-          "http_default_port": "19888",
-          "https_property": "mapreduce.jobhistory.webapp.https.address",
-          "https_default_port": "8090",
-          "regex": "\\w*:(\\d+)",
-          "site": "mapred-site"
-        }
-      },
-      {
-        "name": "jobhistory_logs",
-        "label": "JobHistory logs",
-        "requires_user_name": "false",
-        "component_name": "HISTORYSERVER",
-        "url": "%@://%@:%@/logs",
-        "port":{
-          "http_property": "mapreduce.jobhistory.webapp.address",
-          "http_default_port": "19888",
-          "https_property": "mapreduce.jobhistory.webapp.https.address",
-          "https_default_port": "8090",
-          "regex": "\\w*:(\\d+)",
-          "site": "mapred-site"
-        }
-      },
-      {
-        "name":"jobhistory_jmx",
-        "label":"JobHistory JMX",
-        "requires_user_name":"false",
-        "component_name": "HISTORYSERVER",
-        "url":"%@://%@:%@/jmx",
-        "port":{
-          "http_property": "mapreduce.jobhistory.webapp.address",
-          "http_default_port": "19888",
-          "https_property": "mapreduce.jobhistory.webapp.https.address",
-          "https_default_port": "8090",
-          "regex": "\\w*:(\\d+)",
-          "site": "mapred-site"
-        }
-      },
-      {
-        "name":"thread_stacks",
-        "label":"Thread Stacks",
-        "requires_user_name": "false",
-        "component_name": "HISTORYSERVER",
-        "url":"%@://%@:%@/stacks",
-        "port":{
-          "http_property": "mapreduce.jobhistory.webapp.address",
-          "http_default_port": "19888",
-          "https_property": "mapreduce.jobhistory.webapp.https.address",
-          "https_default_port": "8090",
-          "regex": "\\w*:(\\d+)",
-          "site": "mapred-site"
-        }
-      }
-    ]
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/quicklinks/quicklinks.json
deleted file mode 100644
index 37248d0..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/quicklinks/quicklinks.json
+++ /dev/null
@@ -1,80 +0,0 @@
-{
-  "name": "default",
-  "description": "default quick links configuration",
-  "configuration": {
-    "protocol":
-    {
-      "type":"https",
-      "checks":[
-        {
-          "property":"yarn.http.policy",
-          "desired":"HTTPS_ONLY",
-          "site":"yarn-site"
-        }
-      ]
-    },
-
-    "links": [
-      {
-        "name": "resourcemanager_ui",
-        "label": "ResourceManager UI",
-        "requires_user_name": "false",
-        "component_name": "RESOURCEMANAGER",
-        "url": "%@://%@:%@",
-        "port":{
-          "http_property": "yarn.resourcemanager.webapp.address",
-          "http_default_port": "8088",
-          "https_property": "yarn.resourcemanager.webapp.https.address",
-          "https_default_port": "8090",
-          "regex": "\\w*:(\\d+)",
-          "site": "yarn-site"
-        }
-      },
-      {
-        "name": "resourcemanager_logs",
-        "label": "ResourceManager logs",
-        "requires_user_name": "false",
-        "component_name": "RESOURCEMANAGER",
-        "url": "%@://%@:%@/logs",
-        "port":{
-          "http_property": "yarn.resourcemanager.webapp.address",
-          "http_default_port": "8088",
-          "https_property": "yarn.resourcemanager.webapp.https.address",
-          "https_default_port": "8090",
-          "regex": "\\w*:(\\d+)",
-          "site": "yarn-site"
-        }
-      },
-      {
-        "name": "resourcemanager_jmx",
-        "label":"ResourceManager JMX",
-        "requires_user_name": "false",
-        "component_name": "RESOURCEMANAGER",
-        "url":"%@://%@:%@/jmx",
-        "port":{
-          "http_property": "yarn.resourcemanager.webapp.address",
-          "http_default_port": "8088",
-          "https_property": "yarn.resourcemanager.webapp.https.address",
-          "https_default_port": "8090",
-          "regex": "\\w*:(\\d+)",
-          "site": "yarn-site"
-        }
-      },
-      {
-        "name": "thread_stacks",
-        "label":"Thread Stacks",
-        "requires_user_name": "false",
-        "component_name": "RESOURCEMANAGER",
-        "url":"%@://%@:%@/stacks",
-        "port":{
-          "http_property": "yarn.resourcemanager.webapp.address",
-          "http_default_port": "8088",
-          "https_property": "yarn.resourcemanager.webapp.https.address",
-          "https_default_port": "8090",
-          "regex": "\\w*:(\\d+)",
-          "site": "yarn-site"
-        }
-      }
-    ]
-  }
-}
\ No newline at end of file
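
In both quicklinks files above, the "regex" field is what pulls the port out of a host:port style property, with the *_default_port values acting as fallbacks. A minimal illustration (the address value is made up):

import re

address = "rm1.example.com:8088"            # e.g. yarn.resourcemanager.webapp.address
match = re.search(r"\w*:(\d+)", address)    # the "regex" value from quicklinks.json
port = match.group(1) if match else "8088"  # http_default_port as the fallback
print(port)  # -> 8088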

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/themes-mapred/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/themes-mapred/theme.json b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/themes-mapred/theme.json
deleted file mode 100644
index 5019447..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/themes-mapred/theme.json
+++ /dev/null
@@ -1,132 +0,0 @@
-{
-  "name": "default",
-  "description": "Default theme for MAPREDUCE service",
-  "configuration": {
-    "layouts": [
-      {
-        "name": "default",
-        "tabs": [
-          {
-            "name": "settings",
-            "display-name": "Settings",
-            "layout": {
-              "tab-columns": "1",
-              "tab-rows": "1",
-              "sections": [
-                {
-                  "name": "section-mr-scheduler",
-                  "display-name": "MapReduce",
-                  "row-index": "0",
-                  "column-index": "0",
-                  "row-span": "1",
-                  "column-span": "1",
-                  "section-columns": "3",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-mr-scheduler-row1-col1",
-                      "display-name": "MapReduce Framework",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    },
-                    {
-                      "name": "subsection-mr-scheduler-row1-col2",
-                      "row-index": "0",
-                      "column-index": "1",
-                      "row-span": "1",
-                      "column-span": "1"
-                    },
-                    {
-                      "name": "subsection-mr-scheduler-row1-col3",
-                      "row-index": "0",
-                      "column-index": "2",
-                      "row-span": "1",
-                      "column-span": "1"
-                    },
-                    {
-                      "name": "subsection-mr-scheduler-row2-col1",
-                      "display-name": "MapReduce AppMaster",
-                      "row-index": "1",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "3"
-                    }
-                  ]
-                }
-              ]
-            }
-          }
-        ]
-      }
-    ],
-    "placement": {
-      "configuration-layout": "default",
-      "configs": [
-        {
-          "config": "mapred-site/mapreduce.map.memory.mb",
-          "subsection-name": "subsection-mr-scheduler-row1-col1"
-        },
-        {
-          "config": "mapred-site/mapreduce.reduce.memory.mb",
-          "subsection-name": "subsection-mr-scheduler-row1-col2"
-        },
-        {
-          "config": "mapred-site/yarn.app.mapreduce.am.resource.mb",
-          "subsection-name": "subsection-mr-scheduler-row2-col1"
-        },
-        {
-          "config": "mapred-site/mapreduce.task.io.sort.mb",
-          "subsection-name": "subsection-mr-scheduler-row1-col3"
-        }
-      ]
-    },
-    "widgets": [
-      {
-        "config": "mapred-site/mapreduce.map.memory.mb",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "GB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "mapred-site/mapreduce.reduce.memory.mb",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "GB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "mapred-site/yarn.app.mapreduce.am.resource.mb",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "GB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "mapred-site/mapreduce.task.io.sort.mb",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "MB"
-            }
-          ]
-        }
-      }
-    ]
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/themes/theme.json b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/themes/theme.json
deleted file mode 100644
index 758cf0c..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/themes/theme.json
+++ /dev/null
@@ -1,250 +0,0 @@
-{
-  "name": "default",
-  "description": "Default theme for YARN service",
-  "configuration": {
-    "layouts": [
-      {
-        "name": "default",
-        "tabs": [
-          {
-            "name": "settings",
-            "display-name": "Settings",
-            "layout": {
-              "tab-columns": "3",
-              "tab-rows": "2",
-              "sections": [
-                {
-                  "name": "section-nm-sizing",
-                  "display-name": "Memory",
-                  "row-index": "0",
-                  "column-index": "0",
-                  "row-span": "1",
-                  "column-span": "2",
-                  "section-columns": "2",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-nm-sizing-col1",
-                      "display-name": "Node",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    },
-                    {
-                      "name": "subsection-nm-sizing-col2",
-                      "display-name": "Container",
-                      "row-index": "0",
-                      "column-index": "1",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                },
-                {
-                  "name": "section-yarn-platform-features",
-                  "display-name": "YARN Features",
-                  "row-index": "0",
-                  "column-index": "2",
-                  "row-span": "1",
-                  "column-span": "1",
-                  "section-columns": "1",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-yarn-platform-features-col1",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                },
-                {
-                  "name": "section-container-sizing",
-                  "display-name": "CPU",
-                  "row-index": "1",
-                  "column-index": "0",
-                  "row-span": "1",
-                  "column-span": "2",
-                  "section-columns": "2",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-container-sizing-col1",
-                      "display-name": "Node",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    },
-                    {
-                      "name": "subsection-container-sizing-col2",
-                      "display-name": "Container",
-                      "row-index": "0",
-                      "column-index": "1",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                }
-              ]
-            }
-          }
-        ]
-      }
-    ],
-    "placement": {
-      "configuration-layout": "default",
-      "configs": [
-        {
-          "config": "yarn-site/yarn.nodemanager.resource.memory-mb",
-          "subsection-name": "subsection-nm-sizing-col1"
-        },
-        {
-          "config": "yarn-site/yarn.scheduler.minimum-allocation-mb",
-          "subsection-name": "subsection-nm-sizing-col2"
-        },
-        {
-          "config": "yarn-site/yarn.scheduler.maximum-allocation-mb",
-          "subsection-name": "subsection-nm-sizing-col2"
-        },
-        {
-          "config": "yarn-site/yarn.node-labels.enabled",
-          "subsection-name": "subsection-yarn-platform-features-col1"
-        },
-        {
-          "config": "yarn-site/yarn.resourcemanager.scheduler.monitor.enable",
-          "subsection-name": "subsection-yarn-platform-features-col1"
-        },
-        {
-          "config": "capacity-scheduler/yarn.scheduler.capacity.resource-calculator",
-          "subsection-name": "subsection-container-sizing-col1"
-        },
-        {
-          "config": "yarn-env/yarn_cgroups_enabled",
-          "subsection-name": "subsection-container-sizing-col1"
-        },
-        {
-          "config": "yarn-site/yarn.nodemanager.resource.percentage-physical-cpu-limit",
-          "subsection-name": "subsection-container-sizing-col1"
-        },
-        {
-          "config": "yarn-site/yarn.nodemanager.resource.cpu-vcores",
-          "subsection-name": "subsection-container-sizing-col1"
-        },
-        {
-          "config": "yarn-site/yarn.scheduler.minimum-allocation-vcores",
-          "subsection-name": "subsection-container-sizing-col2"
-        },
-        {
-          "config": "yarn-site/yarn.scheduler.maximum-allocation-vcores",
-          "subsection-name": "subsection-container-sizing-col2"
-        }
-      ]
-    },
-    "widgets": [
-      {
-        "config": "yarn-site/yarn.nodemanager.resource.memory-mb",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "MB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "yarn-site/yarn.nodemanager.resource.percentage-physical-cpu-limit",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "percent"
-            }
-          ]
-        }
-      },
-      {
-        "config": "yarn-site/yarn.nodemanager.resource.cpu-vcores",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "int"
-            }
-          ]
-        }
-      },
-      {
-        "config": "yarn-site/yarn.scheduler.minimum-allocation-mb",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "MB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "yarn-site/yarn.scheduler.maximum-allocation-mb",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "MB"
-            }
-          ]
-        }
-      },
-      {
-        "config": "yarn-site/yarn.scheduler.minimum-allocation-vcores",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "int"
-            }
-          ]
-        }
-      },
-      {
-        "config": "yarn-site/yarn.scheduler.maximum-allocation-vcores",
-        "widget": {
-          "type": "slider",
-          "units": [
-            {
-              "unit-name": "int"
-            }
-          ]
-        }
-      },
-      {
-        "config": "yarn-site/yarn.node-labels.enabled",
-        "widget": {
-          "type": "toggle"
-        }
-      },
-      {
-        "config": "yarn-env/yarn_cgroups_enabled",
-        "widget": {
-          "type": "toggle"
-        }
-      },
-      {
-        "config": "yarn-site/yarn.resourcemanager.scheduler.monitor.enable",
-        "widget": {
-          "type": "toggle"
-        }
-      },
-      {
-        "config": "capacity-scheduler/yarn.scheduler.capacity.resource-calculator",
-        "widget": {
-          "type": "toggle"
-        }
-      }
-    ]
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.9/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.9/metainfo.xml b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.9/metainfo.xml
deleted file mode 100644
index 79bf5f1..0000000
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.9/metainfo.xml
+++ /dev/null
@@ -1,51 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>ZOOKEEPER</name>
-      <extends>common-services/ZOOKEEPER/3.4.6</extends>
-      <version>3.4.9</version>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>amazon2015,redhat6,redhat7,suse11,suse12</osFamily>
-          <packages>
-            <package>
-              <name>zookeeper_${stack_version}</name>
-            </package>
-            <package>
-              <name>zookeeper_${stack_version}-server</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>ubuntu12,ubuntu14,ubuntu16</osFamily>
-          <packages>
-            <package>
-              <name>zookeeper-${stack_version}</name>
-            </package>
-            <package>
-              <name>zookeeper-${stack_version}-server</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
deleted file mode 100644
index 93680bf..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
+++ /dev/null
@@ -1,293 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>recovery_enabled</name>
-    <value>true</value>
-    <description>Whether auto start is enabled for this cluster.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>recovery_type</name>
-    <value>AUTO_START</value>
-    <description>Auto start type.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>recovery_lifetime_max_count</name>
-    <value>1024</value>
-    <description>Maximum lifetime count of auto-start recovery attempts allowed per host component. This is reset when the agent is restarted.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>recovery_max_count</name>
-    <value>6</value>
-    <description>Maximum count of auto-start recovery attempts allowed per host component within a window. This is reset when the agent is restarted.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>recovery_window_in_minutes</name>
-    <value>60</value>
-    <description>Auto start recovery window size in minutes.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>recovery_retry_interval</name>
-    <value>5</value>
-    <description>Auto start recovery retry gap between tries per host component.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>security_enabled</name>
-    <value>false</value>
-    <description>Hadoop Security</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>kerberos_domain</name>
-    <value>EXAMPLE.COM</value>
-    <description>Kerberos realm.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>ignore_groupsusers_create</name>
-    <display-name>Skip group modifications during install</display-name>
-    <value>false</value>
-    <property-type>ADDITIONAL_USER_PROPERTY</property-type>
-    <description>Whether to ignore failures on users and group creation</description>
-    <value-attributes>
-      <overridable>false</overridable>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>sysprep_skip_create_users_and_groups</name>
-    <display-name>Whether to skip creating users and groups in a sysprepped cluster</display-name>
-    <value>false</value>
-    <property-type>ADDITIONAL_USER_PROPERTY</property-type>
-    <description>Whether to skip creating users and groups in a sysprepped cluster</description>
-    <value-attributes>
-      <overridable>true</overridable>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>sysprep_skip_copy_fast_jar_hdfs</name>
-    <display-name>Whether to skip copying the tarballs to HDFS on a sysprepped cluster</display-name>
-    <value>false</value>
-    <description>Whether to skip copying the tarballs to HDFS on a sysprepped cluster, during both fresh install and stack upgrade</description>
-    <value-attributes>
-      <overridable>true</overridable>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>sysprep_skip_copy_tarballs_hdfs</name>
-    <display-name>Whether to skip copying the tarballs to HDFS on a sysprepped cluster</display-name>
-    <value>false</value>
-    <description>Whether to skip copying the tarballs to HDFS on a sysprepped cluster, during both fresh install and stack upgrade</description>
-    <value-attributes>
-      <overridable>true</overridable>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>sysprep_skip_copy_oozie_share_lib_to_hdfs</name>
-    <display-name>Whether to skip copying the Oozie share lib to HDFS on sysprepped cluster</display-name>
-    <value>false</value>
-    <description>Whether to skip copying the Oozie share lib to HDFS on sysprepped cluster, during both fresh install and stack upgrade</description>
-    <value-attributes>
-      <overridable>true</overridable>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>smokeuser</name>
-    <display-name>Smoke User</display-name>
-    <value>ambari-qa</value>
-    <property-type>USER</property-type>
-    <description>User executing service checks</description>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>smokeuser_keytab</name>
-    <value>/etc/security/keytabs/smokeuser.headless.keytab</value>
-    <description>Path to smoke test user keytab file</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>user_group</name>
-    <display-name>Hadoop Group</display-name>
-    <value>hadoop</value>
-    <property-type>GROUP</property-type>
-    <description>Hadoop user group.</description>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>repo_suse_rhel_template</name>
-    <value>[{{repo_id}}]
-name={{repo_id}}
-{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}
-
-path=/
-enabled=1
-gpgcheck=0</value>
-    <description>Template of repositories for rhel and suse.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>repo_ubuntu_template</name>
-    <value>{{package_type}} {{base_url}} {{components}}</value>
-    <description>Template of repositories for ubuntu.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>override_uid</name>
-    <value>true</value>
-    <property-type>ADDITIONAL_USER_PROPERTY</property-type>
-    <display-name>Have Ambari manage UIDs</display-name>
-    <description>Have Ambari manage UIDs</description>
-    <value-attributes>
-      <overridable>false</overridable>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>fetch_nonlocal_groups</name>
-    <value>true</value>
-    <display-name>Ambari fetch nonlocal groups</display-name>
-    <description>Ambari requires fetching all the groups. This can be slow
-        in environments with LDAP enabled. Setting this option to false allows Ambari
-        to skip user/group management connected with LDAP groups.</description>
-    <value-attributes>
-      <overridable>false</overridable>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>managed_hdfs_resource_property_names</name>
-    <value/>
-    <description>Comma-separated list of property names with HDFS resource paths.
-        Resources from this list will be managed even if they are marked as not managed in the stack.</description>
-    <value-attributes>
-      <overridable>false</overridable>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
-  <property>
-    <name>stack_tools</name>
-    <value/>
-    <description>Stack specific tools</description>
-    <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
-    <value-attributes>
-      <property-file-name>stack_tools.json</property-file-name>
-      <property-file-type>json</property-file-type>
-      <read-only>true</read-only>
-      <overridable>false</overridable>
-      <visible>false</visible>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- Define stack_features property in the base stack. DO NOT override this property for each stack version -->
-  <property>
-    <name>stack_features</name>
-    <value/>
-    <description>List of features supported by the stack</description>
-    <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
-    <value-attributes>
-      <property-file-name>stack_features.json</property-file-name>
-      <property-file-type>json</property-file-type>
-      <read-only>true</read-only>
-      <overridable>false</overridable>
-      <visible>false</visible>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>stack_root</name>
-    <value>/usr/hdp</value>
-    <description>Stack root folder</description>
-    <value-attributes>
-      <read-only>true</read-only>
-      <overridable>false</overridable>
-      <visible>false</visible>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>alerts_repeat_tolerance</name>
-    <value>1</value>
-    <description>The number of consecutive alerts required to transition an alert from the SOFT to the HARD state.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>ignore_bad_mounts</name>
-    <value>false</value>
-    <description>For properties handled by handle_mounted_dirs, this will prevent Ambari from creating any directories.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>manage_dirs_on_root</name>
-    <value>true</value>
-    <description>For properties handled by handle_mounted_dirs, this will make Ambari manage (create and set permissions on) unknown directories on the / partition.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>one_dir_per_partition</name>
-    <value>false</value>
-    <description>For properties handled by handle_mounted_dirs, this will make Ambari use only one directory per partition.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>hide_yarn_memory_widget</name>
-    <value>false</value>
-    <description>Whether the YARN Memory widget should be hidden by default on the dashboard.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-   <property>
-    <name>agent_mounts_ignore_list</name>
-    <value/>
-    <description>Comma-separated list of mounts that Ambari ignores when the Stack Advisor suggests property values.</description>
-    <on-ambari-upgrade add="false"/>
-    <value-attributes>
-      <visible>true</visible>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-  </property>
-</configuration>
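
Stack scripts read these cluster-env values from the command configuration dictionary; the deleted after-INSTALL params.py later in this diff does exactly that for security_enabled and user_group. A minimal sketch of the pattern (only meaningful inside an Ambari agent command execution):

from resource_management.libraries.script import Script

config = Script.get_config()
security_enabled = config['configurations']['cluster-env']['security_enabled']
user_group = config['configurations']['cluster-env']['user_group']
stack_root = Script.get_stack_root()  # backed by the stack_root property above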

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py
deleted file mode 100644
index 8a583b3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.script.hook import Hook
-from shared_initialization import link_configs
-from shared_initialization import setup_config
-from shared_initialization import setup_stack_symlinks
-
-class AfterInstallHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    env.set_params(params)
-    setup_stack_symlinks()
-    setup_config()
-
-    link_configs(self.stroutfile)
-
-if __name__ == "__main__":
-  AfterInstallHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py
deleted file mode 100644
index 566f5b3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py
+++ /dev/null
@@ -1,97 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-from resource_management.libraries.script import Script
-from resource_management.libraries.functions import default
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import format_jvm_option
-from resource_management.libraries.functions.version import format_stack_version
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-dfs_type = default("/commandParams/dfs_type", "")
-stack_root = Script.get_stack_root()
-
-is_parallel_execution_enabled = int(default("/agentConfigParams/agent/parallel_execution", 0)) == 1
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-sudo = AMBARI_SUDO_BINARY
-
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-
-# current host stack version
-current_version = default("/hostLevelParams/current_version", None)
-
-# default hadoop params
-mapreduce_libs_path = format("{stack_root}/current/hadoop-mapreduce-client/*")
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
-hadoop_conf_empty_dir = None
-
-versioned_stack_root = format('{stack_root}/current')
-
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-#java params
-java_home = config['hostLevelParams']['java_home']
-
-#hadoop params
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
-
-jsvc_path = "/usr/lib/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-#users and groups
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-user_group = config['configurations']['cluster-env']['user_group']
-
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-has_namenode = not len(namenode_host) == 0
-
-if has_namenode or dfs_type == 'HCFS':
-  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
-
-link_configs_lock_file = os.path.join(tmp_dir, "link_configs_lock_file")
-stack_select_lock_file = os.path.join(tmp_dir, "stack_select_lock_file")
-
-upgrade_suspended = default("/roleParams/upgrade_suspended", False)

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py
deleted file mode 100644
index e9f2283..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py
+++ /dev/null
@@ -1,111 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-
-import ambari_simplejson as json
-from resource_management.core.logger import Logger
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock
-from resource_management.libraries.resources.xml_config import XmlConfig
-from resource_management.libraries.script import Script
-
-
-def setup_stack_symlinks():
-  """
-  Invokes <stack-selector-tool> set all against a calculated fully-qualified, "normalized" version based on a
-  stack version, such as "2.3". This should always be called after a component has been
-  installed to ensure that all HDP pointers are correct. The stack upgrade logic does not
-  interact with this since it's done via a custom command and will not trigger this hook.
-  :return:
-  """
-  import params
-  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
-    # try using the exact version first, falling back to just the stack if it's not defined,
-    # which would only be during an initial cluster installation
-    version = params.current_version if params.current_version is not None else params.stack_version_unformatted
-
-    if not params.upgrade_suspended:
-      if params.host_sys_prepped:
-        Logger.warning("Skipping running stack-selector-tool for stack {0} as its a sys_prepped host. This may cause symlink pointers not to be created for HDP componets installed later on top of an already sys_prepped host.".format(version))
-        return
-      # On parallel command execution this should be executed by a single process at a time.
-      with FcntlBasedProcessLock(params.stack_select_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
-        stack_select.select_all(version)
-
-def setup_config():
-  import params
-  stackversion = params.stack_version_unformatted
-  Logger.info("FS Type: {0}".format(params.dfs_type))
-
-  is_hadoop_conf_dir_present = False
-  if hasattr(params, "hadoop_conf_dir") and params.hadoop_conf_dir is not None and os.path.exists(params.hadoop_conf_dir):
-    is_hadoop_conf_dir_present = True
-  else:
-    Logger.warning("Parameter hadoop_conf_dir is missing or directory does not exist. This is expected if this host does not have any Hadoop components.")
-
-  if is_hadoop_conf_dir_present and (params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS'):
-    # create core-site only if the hadoop config directory exists
-    XmlConfig("core-site.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['core-site'],
-              configuration_attributes=params.config['configuration_attributes']['core-site'],
-              owner=params.hdfs_user,
-              group=params.user_group,
-              only_if=format("ls {hadoop_conf_dir}"))
-
-
-def load_version(struct_out_file):
-  """
-  Loads the version from a file. Kept as a separate method for testing.
-  """
-  json_version = None
-  try:
-    if os.path.exists(struct_out_file):
-      with open(struct_out_file, 'r') as fp:
-        json_info = json.load(fp)
-        json_version = json_info['version']
-  except:
-    pass
-
-  return json_version
-  
-
-def link_configs(struct_out_file):
-  """
-  Links configs, only on a fresh install of HDP-2.3 and higher
-  """
-  import params
-
-  if not Script.is_stack_greater_or_equal("2.3"):
-    Logger.info("Can only link configs for HDP-2.3 and higher.")
-    return
-
-  json_version = load_version(struct_out_file)
-
-  if not json_version:
-    Logger.info("Could not load 'version' from {0}".format(struct_out_file))
-    return
-
-  # On parallel command execution this should be executed by a single process at a time.
-  with FcntlBasedProcessLock(params.link_configs_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
-    for k, v in conf_select.get_package_dirs().iteritems():
-      conf_select.convert_conf_directories_to_symlinks(k, json_version, v)
\ No newline at end of file
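
The after-INSTALL hook above guards stack_select.select_all() and the conf_select symlink conversion with FcntlBasedProcessLock on lock files under tmp_dir, so that parallel command execution on one host runs them one process at a time. A minimal sketch of that fcntl pattern is shown below; it is not the resource_management implementation, and the lock file path is only an example.

    # Minimal sketch of an fcntl-based inter-process lock (illustrative only,
    # not the FcntlBasedProcessLock shipped with resource_management).
    import fcntl

    class SimpleProcessLock(object):
        def __init__(self, lock_file_path, enabled=True):
            self.lock_file_path = lock_file_path
            self.enabled = enabled
            self.lock_file = None

        def __enter__(self):
            if self.enabled:
                self.lock_file = open(self.lock_file_path, "a")
                # Blocks until no other process holds an exclusive lock on the file.
                fcntl.flock(self.lock_file, fcntl.LOCK_EX)
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            if self.lock_file is not None:
                fcntl.flock(self.lock_file, fcntl.LOCK_UN)
                self.lock_file.close()
                self.lock_file = None

    # Usage mirroring the hook above (path is an example):
    # with SimpleProcessLock("/var/lib/ambari-agent/tmp/stack_select_lock_file"):
    #     stack_select.select_all(version)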

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/files/changeToSecureUid.sh
deleted file mode 100644
index 08542c4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/files/changeToSecureUid.sh
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-username=$1
-directories=$2
-
-function find_available_uid() {
- for ((i=1001; i<=2000; i++))
- do
-   grep -q $i /etc/passwd
-   if [ "$?" -ne 0 ]
-   then
-    newUid=$i
-    break
-   fi
- done
-}
-
-find_available_uid
-
-if [ -z "$newUid" ] || [ "$newUid" -eq 0 ]
-then
-  echo "Failed to find an available uid between 1001 and 2000"
-  exit 1
-fi
-
-set -e
-
-dir_array=($(echo $directories | sed 's/,/\n/g'))
-old_uid=$(id -u $username)
-sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E"
-echo "Changing uid of $username from $old_uid to $newUid"
-echo "Changing directory permisions for ${dir_array[@]}"
-$sudo_prefix usermod -u $newUid $username && for dir in ${dir_array[@]} ; do ls $dir 2> /dev/null && echo "Changing permission for $dir" && $sudo_prefix chown -Rh $newUid $dir ; done
-exit 0
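
changeToSecureUid.sh above picks a UID in the 1001-2000 range by grepping /etc/passwd for the bare number and then reassigns the user and its directories to that UID. A hedged Python sketch of the same selection step follows; it checks the UID field itself via the pwd module, and the function is illustrative rather than part of the stack scripts.

    # Illustrative re-implementation of the UID-selection step from
    # changeToSecureUid.sh; not part of the stack hooks.
    import pwd

    def find_available_uid(start=1001, end=2000):
        used_uids = set(entry.pw_uid for entry in pwd.getpwall())
        for candidate in range(start, end + 1):
            if candidate not in used_uids:
                return candidate
        raise RuntimeError("No free UID between %d and %d" % (start, end))

    # print(find_available_uid())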

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/hook.py
deleted file mode 100644
index c34be0b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/hook.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from shared_initialization import *
-
-class BeforeAnyHook(Hook):
-
-  def hook(self, env):
-    import params
-    env.set_params(params)
-
-    setup_users()
-    if params.has_namenode or params.dfs_type == 'HCFS':
-      setup_hadoop_env()
-    setup_java()
-
-if __name__ == "__main__":
-  BeforeAnyHook().execute()
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
deleted file mode 100644
index f70c8e9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
+++ /dev/null
@@ -1,231 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import collections
-import re
-import os
-
-import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
-
-from resource_management.libraries.script import Script
-from resource_management.libraries.functions import default
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import format_jvm_option
-from resource_management.libraries.functions.is_empty import is_empty
-from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.libraries.functions.expect import expect
-from ambari_commons.os_check import OSCheck
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-dfs_type = default("/commandParams/dfs_type", "")
-stack_root = Script.get_stack_root()
-
-artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
-jdk_name = default("/hostLevelParams/jdk_name", None)
-java_home = config['hostLevelParams']['java_home']
-java_version = expect("/hostLevelParams/java_version", int)
-jdk_location = config['hostLevelParams']['jdk_location']
-
-sudo = AMBARI_SUDO_BINARY
-
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-
-upgrade_type = Script.get_upgrade_type(default("/commandParams/upgrade_type", ""))
-version = default("/commandParams/version", None)
-# Handle upgrade and downgrade
-if (upgrade_type is not None) and version:
-  stack_version_formatted = format_stack_version(version)
-
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-
-# Some datanode settings
-dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
-dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
-dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
-dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
-secure_dn_ports_are_in_use = False
-
-def get_port(address):
-  """
-  Extracts the port from an address like 0.0.0.0:1019
-  """
-  if address is None:
-    return None
-  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
-  if m is not None:
-    return int(m.group(2))
-  else:
-    return None
-
-def is_secure_port(port):
-  """
-  Returns True if the port is root-owned on *nix systems
-  """
-  if port is not None:
-    return port < 1024
-  else:
-    return False
-
-# hadoop default params
-mapreduce_libs_path = format("{stack_root}/current/hadoop-mapreduce-client/*")
-
-# upgrades would cause these directories to have a version instead of "current"
-# which would cause a lot of problems when writing out hadoop-env.sh; instead
-# force the use of "current" in the hook
-hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
-hadoop_home = stack_select.get_hadoop_dir("home", force_latest_on_upgrade=True)
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec", force_latest_on_upgrade=True)
-
-hadoop_conf_empty_dir = None
-hadoop_secure_dn_user = hdfs_user
-hadoop_dir = "/etc/hadoop"
-versioned_stack_root = format('{stack_root}/current')
-hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
-datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory']
-is_datanode_max_locked_memory_set = not is_empty(config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'])
-
-if not security_enabled:
-  hadoop_secure_dn_user = '""'
-else:
-  dfs_dn_port = get_port(dfs_dn_addr)
-  dfs_dn_http_port = get_port(dfs_dn_http_addr)
-  dfs_dn_https_port = get_port(dfs_dn_https_addr)
-  # Avoid a situation where the datanode cannot be started as a plain user because root-owned ports are in use
-  if dfs_http_policy == "HTTPS_ONLY":
-    secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
-  elif dfs_http_policy == "HTTP_AND_HTTPS":
-    secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
-  else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
-    secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
-  if secure_dn_ports_are_in_use:
-    hadoop_secure_dn_user = hdfs_user
-  else:
-    hadoop_secure_dn_user = '""'
-
-#hadoop params
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
-
-jsvc_path = "/usr/lib/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
-
-#users and groups
-hbase_user = config['configurations']['hbase-env']['hbase_user']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
-gmond_user = config['configurations']['ganglia-env']["gmond_user"]
-tez_user = config['configurations']['tez-env']["tez_user"]
-oozie_user = config['configurations']['oozie-env']["oozie_user"]
-falcon_user = config['configurations']['falcon-env']["falcon_user"]
-ranger_user = config['configurations']['ranger-env']["ranger_user"]
-zeppelin_user = config['configurations']['zeppelin-env']["zeppelin_user"]
-zeppelin_group = config['configurations']['zeppelin-env']["zeppelin_group"]
-
-user_group = config['configurations']['cluster-env']['user_group']
-
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", [])
-ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
-zeppelin_master_hosts = default("/clusterHostInfo/zeppelin_master_hosts", [])
-
-has_namenode = not len(namenode_host) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_tez = 'tez-site' in config['configurations']
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_oozie_server = not len(oozie_servers) == 0
-has_falcon_server_hosts = not len(falcon_server_hosts) == 0
-has_ranger_admin = not len(ranger_admin_hosts) == 0
-has_zeppelin_master = not len(zeppelin_master_hosts) == 0
-
-if has_namenode or dfs_type == 'HCFS':
-  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
-
-hbase_tmp_dir = "/tmp/hbase-hbase"
-
-proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
-ranger_group = config['configurations']['ranger-env']['ranger_group']
-dfs_cluster_administrators_group = config['configurations']['hdfs-site']["dfs.cluster.administrators"]
-
-sysprep_skip_create_users_and_groups = default("/configurations/cluster-env/sysprep_skip_create_users_and_groups", False)
-ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
-fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"]
-
-smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
-if has_hbase_masters:
-  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
-#repo params
-repo_info = config['hostLevelParams']['repo_info']
-service_repo_info = default("/hostLevelParams/service_repo_info",None)
-
-user_to_groups_dict = collections.defaultdict(lambda:[user_group])
-user_to_groups_dict[smoke_user] = [proxyuser_group]
-if has_ganglia_server:
-  user_to_groups_dict[gmond_user] = [gmond_user]
-  user_to_groups_dict[gmetad_user] = [gmetad_user]
-if has_tez:
-  user_to_groups_dict[tez_user] = [proxyuser_group]
-if has_oozie_server:
-  user_to_groups_dict[oozie_user] = [proxyuser_group]
-if has_falcon_server_hosts:
-  user_to_groups_dict[falcon_user] = [proxyuser_group]
-if has_ranger_admin:
-  user_to_groups_dict[ranger_user] = [ranger_group]
-if has_zeppelin_master:
-  user_to_groups_dict[zeppelin_user] = [zeppelin_group, user_group]
-
-user_to_gid_dict = collections.defaultdict(lambda:user_group)
-
-user_list = json.loads(config['hostLevelParams']['user_list'])
-group_list = json.loads(config['hostLevelParams']['group_list'])
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-tez_am_view_acls = config['configurations']['tez-site']["tez.am.view-acls"]
-override_uid = str(default("/configurations/cluster-env/override_uid", "true")).lower()
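
In the params.py above, get_port() pulls the port out of a host:port (or URL-style) address and is_secure_port() treats anything below 1024 as root-owned; together they decide whether hadoop_secure_dn_user must stay set to the HDFS user. A standalone illustration of that decision follows; the address values are made-up examples, not cluster defaults.

    # Standalone illustration of the get_port/is_secure_port decision above.
    # The address values are examples only.
    import re

    def get_port(address):
        if address is None:
            return None
        m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
        return int(m.group(2)) if m else None

    def is_secure_port(port):
        return port is not None and port < 1024

    dfs_dn_addr = "0.0.0.0:1019"       # example DataNode address
    dfs_dn_http_addr = "0.0.0.0:1022"  # example DataNode HTTP address
    secure_dn_ports_are_in_use = (is_secure_port(get_port(dfs_dn_addr)) or
                                  is_secure_port(get_port(dfs_dn_http_addr)))
    print(secure_dn_ports_are_in_use)  # True: both ports are below 1024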

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
deleted file mode 100644
index 320872e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
+++ /dev/null
@@ -1,226 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-import re
-import getpass
-import tempfile
-from copy import copy
-from resource_management.libraries.functions.version import compare_versions
-from resource_management import *
-
-def setup_users():
-  """
-  Creates users before cluster installation
-  """
-  import params
-
-  should_create_users_and_groups = False
-  if params.host_sys_prepped:
-    should_create_users_and_groups = not params.sysprep_skip_create_users_and_groups
-  else:
-    should_create_users_and_groups = not params.ignore_groupsusers_create
-
-  if should_create_users_and_groups:
-    for group in params.group_list:
-      Group(group,
-      )
-
-    for user in params.user_list:
-      User(user,
-          gid = params.user_to_gid_dict[user],
-          groups = params.user_to_groups_dict[user],
-          fetch_nonlocal_groups = params.fetch_nonlocal_groups
-      )
-
-    if params.override_uid == "true":
-      set_uid(params.smoke_user, params.smoke_user_dirs)
-    else:
-      Logger.info('Skipping setting uid for smoke user as override_uid is not enabled')
-  else:
-    Logger.info('Skipping creation of User and Group as host is sys prepped or ignore_groupsusers_create flag is on')
-    pass
-
-
-  if params.has_hbase_masters:
-    Directory (params.hbase_tmp_dir,
-               owner = params.hbase_user,
-               mode=0775,
-               create_parents = True,
-               cd_access="a",
-    )
-    if params.override_uid == "true":
-      set_uid(params.hbase_user, params.hbase_user_dirs)
-    else:
-      Logger.info('Skipping setting uid for hbase user as override_uid is not enabled')
-
-  if should_create_users_and_groups:
-    if params.has_namenode:
-      create_dfs_cluster_admins()
-    if params.has_tez and params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.3') >= 0:
-      create_tez_am_view_acls()
-  else:
-    Logger.info('Skipping setting dfs cluster admin and tez view acls as host is sys prepped')
-
-def create_dfs_cluster_admins():
-  """
-  dfs.cluster.administrators supports the format <comma-delimited list of usernames><space><comma-delimited list of group names>
-  """
-  import params
-
-  groups_list = create_users_and_groups(params.dfs_cluster_administrators_group)
-
-  User(params.hdfs_user,
-    groups = params.user_to_groups_dict[params.hdfs_user] + groups_list,
-          fetch_nonlocal_groups = params.fetch_nonlocal_groups
-  )
-
-def create_tez_am_view_acls():
-
-  """
-  tez.am.view-acls supports the format <comma-delimited list of usernames><space><comma-delimited list of group names>
-  """
-  import params
-
-  if not params.tez_am_view_acls.startswith("*"):
-    create_users_and_groups(params.tez_am_view_acls)
-
-def create_users_and_groups(user_and_groups):
-
-  import params
-
-  parts = re.split('\s', user_and_groups)
-  if len(parts) == 1:
-    parts.append("")
-
-  users_list = parts[0].split(",") if parts[0] else []
-  groups_list = parts[1].split(",") if parts[1] else []
-
-  if users_list:
-    User(users_list,
-          fetch_nonlocal_groups = params.fetch_nonlocal_groups
-    )
-
-  if groups_list:
-    Group(copy(groups_list),
-    )
-  return groups_list
-    
-def set_uid(user, user_dirs):
-  """
-  user_dirs - comma-separated list of directories
-  """
-  import params
-
-  File(format("{tmp_dir}/changeUid.sh"),
-       content=StaticFile("changeToSecureUid.sh"),
-       mode=0555)
-  ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
-  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs}"),
-          not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})"))
-    
-def setup_hadoop_env():
-  import params
-  stackversion = params.stack_version_unformatted
-  Logger.info("FS Type: {0}".format(params.dfs_type))
-  if params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS':
-    if params.security_enabled:
-      tc_owner = "root"
-    else:
-      tc_owner = params.hdfs_user
-
-    # create /etc/hadoop
-    Directory(params.hadoop_dir, mode=0755)
-
-    # HDP < 2.2 used a conf -> conf.empty symlink for /etc/hadoop/
-    if Script.is_stack_less_than("2.2"):
-      Directory(params.hadoop_conf_empty_dir, create_parents = True, owner="root",
-        group=params.user_group )
-
-      Link(params.hadoop_conf_dir, to=params.hadoop_conf_empty_dir,
-         not_if=format("ls {hadoop_conf_dir}"))
-
-    # write out hadoop-env.sh, but only if the directory exists
-    if os.path.exists(params.hadoop_conf_dir):
-      File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'), owner=tc_owner,
-        group=params.user_group,
-        content=InlineTemplate(params.hadoop_env_sh_template))
-
-    # Create tmp dir for java.io.tmpdir
-    # Handle a situation when /tmp is set to noexec
-    Directory(params.hadoop_java_io_tmpdir,
-              owner=params.hdfs_user,
-              group=params.user_group,
-              mode=01777
-    )
-
-def setup_java():
-  """
-  Installs the JDK using specific params that come from ambari-server
-  """
-  import params
-
-  java_exec = format("{java_home}/bin/java")
-
-  if not os.path.isfile(java_exec):
-    if not params.jdk_name: # jdk_name is None when a custom JDK is used
-      raise Fail(format("Unable to access {java_exec}. Confirm you have copied jdk to this host."))
-
-    jdk_curl_target = format("{tmp_dir}/{jdk_name}")
-    java_dir = os.path.dirname(params.java_home)
-
-    Directory(params.artifact_dir,
-              create_parents = True,
-              )
-
-    File(jdk_curl_target,
-         content = DownloadSource(format("{jdk_location}/{jdk_name}")),
-         not_if = format("test -f {jdk_curl_target}")
-    )
-
-    tmp_java_dir = tempfile.mkdtemp(prefix="jdk_tmp_", dir=params.tmp_dir)
-
-    try:
-      if params.jdk_name.endswith(".bin"):
-        chmod_cmd = ("chmod", "+x", jdk_curl_target)
-        install_cmd = format("cd {tmp_java_dir} && echo A | {jdk_curl_target} -noregister && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
-      elif params.jdk_name.endswith(".gz"):
-        chmod_cmd = ("chmod","a+x", java_dir)
-        install_cmd = format("cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
-
-      Directory(java_dir
-      )
-
-      Execute(chmod_cmd,
-              sudo = True,
-              )
-
-      Execute(install_cmd,
-              )
-
-    finally:
-      Directory(tmp_java_dir, action="delete")
-
-    File(format("{java_home}/bin/java"),
-         mode=0755,
-         cd_access="a",
-         )
-    Execute(('chmod', '-R', '755', params.java_home),
-      sudo = True,
-    )
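
create_dfs_cluster_admins() and create_tez_am_view_acls() above both feed a "<comma-delimited users><space><comma-delimited groups>" value into create_users_and_groups(). A small standalone example of that parsing step is shown below; the property value is made up for illustration.

    # Parsing of the "<users> <groups>" format handled by
    # create_users_and_groups() above; the value is a made-up example.
    import re

    value = "hdfs,admin hadoop,supergroup"
    parts = re.split(r'\s', value)
    if len(parts) == 1:
        parts.append("")

    users_list = parts[0].split(",") if parts[0] else []
    groups_list = parts[1].split(",") if parts[1] else []
    print(users_list)   # ['hdfs', 'admin']
    print(groups_list)  # ['hadoop', 'supergroup']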

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/hook.py
deleted file mode 100644
index ce17776..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/hook.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from shared_initialization import *
-from repo_initialization import *
-
-class BeforeInstallHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    self.run_custom_hook('before-ANY')
-    env.set_params(params)
-    
-    install_repos()
-    install_packages()
-
-if __name__ == "__main__":
-  BeforeInstallHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py
deleted file mode 100644
index 6193c11..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py
+++ /dev/null
@@ -1,113 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-from resource_management.libraries.functions.version import format_stack_version, compare_versions
-from resource_management.core.system import System
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import default, format
-from resource_management.libraries.functions.expect import expect
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-sudo = AMBARI_SUDO_BINARY
-
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
-agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-
-#users and groups
-hbase_user = config['configurations']['hbase-env']['hbase_user']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
-gmond_user = config['configurations']['ganglia-env']["gmond_user"]
-tez_user = config['configurations']['tez-env']["tez_user"]
-
-user_group = config['configurations']['cluster-env']['user_group']
-proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
-
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-
-# repo templates
-repo_rhel_suse =  config['configurations']['cluster-env']['repo_suse_rhel_template']
-repo_ubuntu =  config['configurations']['cluster-env']['repo_ubuntu_template']
-
-#hosts
-hostname = config["hostname"]
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", [])
-falcon_host =  default('/clusterHostInfo/falcon_server_hosts', [])
-
-has_sqoop_client = 'sqoop-env' in config['configurations']
-has_namenode = not len(namenode_host) == 0
-has_hs = not len(hs_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_slaves = not len(slave_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_zk_host = not len(zk_hosts) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_storm_server = not len(storm_server_hosts) == 0
-has_falcon_server = not len(falcon_host) == 0
-has_tez = 'tez-site' in config['configurations']
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-hbase_tmp_dir = "/tmp/hbase-hbase"
-
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-#java params
-java_home = config['hostLevelParams']['java_home']
-artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
-jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
-jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
-jce_location = config['hostLevelParams']['jdk_location']
-jdk_location = config['hostLevelParams']['jdk_location']
-ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
-if has_hbase_masters:
-  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
-#repo params
-repo_info = config['hostLevelParams']['repo_info']
-service_repo_info = default("/hostLevelParams/service_repo_info",None)

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py
deleted file mode 100644
index a35dce7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py
+++ /dev/null
@@ -1,68 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from ambari_commons.os_check import OSCheck
-from resource_management.libraries.resources.repository import Repository
-from resource_management.core.logger import Logger
-import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
-
-# components_list = repoName + postfix
-_UBUNTU_REPO_COMPONENTS_POSTFIX = ["main"]
-
-def _alter_repo(action, repo_string, repo_template):
-  """
-  @param action: "delete" or "create"
-  @param repo_string: e.g. "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]"
-  """
-  repo_dicts = json.loads(repo_string)
-
-  if not isinstance(repo_dicts, list):
-    repo_dicts = [repo_dicts]
-
-  if 0 == len(repo_dicts):
-    Logger.info("Repository list is empty. Ambari may not be managing the repositories.")
-  else:
-    Logger.info("Initializing {0} repositories".format(str(len(repo_dicts))))
-
-  for repo in repo_dicts:
-    if not 'baseUrl' in repo:
-      repo['baseUrl'] = None
-    if not 'mirrorsList' in repo:
-      repo['mirrorsList'] = None
-    
-    ubuntu_components = [ repo['repoName'] ] + _UBUNTU_REPO_COMPONENTS_POSTFIX
-    
-    Repository(repo['repoId'],
-               action = action,
-               base_url = repo['baseUrl'],
-               mirror_list = repo['mirrorsList'],
-               repo_file_name = repo['repoName'],
-               repo_template = repo_template,
-               components = ubuntu_components, # ubuntu specific
-    )
-
-def install_repos():
-  import params
-  if params.host_sys_prepped:
-    return
-
-  template = params.repo_rhel_suse if OSCheck.is_suse_family() or OSCheck.is_redhat_family() else params.repo_ubuntu
-  _alter_repo("create", params.repo_info, template)
-  if params.service_repo_info:
-    _alter_repo("create", params.service_repo_info, template)


[08/19] ambari git commit: AMBARI-19229. Remove HDP-3.0.0 stack definition from Ambari-2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/YARN_metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/YARN_metrics.json b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/YARN_metrics.json
deleted file mode 100644
index a66bb34..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/YARN_metrics.json
+++ /dev/null
@@ -1,3486 +0,0 @@
-{
-  "NODEMANAGER": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/cpu/cpu_idle": {
-              "metric": "cpu_idle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_nice": {
-              "metric": "cpu_nice",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_system": {
-              "metric": "cpu_system",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_user": {
-              "metric": "cpu_user",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_wio": {
-              "metric": "cpu_wio",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/disk_free": {
-              "metric": "disk_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/disk_total": {
-              "metric": "disk_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/load/load_fifteen": {
-              "metric": "load_fifteen",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/load/load_five": {
-              "metric": "load_five",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/load/load_one": {
-              "metric": "load_one",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_buffered": {
-              "metric": "mem_buffered",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_cached": {
-              "metric": "mem_cached",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_free": {
-              "metric": "mem_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_shared": {
-              "metric": "mem_shared",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/memory/mem_total": {
-              "metric": "mem_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/swap_free": {
-              "metric": "swap_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/bytes_in": {
-              "metric": "bytes_in",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/bytes_out": {
-              "metric": "bytes_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/pkts_in": {
-              "metric": "pkts_in",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/pkts_out": {
-              "metric": "pkts_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/process/proc_run": {
-              "metric": "proc_run",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/process/proc_total": {
-              "metric": "proc_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/read_count": {
-              "metric": "read_count",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/write_count": {
-              "metric": "write_count",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/read_bytes": {
-              "metric": "read_bytes",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/write_bytes": {
-              "metric": "write_bytes",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/read_time": {
-              "metric": "read_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/write_time": {
-              "metric": "write_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/read_bps":{
-              "metric":"read_bps",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/disk/write_bps":{
-              "metric":"write_bps",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/mapred/ShuffleOutputsFailed": {
-              "metric": "mapred.ShuffleMetrics.ShuffleOutputsFailed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "jvm.JvmMetrics.ThreadsRunnable",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "jvm.JvmMetrics.ThreadsNew",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationFailures": {
-              "metric": "rpc.rpc.RpcAuthorizationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "rpc.rpc.SentBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "jvm.JvmMetrics.LogWarn",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "jvm.JvmMetrics.GcCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "rpc.rpc.ReceivedBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "jvm.JvmMetrics.ThreadsBlocked",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "rpc.rpc.RpcQueueTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/AllocatedContainers": {
-              "metric": "yarn.NodeManagerMetrics.AllocatedContainers",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/AllocatedGB": {
-              "metric": "yarn.NodeManagerMetrics.AllocatedGB",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/AvailableGB": {
-              "metric": "yarn.NodeManagerMetrics.AvailableGB",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/AllocatedVCores": {
-              "metric": "yarn.NodeManagerMetrics.AllocatedVCores",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/AvailableVCores": {
-              "metric": "yarn.NodeManagerMetrics.AvailableVCores",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/BadLocalDirs": {
-              "metric": "yarn.NodeManagerMetrics.BadLocalDirs",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/BadLogDirs": {
-              "metric": "yarn.NodeManagerMetrics.BadLogDirs",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainerLaunchDurationAvgTime": {
-              "metric": "yarn.NodeManagerMetrics.ContainerLaunchDurationAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainerLaunchDurationNumOps": {
-              "metric": "yarn.NodeManagerMetrics.ContainerLaunchDurationNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersCompleted": {
-              "metric": "yarn.NodeManagerMetrics.ContainersCompleted",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersFailed": {
-              "metric": "yarn.NodeManagerMetrics.ContainersFailed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersIniting": {
-              "metric": "yarn.NodeManagerMetrics.ContainersIniting",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersKilled": {
-              "metric": "yarn.NodeManagerMetrics.ContainersKilled",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersLaunched": {
-              "metric": "yarn.NodeManagerMetrics.ContainersLaunched",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersRunning": {
-              "metric": "yarn.NodeManagerMetrics.ContainersRunning",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/GoodLocalDirsDiskUtilizationPerc": {
-              "metric": "yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/GoodLogDirsDiskUtilizationPerc": {
-              "metric": "yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "rpc.rpc.NumOpenConnections",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/mapred/ShuffleOutputsOK": {
-              "metric": "mapred.ShuffleMetrics.ShuffleOutputsOK",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "jvm.JvmMetrics.GcTimeMillis",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "jvm.JvmMetrics.ThreadsTerminated",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/callQueueLen": {
-              "metric": "rpc.rpc.CallQueueLength",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "jvm.JvmMetrics.LogInfo",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_num_ops": {
-              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/mapred/ShuffleConnections": {
-              "metric": "mapred.ShuffleMetrics.ShuffleConnections",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logError": {
-              "metric": "jvm.JvmMetrics.LogError",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "jvm.JvmMetrics.LogFatal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_avg_time": {
-              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/mapred/ShuffleOutputBytes": {
-              "metric": "mapred.ShuffleMetrics.ShuffleOutputBytes",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthenticationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationFailures": {
-              "metric": "rpc.rpc.RpcAuthenticationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcCountCopy": {
-              "metric": "jvm.JvmMetrics.GcCountCopy",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcCountMarkSweepCompact": {
-              "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcTimeMillisCopy": {
-              "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcTimeMillisMarkSweepCompact": {
-              "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/MemHeapMaxM": {
-              "metric": "jvm.JvmMetrics.MemHeapMaxM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/MemMaxM": {
-              "metric": "jvm.JvmMetrics.MemMaxM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/MemNonHeapMaxM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/DroppedPubAll": {
-              "metric": "metricssystem.MetricsSystem.DroppedPubAll",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumActiveSinks": {
-              "metric": "metricssystem.MetricsSystem.NumActiveSinks",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumActiveSources": {
-              "metric": "metricssystem.MetricsSystem.NumActiveSources",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumAllSinks": {
-              "metric": "metricssystem.MetricsSystem.NumAllSinks",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumAllSources": {
-              "metric": "metricssystem.MetricsSystem.NumAllSources",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/PublishAvgTime": {
-              "metric": "metricssystem.MetricsSystem.PublishAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/PublishNumOps": {
-              "metric": "metricssystem.MetricsSystem.PublishNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineAvgTime": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineDropped": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineDropped",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineNumOps": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineQsize": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineQsize",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/SnapshotAvgTime": {
-              "metric": "metricssystem.MetricsSystem.SnapshotAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/SnapshotNumOps": {
-              "metric": "metricssystem.MetricsSystem.SnapshotNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpc/RpcClientBackoff": {
-              "metric": "rpc.rpc.RpcClientBackoff",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetContainerStatusesAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.GetContainerStatusesAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetContainerStatusesNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.GetContainerStatusesNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/HeartbeatAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.HeartbeatAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/HeartbeatNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.HeartbeatNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/StartContainersAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.StartContainersAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/StartContainersNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.StartContainersNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/StopContainersAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.StopContainersAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/StopContainersNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.StopContainersNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/UgiMetrics/GetGroupsAvgTime": {
-              "metric": "ugi.UgiMetrics.GetGroupsAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/UgiMetrics/GetGroupsNumOps": {
-              "metric": "ugi.UgiMetrics.GetGroupsNumOps",
-              "pointInTime": true,
-              "temporal": true
-            }
-          }
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/cpu/cpu_idle": {
-              "metric": "cpu_idle",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/cpu/cpu_nice": {
-              "metric": "cpu_nice",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/cpu/cpu_system": {
-              "metric": "cpu_system",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/cpu/cpu_user": {
-              "metric": "cpu_user",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/cpu/cpu_wio": {
-              "metric": "cpu_wio",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/disk_free": {
-              "metric": "disk_free",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/disk_total": {
-              "metric": "disk_total",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/load/load_fifteen": {
-              "metric": "load_fifteen",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/load/load_five": {
-              "metric": "load_five",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/load/load_one": {
-              "metric": "load_one",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/memory/mem_buffered": {
-              "metric": "mem_buffered",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/memory/mem_cached": {
-              "metric": "mem_cached",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/memory/mem_free": {
-              "metric": "mem_free",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/memory/mem_shared": {
-              "metric": "mem_shared",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/memory/mem_total": {
-              "metric": "mem_total",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/memory/swap_free": {
-              "metric": "swap_free",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/network/bytes_in": {
-              "metric": "bytes_in",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/network/bytes_out": {
-              "metric": "bytes_out",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/network/pkts_in": {
-              "metric": "pkts_in",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/network/pkts_out": {
-              "metric": "pkts_out",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/process/proc_run": {
-              "metric": "proc_run",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/process/proc_total": {
-              "metric": "proc_total",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/read_count": {
-              "metric": "read_count",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/write_count": {
-              "metric": "write_count",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/read_bytes": {
-              "metric": "read_bytes",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/write_bytes": {
-              "metric": "write_bytes",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/read_time": {
-              "metric": "read_time",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/write_time": {
-              "metric": "write_time",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/mapred/ShuffleOutputsFailed": {
-              "metric": "mapred.ShuffleMetrics.ShuffleOutputsFailed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "jvm.JvmMetrics.ThreadsRunnable",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "jvm.JvmMetrics.ThreadsNew",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationFailures": {
-              "metric": "rpc.rpc.RpcAuthorizationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "rpc.rpc.SentBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "jvm.JvmMetrics.LogWarn",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "jvm.JvmMetrics.GcCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "rpc.rpc.ReceivedBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "jvm.JvmMetrics.ThreadsBlocked",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "rpc.rpc.RpcQueueTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/AllocatedContainers": {
-              "metric": "yarn.NodeManagerMetrics.AllocatedContainers",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/AllocatedGB": {
-              "metric": "yarn.NodeManagerMetrics.AllocatedGB",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/AvailableGB": {
-              "metric": "yarn.NodeManagerMetrics.AvailableGB",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/AllocatedVCores": {
-              "metric": "yarn.NodeManagerMetrics.AllocatedVCores",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/AvailableVCores": {
-              "metric": "yarn.NodeManagerMetrics.AvailableVCores",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/BadLocalDirs": {
-              "metric": "yarn.NodeManagerMetrics.BadLocalDirs",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/BadLogDirs": {
-              "metric": "yarn.NodeManagerMetrics.BadLogDirs",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainerLaunchDurationAvgTime": {
-              "metric": "yarn.NodeManagerMetrics.ContainerLaunchDurationAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainerLaunchDurationNumOps": {
-              "metric": "yarn.NodeManagerMetrics.ContainerLaunchDurationNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersCompleted": {
-              "metric": "yarn.NodeManagerMetrics.ContainersCompleted",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersFailed": {
-              "metric": "yarn.NodeManagerMetrics.ContainersFailed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersIniting": {
-              "metric": "yarn.NodeManagerMetrics.ContainersIniting",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersKilled": {
-              "metric": "yarn.NodeManagerMetrics.ContainersKilled",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersLaunched": {
-              "metric": "yarn.NodeManagerMetrics.ContainersLaunched",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersRunning": {
-              "metric": "yarn.NodeManagerMetrics.ContainersRunning",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/GoodLocalDirsDiskUtilizationPerc": {
-              "metric": "yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/GoodLogDirsDiskUtilizationPerc": {
-              "metric": "yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "rpc.rpc.NumOpenConnections",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/mapred/ShuffleOutputsOK": {
-              "metric": "mapred.ShuffleMetrics.ShuffleOutputsOK",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "jvm.JvmMetrics.GcTimeMillis",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "jvm.JvmMetrics.ThreadsTerminated",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/callQueueLen": {
-              "metric": "rpc.rpc.CallQueueLength",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "jvm.JvmMetrics.LogInfo",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_num_ops": {
-              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/mapred/ShuffleConnections": {
-              "metric": "mapred.ShuffleMetrics.ShuffleConnections",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logError": {
-              "metric": "jvm.JvmMetrics.LogError",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "jvm.JvmMetrics.LogFatal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_avg_time": {
-              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/mapred/ShuffleOutputBytes": {
-              "metric": "mapred.ShuffleMetrics.ShuffleOutputBytes",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthenticationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationFailures": {
-              "metric": "rpc.rpc.RpcAuthenticationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcCountCopy": {
-              "metric": "jvm.JvmMetrics.GcCountCopy",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcCountMarkSweepCompact": {
-              "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcTimeMillisCopy": {
-              "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcTimeMillisMarkSweepCompact": {
-              "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/MemHeapMaxM": {
-              "metric": "jvm.JvmMetrics.MemHeapMaxM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/MemMaxM": {
-              "metric": "jvm.JvmMetrics.MemMaxM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/MemNonHeapMaxM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/DroppedPubAll": {
-              "metric": "metricssystem.MetricsSystem.DroppedPubAll",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumActiveSinks": {
-              "metric": "metricssystem.MetricsSystem.NumActiveSinks",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumActiveSources": {
-              "metric": "metricssystem.MetricsSystem.NumActiveSources",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumAllSinks": {
-              "metric": "metricssystem.MetricsSystem.NumAllSinks",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumAllSources": {
-              "metric": "metricssystem.MetricsSystem.NumAllSources",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/PublishAvgTime": {
-              "metric": "metricssystem.MetricsSystem.PublishAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/PublishNumOps": {
-              "metric": "metricssystem.MetricsSystem.PublishNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineAvgTime": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineDropped": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineDropped",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineNumOps": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineQsize": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineQsize",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/SnapshotAvgTime": {
-              "metric": "metricssystem.MetricsSystem.SnapshotAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/SnapshotNumOps": {
-              "metric": "metricssystem.MetricsSystem.SnapshotNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpc/RpcClientBackoff": {
-              "metric": "rpc.rpc.RpcClientBackoff",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetContainerStatusesAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.GetContainerStatusesAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetContainerStatusesNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.GetContainerStatusesNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/HeartbeatAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.HeartbeatAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/HeartbeatNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.HeartbeatNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/StartContainersAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.StartContainersAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/StartContainersNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.StartContainersNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/StopContainersAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.StopContainersAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/StopContainersNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.StopContainersNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/UgiMetrics/GetGroupsAvgTime": {
-              "metric": "ugi.UgiMetrics.GetGroupsAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/UgiMetrics/GetGroupsNumOps": {
-              "metric": "ugi.UgiMetrics.GetGroupsNumOps",
-              "pointInTime": true,
-              "temporal": true
-            }
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "default": {
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapCommittedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsRunnable",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsNew",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/callQueueLen": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.CallQueueLength",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthorizationFailures": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthorizationFailures",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcQueueTimeAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginSuccessAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.SentBytes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogInfo",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogWarn",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginFailureNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcProcessingTime_num_ops": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcProcessingTimeNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.GcCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.ReceivedBytes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsBlocked",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcQueueTimeNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logError": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogError",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginFailureAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthorizationSuccesses": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthorizationSuccesses",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.NumOpenConnections",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogFatal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcProcessingTime_avg_time": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcProcessingTimeAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsWaiting",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginSuccessNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthenticationSuccesses": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthenticationSuccesses",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthenticationFailures": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthenticationFailures",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsTerminated",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/nodemanager/GoodLocalDirsDiskUtilizationPerc": {
-              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/nodemanager/GoodLogDirsDiskUtilizationPerc": {
-              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/nodemanager/AllocatedGB": {
-              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.AllocatedGB",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/nodemanager/AllocatedVCores": {
-              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.AllocatedVCores",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/nodemanager/BadLocalDirs": {
-              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.BadLocalDirs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/nodemanager/BadLogDirs": {
-              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.BadLogDirs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/nodemanager/ContainersFailed": {
-              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.ContainersFailed",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/nodemanager/AllocatedContainers": {
-              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.AllocatedContainers",
-              "pointInTime": true,
-              "temporal": false
-            }
-          }
-        }
-      }
-    ]
-  },
-  "RESOURCEMANAGER": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/rpcdetailed/FinishApplicationMasterNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsCompleted": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppsCompleted",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
-              "metric": "yarn.ClusterMetrics.NumUnhealthyNMs",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "jvm.JvmMetrics.ThreadsRunnable",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
-              "metric": "yarn.ClusterMetrics.NumRebootedNMs",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "jvm.JvmMetrics.ThreadsNew",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsSubmitted": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppsSubmitted",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/NumLostNMs": {
-              "metric": "yarn.ClusterMetrics.NumLostNMs",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedContainers": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedContainers",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "rpc.rpc.SentBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsKilled": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppsKilled",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/NumActiveNMs": {
-              "metric": "yarn.ClusterMetrics.NumActiveNMs",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "jvm.JvmMetrics.LogWarn",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsFailed": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppsFailed",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "jvm.JvmMetrics.GcCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "rpc.rpc.ReceivedBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/AllocateNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.AllocateNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcCountMarkSweepCompact": {
-              "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "jvm.JvmMetrics.ThreadsBlocked",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsRunning": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppsRunning",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ActiveApplications": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).ActiveApplications",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AMResourceLimitMB": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AMResourceLimitMB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AMResourceLimitVCores": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AMResourceLimitVCores",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ActiveUsers": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).ActiveUsers",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AggregateContainersAllocated": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AggregateContainersAllocated",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AggregateContainersReleased": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AggregateContainersReleased",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedVCores": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedVCores",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppAttemptFirstContainerAllocationDelayAvgTime": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppAttemptFirstContainerAllocationDelayAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppAttemptFirstContainerAllocationDelayNumOps": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppAttemptFirstContainerAllocationDelayNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AvailableVCores": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AvailableVCores",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingVCores": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).PendingVCores",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedMB": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).ReservedMB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedVCores": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).ReservedVCores",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/UsedAMResourceMB": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).UsedAMResourceMB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/UsedAMResourceVCores": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).UsedAMResourceVCores",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/running_0": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).running_0",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/running_1440": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).running_1440",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/running_300": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).running_300",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/running_60": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).running_60",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
-              "metric": "yarn.ClusterMetrics.NumDecommissionedNMs",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "rpc.rpc.RpcQueueTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "rpc.rpc.NumOpenConnections",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillisCopy": {
-              "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "jvm.JvmMetrics.GcTimeMillis",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingContainers": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).PendingContainers",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memMaxM": {
-              "metric": "jvm.JvmMetrics.MemMaxM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "jvm.JvmMetrics.ThreadsTerminated",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/AllocateAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.AllocateAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/GetApplicationReportNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.GetApplicationReportNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/FinishApplicationMasterAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/callQueueLen": {
-              "metric": "rpc.rpc.CallQueueLength",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/RegisterApplicationMasterNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AvailableMB": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AvailableMB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingMB": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).PendingMB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "jvm.JvmMetrics.LogInfo",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_num_ops": {
-              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/SubmitApplicationAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.SubmitApplicationAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/GetNewApplicationNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.GetNewApplicationNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsPending": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppsPending",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcCountCopy": {
-              "metric": "jvm.JvmMetrics.GcCountCopy",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logError": {
-              "metric": "jvm.JvmMetrics.LogError",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/SubmitApplicationNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.SubmitApplicationNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillisMarkSweepCompact": {
-              "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedMB": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedMB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "jvm.JvmMetrics.LogFatal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_avg_time": {
-              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/GetApplicationReportAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.GetApplicationReportAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/NodeHeartbeatAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.NodeHeartbeatAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/GetNewApplicationAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.GetNewApplicationAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/RegisterApplicationMasterAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedContainers": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).ReservedContainers",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcCountMarkSweepCompact": {
-              "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/MemHeapMaxM": {
-              "metric": "jvm.JvmMetrics.MemHeapMaxM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/MemNonHeapMaxM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/ThreadsBlocked": {
-              "metric": "jvm.JvmMetrics.ThreadsBlocked",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/DroppedPubAll": {
-              "metric": "metricssystem.MetricsSystem.DroppedPubAll",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumActiveSinks": {
-              "metric": "metricssystem.MetricsSystem.NumActiveSinks",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumActiveSources": {
-              "metric": "metricssystem.MetricsSystem.NumActiveSources",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumAllSinks": {
-              "metric": "metricssystem.MetricsSystem.NumAllSinks",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumAllSources": {
-              "metric": "metricssystem.MetricsSystem.NumAllSources",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/PublishAvgTime": {
-              "metric": "metricssystem.MetricsSystem.PublishAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/PublishNumOps": {
-              "metric": "metricssystem.MetricsSystem.PublishNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineAvgTime": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineDropped": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineDropped",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineNumOps": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineQsize": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineQsize",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/SnapshotAvgTime": {
-              "metric": "metricssystem.MetricsSystem.SnapshotAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/SnapshotNumOps": {
-              "metric": "metricssystem.MetricsSystem.SnapshotNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpc/RpcAuthenticationFailures": {
-              "metric": "rpc.rpc.RpcAuthenticationFailures",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpc/RpcAuthenticationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthenticationSuccesses",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpc/RpcAuthorizationFailures": {
-              "metric": "rpc.rpc.RpcAuthorizationFailures",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpc/RpcClientBackoff": {
-              "metric": "rpc.rpc.RpcClientBackoff",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/AllocateNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.AllocateNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetClusterMetricsAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.GetClusterMetricsAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetClusterMetricsNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.GetClusterMetricsNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetClusterNodesAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.GetClusterNodesAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetClusterNodesNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.GetClusterNodesNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetQueueInfoAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.GetQueueInfoAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetQueueInfoNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.GetQueueInfoNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetQueueUserAclsAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.GetQueueUserAclsAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetQueueUserAclsNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.GetQueueUserAclsNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/NodeHeartbeatNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.NodeHeartbeatNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/RegisterNodeManagerAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.RegisterNodeManagerAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/RegisterNodeManagerNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.RegisterNodeManagerNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/UgiMetrics/GetGroupsAvgTime": {
-              "metric": "ugi.UgiMetrics.GetGroupsAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/UgiMetrics/GetGroupsNumOps": {
-              "metric": "ugi.UgiMetrics.GetGroupsNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/AMLaunchDelayAvgTime": {
-              "metric": "yarn.ClusterMetrics.AMLaunchDelayAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/AMLaunchDelayNumOps": {
-              "metric": "yarn.ClusterMetrics.AMLaunchDelayNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/AMRegisterDelayAvgTime": {
-              "metric": "yarn.ClusterMetrics.AMRegisterDelayAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/AMRegisterDelayNumOps": {
-              "metric": "yarn.ClusterMetrics.AMRegisterDelayNumOps",
-              "pointInTime": true,
-              "temporal": true
-            }
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "default": {
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsFailed": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsFailed",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/rm_metrics/cluster/rebootedNMcount": {
-              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemHeapCommittedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
-              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumUnhealthyNMs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/HeapMemoryMax": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/HeapMemoryUsed": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/NonHeapMemoryMax": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/NonHeapMemoryUsed": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsRunnable",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsNew",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
-              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/runtime/StartTime": {
-              "metric": "java.lang:type=Runtime.StartTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsKilled": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsKilled",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthorizationFailures": {
-              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthorizationFailures",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AggregateContainersAllocated": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AggregateContainersAllocated",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/ClusterMetrics/NumLostNMs": {
-              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumLostNMs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginSuccessAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/StartTime": {
-              "metric": "java.lang:type=Runtime.StartTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedContainers": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedContainers",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsSubmitted": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsSubmitted",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.SentBytes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/ClusterMetrics/NumActiveNMs": {
-              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumActiveNMs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_300": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_300",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogWarn",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.GcCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.ReceivedBytes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsBlocked",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_60": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_60",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/ClusterMetrics/NumD

<TRUNCATED>
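
The queue-scoped entries in the metrics definition above rely on a capture-group substitution: the parenthesized part of the JMX bean name (for example ",q0=root,q1=default") is rewritten into a slash-separated queue path before being spliced into the Ambari metric key. A rough Python sketch of that translation, using a hypothetical bean attribute name purely for illustration:

  import re

  # Hypothetical JMX attribute name as exposed by the ResourceManager.
  bean_attr = "Hadoop:service=ResourceManager,name=QueueMetrics,q0=root,q1=default.AppsFailed"

  pattern = r"Hadoop:service=ResourceManager,name=QueueMetrics(.+)\.AppsFailed"
  suffix = re.match(pattern, bean_attr).group(1)       # ",q0=root,q1=default"

  # $1.replaceAll(",q(\d+)=","/").substring(1) from the JSON, expressed in Python.
  queue_path = re.sub(r",q(\d+)=", "/", suffix)[1:]    # "root/default"
  metric_key = "metrics/yarn/Queue/%s/AppsFailed" % queue_path
  print(metric_key)   # metrics/yarn/Queue/root/default/AppsFailed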

[02/19] ambari git commit: AMBARI-19229. Remove HDP-3.0.0 stack definition from Ambari-2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/shared_initialization.py
deleted file mode 100644
index 1609050..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/shared_initialization.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management.libraries.functions import stack_tools
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.core.resources.packaging import Package
-
-def install_packages():
-  import params
-  if params.host_sys_prepped:
-    return
-
-  packages = ['unzip', 'curl']
-  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
-    stack_selector_package = stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME)
-    packages.append(stack_selector_package)
-  Package(packages,
-          retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
-          retry_count=params.agent_stack_retry_count)
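
The install_packages hook above leans on the Package resource's retry options to ride out transient repository outages. A minimal standalone approximation of that behaviour, assuming a yum-based host and hypothetical retry settings (the real hook delegates everything to resource_management):

  import subprocess
  import time

  def install_with_retry(packages, retry_count=5, retry_delay=30):
      # Loose stand-in for Package(..., retry_on_repo_unavailability=..., retry_count=...).
      for attempt in range(1, retry_count + 1):
          if subprocess.call(["yum", "-y", "install"] + list(packages)) == 0:
              return
          time.sleep(retry_delay)
      raise RuntimeError("failed to install %s after %d attempts" % (packages, retry_count))

  install_with_retry(["unzip", "curl"])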

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py
deleted file mode 100644
index 14b9d99..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py
+++ /dev/null
@@ -1,29 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-class BeforeRestartHook(Hook):
-
-  def hook(self, env):
-    self.run_custom_hook('before-START')
-
-if __name__ == "__main__":
-  BeforeRestartHook().execute()
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh
deleted file mode 100644
index 68aa96d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export bin_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  /var/lib/ambari-agent/ambari-sudo.sh rm -f ${mark_file}
-  /var/lib/ambari-agent/ambari-sudo.sh mkdir -p ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    /var/lib/ambari-agent/ambari-sudo.sh su ${hdfs_user} - -s /bin/bash -c "export PATH=$PATH:$bin_dir ; yes Y | hdfs --config ${conf_dir} ${command}"
-    (( EXIT_CODE = $EXIT_CODE | $? ))
-  else
-    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-
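
checkForFormat.sh above only formats the NameNode when no marker directory exists and every configured name directory is empty. The same decision logic, sketched in Python (the name-directory path in the example call is hypothetical; the actual format is still driven by the shell script via "hdfs namenode -format"):

  import os

  def namenode_needs_format(name_dirs, mark_dir):
      # Marker directory present: the NameNode was already formatted.
      if os.path.isdir(mark_dir):
          return False
      # Refuse to format if any configured name directory already holds data.
      non_empty = [d for d in name_dirs if os.path.isdir(d) and os.listdir(d)]
      if non_empty:
          raise RuntimeError("non-empty NameNode dirs, not formatting: %s" % non_empty)
      return True

  print(namenode_needs_format(["/hadoop/hdfs/namenode"],
                              "/var/run/hadoop/hdfs/namenode-formatted"))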

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/task-log4j.properties b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/task-log4j.properties
deleted file mode 100644
index 7e12962..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/task-log4j.properties
+++ /dev/null
@@ -1,134 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-#
-# Job Summary Appender 
-#
-# Use following logger to send summary to separate file defined by 
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-# 
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollver at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
- 
-# Removes "deprecated" messages
-log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/topology_script.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/topology_script.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/topology_script.py
deleted file mode 100644
index 0f7a55c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/topology_script.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import sys, os
-from string import join
-import ConfigParser
-
-
-DEFAULT_RACK = "/default-rack"
-DATA_FILE_NAME =  os.path.dirname(os.path.abspath(__file__)) + "/topology_mappings.data"
-SECTION_NAME = "network_topology"
-
-class TopologyScript():
-
-  def load_rack_map(self):
-    try:
-      #RACK_MAP contains both host name vs rack and ip vs rack mappings
-      mappings = ConfigParser.ConfigParser()
-      mappings.read(DATA_FILE_NAME)
-      return dict(mappings.items(SECTION_NAME))
-    except ConfigParser.NoSectionError:
-      return {}
-
-  def get_racks(self, rack_map, args):
-    if len(args) == 1:
-      return DEFAULT_RACK
-    else:
-      return join([self.lookup_by_hostname_or_ip(input_argument, rack_map) for input_argument in args[1:]],)
-
-  def lookup_by_hostname_or_ip(self, hostname_or_ip, rack_map):
-    #try looking up by hostname
-    rack = rack_map.get(hostname_or_ip)
-    if rack is not None:
-      return rack
-    #try looking up by ip
-    rack = rack_map.get(self.extract_ip(hostname_or_ip))
-    #try by localhost since hadoop could be passing in 127.0.0.1 which might not be mapped
-    return rack if rack is not None else rack_map.get("localhost.localdomain", DEFAULT_RACK)
-
-  #strips out port and slashes in case hadoop passes in something like 127.0.0.1/127.0.0.1:50010
-  def extract_ip(self, container_string):
-    return container_string.split("/")[0].split(":")[0]
-
-  def execute(self, args):
-    rack_map = self.load_rack_map()
-    rack = self.get_racks(rack_map, args)
-    print rack
-
-if __name__ == "__main__":
-  TopologyScript().execute(sys.argv)
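
Hadoop invokes this script through net.topology.script.file.name, passing host names or IPs as arguments and reading the rack string from stdout. A condensed illustration of the lookup, with a hypothetical in-memory rack map instead of topology_mappings.data (the localhost special case is omitted):

  # Hypothetical mapping; the real script loads it from the [network_topology]
  # section of topology_mappings.data next to the script.
  rack_map = {"dn1.example.com": "/rack-01", "10.0.0.11": "/rack-01"}

  def resolve(host_or_ip):
      # Hostname first, then the bare IP (slashes and ports stripped),
      # falling back to the default rack.
      ip = host_or_ip.split("/")[0].split(":")[0]
      return rack_map.get(host_or_ip) or rack_map.get(ip) or "/default-rack"

  print(" ".join(resolve(arg) for arg in ["10.0.0.11", "dn2.example.com"]))
  # -> /rack-01 /default-rack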

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/hook.py
deleted file mode 100644
index f21e4b1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/hook.py
+++ /dev/null
@@ -1,39 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from rack_awareness import create_topology_script_and_mapping
-from shared_initialization import setup_hadoop, setup_configs, create_javahome_symlink
-
-class BeforeStartHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    self.run_custom_hook('before-ANY')
-    env.set_params(params)
-
-    setup_hadoop()
-    setup_configs()
-    create_javahome_symlink()
-    create_topology_script_and_mapping()
-
-if __name__ == "__main__":
-  BeforeStartHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
deleted file mode 100644
index d838211..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
+++ /dev/null
@@ -1,326 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import default
-from resource_management.libraries.functions import format_jvm_option
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.version import format_stack_version, compare_versions
-from ambari_commons.os_check import OSCheck
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-
-config = Script.get_config()
-
-# Whether to skip copying fast-hdfs-resource.jar to /var/lib/ambari-agent/lib/
-# This is required if tarballs are going to be copied to HDFS, so set to False
-sysprep_skip_copy_fast_jar_hdfs = default("/configurations/cluster-env/sysprep_skip_copy_fast_jar_hdfs", False)
-
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-
-dfs_type = default("/commandParams/dfs_type", "")
-stack_root = Script.get_stack_root()
-hadoop_conf_dir = "/etc/hadoop/conf"
-component_list = default("/localComponents", [])
-
-hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
-
-hadoop_metrics2_properties_content = config['configurations']['hadoop-metrics2.properties']['content']
-
-# hadoop default params
-mapreduce_libs_path = format("{stack_root}/current/hadoop-mapreduce-client/*")
-
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
-hadoop_lib_home = stack_select.get_hadoop_dir("lib")
-hadoop_bin = stack_select.get_hadoop_dir("sbin")
-hadoop_home = stack_select.get_hadoop_dir("home")
-create_lib_snappy_symlinks = False
-
-  
-current_service = config['serviceName']
-
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-#users and groups
-has_hadoop_env = 'hadoop-env' in config['configurations']
-mapred_user = config['configurations']['mapred-env']['mapred_user']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-yarn_user = config['configurations']['yarn-env']['yarn_user']
-
-user_group = config['configurations']['cluster-env']['user_group']
-
-#hosts
-hostname = config["hostname"]
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
-
-has_namenode = not len(namenode_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_slaves = not len(slave_hosts) == 0
-has_oozie_server = not len(oozie_servers) == 0
-has_hcat_server_host = not len(hcat_server_hosts) == 0
-has_hive_server_host = not len(hive_server_host) == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_zk_host = not len(zk_hosts) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_metric_collector = not len(ams_collector_hosts) == 0
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-metric_collector_port = None
-if has_metric_collector:
-  if 'cluster-env' in config['configurations'] and \
-      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
-    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
-  else:
-    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
-    if metric_collector_web_address.find(':') != -1:
-      metric_collector_port = metric_collector_web_address.split(':')[1]
-    else:
-      metric_collector_port = '6188'
-  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
-    metric_collector_protocol = 'https'
-  else:
-    metric_collector_protocol = 'http'
-  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
-  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
-  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
-
-  pass
-metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
-metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
-
-# Cluster Zookeeper quorum
-zookeeper_quorum = None
-if has_zk_host:
-  if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']:
-    zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort']
-  else:
-    zookeeper_clientPort = '2181'
-  zookeeper_quorum = (':' + zookeeper_clientPort + ',').join(config['clusterHostInfo']['zookeeper_hosts'])
-  # last port config
-  zookeeper_quorum += ':' + zookeeper_clientPort
-
-#hadoop params
-
-if has_namenode or dfs_type == 'HCFS':
-  hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
-  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
-  task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
-
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hbase_tmp_dir = "/tmp/hbase-hbase"
-#db params
-server_db_name = config['hostLevelParams']['db_name']
-db_driver_filename = config['hostLevelParams']['db_driver_filename']
-oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
-mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
-ambari_server_resources = config['hostLevelParams']['jdk_location']
-oracle_driver_symlink_url = format("{ambari_server_resources}oracle-jdbc-driver.jar")
-mysql_driver_symlink_url = format("{ambari_server_resources}mysql-jdbc-driver.jar")
-
-ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
-ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
-ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
-ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
-
-if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
-  rca_enabled =  config['configurations']['hadoop-env']['rca_enabled']
-else:
-  rca_enabled = False
-rca_disabled_prefix = "###"
-if rca_enabled == True:
-  rca_prefix = ""
-else:
-  rca_prefix = rca_disabled_prefix
-
-#hadoop-env.sh
-java_home = config['hostLevelParams']['java_home']
-
-jsvc_path = "/usr/lib/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-#log4j.properties
-
-yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn")
-
-dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
-
-#log4j.properties
-if (('hdfs-log4j' in config['configurations']) and ('content' in config['configurations']['hdfs-log4j'])):
-  log4j_props = config['configurations']['hdfs-log4j']['content']
-  if (('yarn-log4j' in config['configurations']) and ('content' in config['configurations']['yarn-log4j'])):
-    log4j_props += config['configurations']['yarn-log4j']['content']
-else:
-  log4j_props = None
-
-refresh_topology = False
-command_params = config["commandParams"] if "commandParams" in config else None
-if command_params is not None:
-  refresh_topology = bool(command_params["refresh_topology"]) if "refresh_topology" in command_params else False
-  
-ambari_libs_dir = "/var/lib/ambari-agent/lib"
-is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-
-#host info
-all_hosts = default("/clusterHostInfo/all_hosts", [])
-all_racks = default("/clusterHostInfo/all_racks", [])
-all_ipv4_ips = default("/clusterHostInfo/all_ipv4_ips", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-
-#topology files
-net_topology_script_file_path = "/etc/hadoop/conf/topology_script.py"
-net_topology_script_dir = os.path.dirname(net_topology_script_file_path)
-net_topology_mapping_data_file_name = 'topology_mappings.data'
-net_topology_mapping_data_file_path = os.path.join(net_topology_script_dir, net_topology_mapping_data_file_name)
-
-#Added logic to create /tmp and /user directory for HCFS stack.  
-has_core_site = 'core-site' in config['configurations']
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-kinit_path_local = get_kinit_path()
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
-hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
-hdfs_site = config['configurations']['hdfs-site']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-smoke_hdfs_user_dir = format("/user/{smoke_user}")
-smoke_hdfs_user_mode = 0770
-
-
-##### Namenode RPC ports - metrics config section start #####
-
-# Figure out the rpc ports for current namenode
-nn_rpc_client_port = None
-nn_rpc_dn_port = None
-nn_rpc_healthcheck_port = None
-
-namenode_id = None
-namenode_rpc = None
-
-dfs_ha_enabled = False
-dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
-if dfs_ha_nameservices is None:
-  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
-dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-
-dfs_ha_namemodes_ids_list = []
-other_namenode_id = None
-
-if dfs_ha_namenode_ids:
- dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
- dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
- if dfs_ha_namenode_ids_array_len > 1:
-   dfs_ha_enabled = True
-
-if dfs_ha_enabled:
- for nn_id in dfs_ha_namemodes_ids_list:
-   nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
-   if hostname in nn_host:
-     namenode_id = nn_id
-     namenode_rpc = nn_host
-   pass
- pass
-else:
- namenode_rpc = default('/configurations/hdfs-site/dfs.namenode.rpc-address', None)
-
-if namenode_rpc:
- nn_rpc_client_port = namenode_rpc.split(':')[1].strip()
-
-if dfs_ha_enabled:
- dfs_service_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.servicerpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
- dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
-else:
- dfs_service_rpc_address = default('/configurations/hdfs-site/dfs.namenode.servicerpc-address', None)
- dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address'), None)
-
-if dfs_service_rpc_address:
- nn_rpc_dn_port = dfs_service_rpc_address.split(':')[1].strip()
-
-if dfs_lifeline_rpc_address:
- nn_rpc_healthcheck_port = dfs_lifeline_rpc_address.split(':')[1].strip()
-
-is_nn_client_port_configured = False if nn_rpc_client_port is None else True
-is_nn_dn_port_configured = False if nn_rpc_dn_port is None else True
-is_nn_healthcheck_port_configured = False if nn_rpc_healthcheck_port is None else True
-
-##### end #####
-
-import functools
-#create partial functions with common arguments for every HdfsResource call
-#to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code
-HdfsResource = functools.partial(
-  HdfsResource,
-  user=hdfs_user,
-  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir,
-  principal_name = hdfs_principal_name,
-  hdfs_site = hdfs_site,
-  default_fs = default_fs,
-  immutable_paths = get_not_managed_resources(),
-  dfs_type = dfs_type
-)
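
The functools.partial at the end of params.py pre-binds the cluster-wide keyword arguments (user, keytab, hdfs-site, default FS, and so on) so that service code can call params.HdfsResource(path, ...) with only the per-call details. A small self-contained illustration of the same pattern, with a stub and hypothetical values in place of the real HdfsResource class:

  import functools

  def hdfs_resource(path, action=None, type=None, user=None, default_fs=None, **kwargs):
      # Stub standing in for resource_management's HdfsResource provider.
      print("%s %s %s as %s on %s" % (action, type, path, user, default_fs))

  # Bind the arguments that are the same for every call, as params.py does.
  HdfsResource = functools.partial(hdfs_resource, user="hdfs", default_fs="hdfs://ns1")

  HdfsResource("/tmp", type="directory", action="create_on_execute")
  # -> create_on_execute directory /tmp as hdfs on hdfs://ns1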

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/rack_awareness.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/rack_awareness.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/rack_awareness.py
deleted file mode 100644
index 548f051..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/rack_awareness.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management.core.resources import File
-from resource_management.core.source import StaticFile, Template
-from resource_management.libraries.functions import format
-
-
-def create_topology_mapping():
-  import params
-
-  File(params.net_topology_mapping_data_file_path,
-       content=Template("topology_mappings.data.j2"),
-       owner=params.hdfs_user,
-       group=params.user_group,
-       only_if=format("test -d {net_topology_script_dir}"))
-
-def create_topology_script():
-  import params
-
-  File(params.net_topology_script_file_path,
-       content=StaticFile('topology_script.py'),
-       mode=0755,
-       only_if=format("test -d {net_topology_script_dir}"))
-
-def create_topology_script_and_mapping():
-  import params
-  if params.has_hadoop_env:
-    create_topology_mapping()
-    create_topology_script()

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py
deleted file mode 100644
index 5dce8e0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py
+++ /dev/null
@@ -1,191 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
-
-from resource_management import *
-
-def setup_hadoop():
-  """
-  Setup hadoop files and directories
-  """
-  import params
-
-  Execute(("setenforce","0"),
-          only_if="test -f /selinux/enforce",
-          not_if="(! which getenforce ) || (which getenforce && getenforce | grep -q Disabled)",
-          sudo=True,
-  )
-
-  #directories
-  if params.has_namenode or params.dfs_type == 'HCFS':
-    Directory(params.hdfs_log_dir_prefix,
-              create_parents = True,
-              owner='root',
-              group=params.user_group,
-              mode=0775,
-              cd_access='a',
-    )
-    if params.has_namenode:
-      Directory(params.hadoop_pid_dir_prefix,
-              create_parents = True,
-              owner='root',
-              group='root',
-              cd_access='a',
-      )
-    Directory(params.hadoop_tmp_dir,
-              create_parents = True,
-              owner=params.hdfs_user,
-              cd_access='a',
-              )
-  #files
-    if params.security_enabled:
-      tc_owner = "root"
-    else:
-      tc_owner = params.hdfs_user
-      
-    # if WebHDFS is not enabled we need this jar to create hadoop folders and copy tarballs to HDFS.
-    if params.sysprep_skip_copy_fast_jar_hdfs:
-      print "Skipping copying of fast-hdfs-resource.jar as host is sys prepped"
-    elif params.dfs_type == 'HCFS' or not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
-      # for source-code of jar goto contrib/fast-hdfs-resource
-      File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
-           mode=0644,
-           content=StaticFile("fast-hdfs-resource.jar")
-      )
-      
-    if os.path.exists(params.hadoop_conf_dir):
-      File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'),
-           owner=tc_owner,
-           content=Template('commons-logging.properties.j2')
-      )
-
-      health_check_template_name = "health_check"
-      File(os.path.join(params.hadoop_conf_dir, health_check_template_name),
-           owner=tc_owner,
-           content=Template(health_check_template_name + ".j2")
-      )
-
-      log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
-      if (params.log4j_props != None):
-        File(log4j_filename,
-             mode=0644,
-             group=params.user_group,
-             owner=params.hdfs_user,
-             content=params.log4j_props
-        )
-      elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
-        File(log4j_filename,
-             mode=0644,
-             group=params.user_group,
-             owner=params.hdfs_user,
-        )
-
-      File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
-           owner=params.hdfs_user,
-           group=params.user_group,
-           content=InlineTemplate(params.hadoop_metrics2_properties_content)
-      )
-
-    if params.dfs_type == 'HCFS' and params.has_core_site and 'ECS_CLIENT' in params.component_list:
-       create_dirs()
-
-    create_microsoft_r_dir()
-
-
-def setup_configs():
-  """
-  Creates configs for services HDFS mapred
-  """
-  import params
-
-  if params.has_namenode or params.dfs_type == 'HCFS':
-    if os.path.exists(params.hadoop_conf_dir):
-      File(params.task_log4j_properties_location,
-           content=StaticFile("task-log4j.properties"),
-           mode=0755
-      )
-
-    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
-      File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
-           owner=params.hdfs_user,
-           group=params.user_group
-      )
-    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
-      File(os.path.join(params.hadoop_conf_dir, 'masters'),
-                owner=params.hdfs_user,
-                group=params.user_group
-      )
-
-  generate_include_file()
-
-
-def generate_include_file():
-  import params
-
-  if params.has_namenode and params.dfs_hosts and params.has_slaves:
-    include_hosts_list = params.slave_hosts
-    File(params.dfs_hosts,
-         content=Template("include_hosts_list.j2"),
-         owner=params.hdfs_user,
-         group=params.user_group
-    )
-
-def create_javahome_symlink():
-  if os.path.exists("/usr/jdk/jdk1.6.0_31") and not os.path.exists("/usr/jdk64/jdk1.6.0_31"):
-    Directory("/usr/jdk64/",
-         create_parents = True,
-    )
-    Link("/usr/jdk/jdk1.6.0_31",
-         to="/usr/jdk64/jdk1.6.0_31",
-    )
-
-def create_dirs():
-   import params
-   params.HdfsResource(params.hdfs_tmp_dir,
-                       type="directory",
-                       action="create_on_execute",
-                       owner=params.hdfs_user,
-                       mode=0777
-   )
-   params.HdfsResource(params.smoke_hdfs_user_dir,
-                       type="directory",
-                       action="create_on_execute",
-                       owner=params.smoke_user,
-                       mode=params.smoke_hdfs_user_mode
-   )
-   params.HdfsResource(None,
-                      action="execute"
-   )
-
-def create_microsoft_r_dir():
-  import params
-  if 'MICROSOFT_R_NODE_CLIENT' in params.component_list and params.default_fs:
-    directory = '/user/RevoShare'
-    try:
-      params.HdfsResource(directory,
-                          type="directory",
-                          action="create_on_execute",
-                          owner=params.hdfs_user,
-                          mode=0777)
-      params.HdfsResource(None, action="execute")
-    except Exception as exception:
-      Logger.warning("Could not check the existence of {0} on DFS while starting {1}, exception: {2}".format(directory, params.current_service, str(exception)))
-
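
Several resources in this hook rely on only_if/not_if guards, such as the setenforce call in setup_hadoop that is skipped unless /selinux/enforce exists and SELinux is not already disabled. A rough Python sketch of that guard pattern, assuming shell access (the real code uses resource_management's Execute resource and runs with sudo):

  import subprocess

  def run_guarded(cmd, only_if=None, not_if=None):
      # Run cmd only when the only_if check succeeds and the not_if check fails,
      # mirroring Execute(only_if=..., not_if=...).
      if only_if and subprocess.call(only_if, shell=True) != 0:
          return
      if not_if and subprocess.call(not_if, shell=True) == 0:
          return
      subprocess.check_call(cmd, shell=True)

  run_guarded("setenforce 0",
              only_if="test -f /selinux/enforce",
              not_if="(! which getenforce ) || (which getenforce && getenforce | grep -q Disabled)")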

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/commons-logging.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/commons-logging.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/commons-logging.properties.j2
deleted file mode 100644
index 2197ba5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/commons-logging.properties.j2
+++ /dev/null
@@ -1,43 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#Logging Implementation
-
-#Log4J
-org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
-
-#JDK Logger
-#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/exclude_hosts_list.j2
deleted file mode 100644
index 1adba80..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in hdfs_exclude_file %}
-{{host}}
-{% endfor %}
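
The template above writes one entry per host in hdfs_exclude_file, producing the HDFS exclude-hosts file used when decommissioning DataNodes. As a rough illustration, the snippet below renders the same loop with the standard jinja2 package (Ambari uses its own Template wrapper, but the substitution works the same way); the hostnames are made up and jinja2 must be installed separately.

    # Render the exclude-hosts loop with plain jinja2 to see the output format.
    from jinja2 import Template

    template_text = (
        "{% for host in hdfs_exclude_file %}\n"
        "{{host}}\n"
        "{% endfor %}\n"
    )
    rendered = Template(template_text).render(
        hdfs_exclude_file=["dn1.example.com", "dn2.example.com"]
    )
    print(rendered)  # each decommissioned DataNode hostname on its own line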

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
deleted file mode 100644
index 2f3aab6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
+++ /dev/null
@@ -1,105 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-{% if has_ganglia_server %}
-*.period=60
-
-*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-*.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Hook up to the server
-namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
-datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
-jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
-tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
-maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
-reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
-resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
-nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
-historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
-journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
-nimbus.sink.ganglia.servers={{ganglia_server_host}}:8649
-supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650
-
-resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
-
-{% endif %}
-
-{% if has_metric_collector %}
-
-*.period={{metrics_collection_period}}
-*.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
-*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-*.sink.timeline.period={{metrics_collection_period}}
-*.sink.timeline.sendInterval={{metrics_report_interval}}000
-*.sink.timeline.slave.host.name={{hostname}}
-*.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}
-*.sink.timeline.protocol={{metric_collector_protocol}}
-*.sink.timeline.port={{metric_collector_port}}
-
-# HTTPS properties
-*.sink.timeline.truststore.path = {{metric_truststore_path}}
-*.sink.timeline.truststore.type = {{metric_truststore_type}}
-*.sink.timeline.truststore.password = {{metric_truststore_password}}
-
-datanode.sink.timeline.collector.hosts={{ams_collector_hosts}}
-namenode.sink.timeline.collector.hosts={{ams_collector_hosts}}
-resourcemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
-nodemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
-jobhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
-journalnode.sink.timeline.collector.hosts={{ams_collector_hosts}}
-applicationhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
-
-resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue
-
-{% if is_nn_client_port_configured %}
-# Namenode rpc ports customization
-namenode.sink.timeline.metric.rpc.client.port={{nn_rpc_client_port}}
-{% endif %}
-{% if is_nn_dn_port_configured %}
-namenode.sink.timeline.metric.rpc.datanode.port={{nn_rpc_dn_port}}
-{% endif %}
-{% if is_nn_healthcheck_port_configured %}
-namenode.sink.timeline.metric.rpc.healthcheck.port={{nn_rpc_healthcheck_port}}
-{% endif %}
-
-{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/health_check.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/health_check.j2
deleted file mode 100644
index 0a03d17..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/health_check.j2
+++ /dev/null
@@ -1,81 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-err=0;
-
-function check_disks {
-
-  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
-    fsdev=""
-    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
-    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
-      msg_="$msg_ $m(u)"
-    else
-      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
-    fi
-  done
-
-  if [ -z "$msg_" ] ; then
-    echo "disks ok" ; exit 0
-  else
-    echo "$msg_" ; exit 2
-  fi
-
-}
-
-# Run all checks
-for check in disks ; do
-  msg=`check_${check}` ;
-  if [ $? -eq 0 ] ; then
-    ok_msg="$ok_msg$msg,"
-  else
-    err_msg="$err_msg$msg,"
-  fi
-done
-
-if [ ! -z "$err_msg" ] ; then
-  echo -n "ERROR $err_msg "
-fi
-if [ ! -z "$ok_msg" ] ; then
-  echo -n "OK: $ok_msg"
-fi
-
-echo
-
-# Success!
-exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/include_hosts_list.j2
deleted file mode 100644
index 4a9e713..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/include_hosts_list.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in slave_hosts %}
-{{host}}
-{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/topology_mappings.data.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/topology_mappings.data.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/topology_mappings.data.j2
deleted file mode 100644
index 15034d6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/topology_mappings.data.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-[network_topology]
-{% for host in all_hosts %}
-{% if host in slave_hosts %}
-{{host}}={{all_racks[loop.index-1]}}
-{{all_ipv4_ips[loop.index-1]}}={{all_racks[loop.index-1]}}
-{% endif %}
-{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/3.0/kerberos.json
deleted file mode 100644
index 9579d0f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/kerberos.json
+++ /dev/null
@@ -1,78 +0,0 @@
-{
-  "properties": {
-    "realm": "${kerberos-env/realm}",
-    "keytab_dir": "/etc/security/keytabs",
-    "additional_realms": ""
-  },
-  "identities": [
-    {
-      "name": "spnego",
-      "principal": {
-        "value": "HTTP/_HOST@${realm}",
-        "type": "service"
-      },
-      "keytab": {
-        "file": "${keytab_dir}/spnego.service.keytab",
-        "owner": {
-          "name": "root",
-          "access": "r"
-        },
-        "group": {
-          "name": "${cluster-env/user_group}",
-          "access": "r"
-        }
-      }
-    },
-    {
-      "name": "smokeuser",
-      "principal": {
-        "value": "${cluster-env/smokeuser}-${cluster_name|toLower()}@${realm}",
-        "type": "user",
-        "configuration": "cluster-env/smokeuser_principal_name",
-        "local_username": "${cluster-env/smokeuser}"
-      },
-      "keytab": {
-        "file": "${keytab_dir}/smokeuser.headless.keytab",
-        "owner": {
-          "name": "${cluster-env/smokeuser}",
-          "access": "r"
-        },
-        "group": {
-          "name": "${cluster-env/user_group}",
-          "access": "r"
-        },
-        "configuration": "cluster-env/smokeuser_keytab"
-      }
-    }
-  ],
-  "services": [
-    {
-      "name": "AMBARI",
-      "components": [
-        {
-          "name": "AMBARI_SERVER",
-          "identities": [
-            {
-              "name": "ambari-server",
-              "principal": {
-                "value": "ambari-server-${cluster_name|toLower()}@${realm}",
-                "type": "user",
-                "configuration": "cluster-env/ambari_principal_name"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/ambari.server.keytab",
-                "owner": {
-                  "access": "r"
-                }
-              }
-            },
-            {
-              "name" : "ambari-server_spnego",
-              "reference" : "/spnego"
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}
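
The stack-level descriptor above uses ${config-type/property} references (for example ${kerberos-env/realm}) that the Ambari server resolves against cluster configuration before creating principals and keytabs. The resolver below is a hypothetical, simplified sketch of that substitution only; it ignores filters such as |toLower(), is not Ambari's actual implementation, and the sample config values are invented.

    import re

    # Hypothetical resolver for ${config-type/property} references.
    def resolve(value, configs):
        def lookup(match):
            ref = match.group(1)
            if "/" not in ref:
                return match.group(0)  # leave ${keytab_dir}-style variables alone
            config_type, prop = ref.split("/", 1)
            return configs.get(config_type, {}).get(prop, match.group(0))
        return re.sub(r"\$\{([^}|]+)\}", lookup, value)

    configs = {"kerberos-env": {"realm": "EXAMPLE.COM"},
               "cluster-env": {"smokeuser": "ambari-qa"}}
    print(resolve("HTTP/_HOST@${kerberos-env/realm}", configs))           # HTTP/_HOST@EXAMPLE.COM
    print(resolve("${cluster-env/smokeuser}.headless.keytab", configs))   # ambari-qa.headless.keytab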

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml
deleted file mode 100644
index 0364d41..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <versions>
-    <active>true</active>
-  </versions>
-  <minJdk>1.7</minJdk>
-  <maxJdk>1.8</maxJdk>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
deleted file mode 100644
index dd87b72..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
+++ /dev/null
@@ -1,323 +0,0 @@
-{
-  "stack_features": [
-    {
-      "name": "snappy",
-      "description": "Snappy compressor/decompressor support",
-      "min_version": "2.0.0.0",
-      "max_version": "2.2.0.0"
-    },
-    {
-      "name": "lzo",
-      "description": "LZO libraries support",
-      "min_version": "2.2.1.0"
-    },
-    {
-      "name": "express_upgrade",
-      "description": "Express upgrade support",
-      "min_version": "2.1.0.0"
-    },
-    {
-      "name": "rolling_upgrade",
-      "description": "Rolling upgrade support",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "config_versioning",
-      "description": "Configurable versions support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "datanode_non_root",
-      "description": "DataNode running as non-root support (AMBARI-7615)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "remove_ranger_hdfs_plugin_env",
-      "description": "HDFS removes Ranger env files (AMBARI-14299)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "ranger",
-      "description": "Ranger Service support",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_tagsync_component",
-      "description": "Ranger Tagsync component support (AMBARI-14383)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "phoenix",
-      "description": "Phoenix Service support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "nfs",
-      "description": "NFS support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "tez_for_spark",
-      "description": "Tez dependency for Spark",
-      "min_version": "2.2.0.0",
-      "max_version": "2.3.0.0"
-    },
-    {
-      "name": "timeline_state_store",
-      "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "copy_tarball_to_hdfs",
-      "description": "Copy tarball to HDFS support (AMBARI-12113)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "spark_16plus",
-      "description": "Spark 1.6+",
-      "min_version": "2.4.0.0"
-    },
-    {
-      "name": "spark_thriftserver",
-      "description": "Spark Thrift Server",
-      "min_version": "2.3.2.0"
-    },
-    {
-      "name": "storm_kerberos",
-      "description": "Storm Kerberos support (AMBARI-7570)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "storm_ams",
-      "description": "Storm AMS integration (AMBARI-10710)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "create_kafka_broker_id",
-      "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
-      "min_version": "2.2.0.0",
-      "max_version": "2.3.0.0"
-    },
-    {
-      "name": "kafka_listeners",
-      "description": "Kafka listeners (AMBARI-10984)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "kafka_kerberos",
-      "description": "Kafka Kerberos support (AMBARI-10984)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "pig_on_tez",
-      "description": "Pig on Tez support (AMBARI-7863)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_usersync_non_root",
-      "description": "Ranger Usersync as non-root user (AMBARI-10416)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "ranger_audit_db_support",
-      "description": "Ranger Audit to DB support",
-      "min_version": "2.2.0.0",
-      "max_version": "2.5.0.0"
-    },
-    {
-      "name": "accumulo_kerberos_user_auth",
-      "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "knox_versioned_data_dir",
-      "description": "Use versioned data dir for Knox (AMBARI-13164)",
-      "min_version": "2.3.2.0"
-    },
-    {
-      "name": "knox_sso_topology",
-      "description": "Knox SSO Topology support (AMBARI-13975)",
-      "min_version": "2.3.8.0"
-    },
-    {
-      "name": "atlas_rolling_upgrade",
-      "description": "Rolling upgrade support for Atlas",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "oozie_admin_user",
-      "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_create_hive_tez_configs",
-      "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_setup_shared_lib",
-      "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_host_kerberos",
-      "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
-      "min_version": "2.0.0.0",
-      "max_version": "2.2.0.0"
-    },
-    {
-      "name": "falcon_extensions",
-      "description": "Falcon Extension",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "hive_metastore_upgrade_schema",
-      "description": "Hive metastore upgrade schema support (AMBARI-11176)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_server_interactive",
-      "description": "Hive server interactive support (AMBARI-15573)",
-      "min_version": "2.5.0.0"
-     },
-    {
-      "name": "hive_webhcat_specific_configs",
-      "description": "Hive webhcat specific configurations support (AMBARI-12364)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_purge_table",
-      "description": "Hive purge table support (AMBARI-12260)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_server2_kerberized_env",
-      "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
-      "min_version": "2.2.3.0",
-      "max_version": "2.2.5.0"
-     },
-    {
-      "name": "hive_env_heapsize",
-      "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_kms_hsm_support",
-      "description": "Ranger KMS HSM support (AMBARI-15752)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_log4j_support",
-      "description": "Ranger supporting log-4j properties (AMBARI-15681)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_kerberos_support",
-      "description": "Ranger Kerberos support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "hive_metastore_site_support",
-      "description": "Hive Metastore site support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_usersync_password_jceks",
-      "description": "Saving Ranger Usersync credentials in jceks",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_install_infra_client",
-      "description": "Ambari Infra Service support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "falcon_atlas_support_2_3",
-      "description": "Falcon Atlas integration support for 2.3 stack",
-      "min_version": "2.3.99.0",
-      "max_version": "2.4.0.0"
-    },
-    {
-      "name": "falcon_atlas_support",
-      "description": "Falcon Atlas integration",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "hbase_home_directory",
-      "description": "Hbase home directory in HDFS needed for HBASE backup",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "spark_livy",
-      "description": "Livy as slave component of spark",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "atlas_ranger_plugin_support",
-      "description": "Atlas Ranger plugin support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "atlas_conf_dir_in_path",
-      "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",
-      "min_version": "2.3.0.0",
-      "max_version": "2.4.99.99"
-    },
-    {
-      "name": "atlas_upgrade_support",
-      "description": "Atlas supports express and rolling upgrades",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "atlas_hook_support",
-      "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_pid_support",
-      "description": "Ranger Service support pid generation AMBARI-16756",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_kms_pid_support",
-      "description": "Ranger KMS Service support pid generation",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_admin_password_change",
-      "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "storm_metrics_apache_classes",
-      "description": "Metrics sink for Storm that uses Apache class names",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "spark_java_opts_support",
-      "description": "Allow Spark to generate java-opts file",
-      "min_version": "2.2.0.0",
-      "max_version": "2.4.0.0"
-    },
-    {
-      "name": "atlas_hbase_setup",
-      "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_hive_plugin_jdbc_url",
-      "description": "Handle Ranger hive repo config jdbc url change for stack 2.5 (AMBARI-18386)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "zkfc_version_advertised",
-      "description": "ZKFC advertise version",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "phoenix_core_hdfs_site_required",
-      "description": "HDFS and CORE site required for Phoenix",
-      "max_version": "2.5.9.9"
-    }
-  ]
-}
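
Stack scripts gate behaviour on these entries by comparing the current stack version against each feature's min_version and, when present, max_version (treated as an exclusive upper bound in this sketch); in Ambari's Python libraries this is normally done through check_stack_feature()/StackFeature from resource_management, if memory serves. The helper below is a standalone re-implementation of just that version check so it can run without an agent; the sample data is a small subset of the JSON above.

    # Standalone sketch of the min_version/max_version check for stack features.
    def _v(version):
        return tuple(int(part) for part in version.split("."))

    def feature_supported(feature, stack_version, feature_defs):
        for entry in feature_defs["stack_features"]:
            if entry["name"] != feature:
                continue
            if "min_version" in entry and _v(stack_version) < _v(entry["min_version"]):
                return False
            if "max_version" in entry and _v(stack_version) >= _v(entry["max_version"]):
                return False
            return True
        return False  # unknown features are treated as unsupported

    feature_defs = {"stack_features": [
        {"name": "rolling_upgrade", "min_version": "2.2.0.0"},
        {"name": "snappy", "min_version": "2.0.0.0", "max_version": "2.2.0.0"},
    ]}
    print(feature_supported("rolling_upgrade", "2.5.0.0", feature_defs))  # True
    print(feature_supported("snappy", "2.5.0.0", feature_defs))           # False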

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json
deleted file mode 100644
index d1aab4b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
-  "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"],
-  "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/repos/repoinfo.xml
deleted file mode 100644
index 5145064..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/repos/repoinfo.xml
+++ /dev/null
@@ -1,132 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <latest>http://s3.amazonaws.com/dev.hortonworks.com/HDP/hdp_urlinfo.json</latest>
-  <os family="redhat6">
-    <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/3.x/updates/3.0.0.0</baseurl>
-      <repoid>HDP-3.0</repoid>
-      <reponame>HDP</reponame>
-      <unique>true</unique>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6</baseurl>
-      <repoid>HDP-UTILS-1.1.0.21</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <unique>false</unique>
-    </repo>
-  </os>
-  <os family="redhat7">
-    <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos7/3.x/updates/3.0.0.0</baseurl>
-      <repoid>HDP-3.0</repoid>
-      <reponame>HDP</reponame>
-      <unique>true</unique>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos7</baseurl>
-      <repoid>HDP-UTILS-1.1.0.21</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <unique>false</unique>
-    </repo>
-  </os>
-  <os family="suse11">
-    <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/suse11sp3/3.x/updates/3.0.0.0</baseurl>
-      <repoid>HDP-3.0</repoid>
-      <reponame>HDP</reponame>
-      <unique>true</unique>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/suse11sp3</baseurl>
-      <repoid>HDP-UTILS-1.1.0.21</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <unique>false</unique>
-    </repo>
-  </os>
-  <os family="suse12">
-    <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/sles12/3.x/updates/3.0.0.0</baseurl>
-      <repoid>HDP-3.0</repoid>
-      <reponame>HDP</reponame>
-      <unique>true</unique>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/suse11sp3</baseurl>
-      <repoid>HDP-UTILS-1.1.0.21</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <unique>false</unique>
-    </repo>
-  </os>
-  <os family="ubuntu12">
-    <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/ubuntu12/3.x/updates/2.3.0.0</baseurl>
-      <repoid>HDP-3.0</repoid>
-      <reponame>HDP</reponame>
-      <unique>true</unique>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/ubuntu12</baseurl>
-      <repoid>HDP-UTILS-1.1.0.21</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <unique>false</unique>
-    </repo>
-  </os>
-  <os family="debian7">
-    <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/debian7/3.x/updates/2.3.0.0</baseurl>
-      <repoid>HDP-3.0</repoid>
-      <reponame>HDP</reponame>
-      <unique>true</unique>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/debian6</baseurl>
-      <repoid>HDP-UTILS-1.1.0.21</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <unique>false</unique>
-    </repo>
-  </os>
-  <os family="ubuntu14">
-    <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/ubuntu14/3.x/updates/2.3.0.0</baseurl>
-      <repoid>HDP-3.0</repoid>
-      <reponame>HDP</reponame>
-      <unique>true</unique>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/ubuntu12</baseurl>
-      <repoid>HDP-UTILS-1.1.0.21</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <unique>false</unique>
-    </repo>
-  </os>
-  <os family="ubuntu16">
-    <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/ubuntu14/3.x/updates/3.0.0.0</baseurl>
-      <repoid>HDP-3.0</repoid>
-      <reponame>HDP</reponame>
-      <unique>true</unique>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/ubuntu12</baseurl>
-      <repoid>HDP-UTILS-1.1.0.21</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <unique>false</unique>
-    </repo>
-  </os>
-</reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml
deleted file mode 100644
index e680c1b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml
+++ /dev/null
@@ -1,166 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_adding_forbidden="true">
-  <!-- These configs were inherited from HDP 2.3 -->
-  <!-- hadoop-env.sh -->
-  <property>
-    <name>content</name>
-    <display-name>hadoop-env template</display-name>
-    <description>This is the jinja template for hadoop-env.sh file</description>
-    <value>
-      # Set Hadoop-specific environment variables here.
-
-      # The only required environment variable is JAVA_HOME.  All others are
-      # optional.  When running a distributed configuration it is best to
-      # set JAVA_HOME in this file, so that it is correctly defined on
-      # remote nodes.
-
-      # The java implementation to use.  Required.
-      export JAVA_HOME={{java_home}}
-      export HADOOP_HOME_WARN_SUPPRESS=1
-
-      # Hadoop home directory
-      export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-
-      # Hadoop Configuration Directory
-
-      {# this is different for HDP1 #}
-      # Path to jsvc required by secure HDP 2.0 datanode
-      export JSVC_HOME={{jsvc_path}}
-
-
-      # The maximum amount of heap to use, in MB. Default is 1000.
-      export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
-
-      export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
-
-      # Extra Java runtime options.  Empty by default.
-      export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
-
-      # Command specific options appended to HADOOP_OPTS when specified
-      HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
-
-      HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
-
-      {% if java_version &lt; 8 %}
-      SHARED_HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
-      export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
-      export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
-
-      export HADOOP_SECONDARYNAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYNAMENODE_OPTS}"
-
-      # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-      export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
-
-      {% else %}
-      SHARED_HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
-      export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
-      export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
-
-      export HADOOP_SECONDARYNAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYNAMENODE_OPTS}"
-
-      # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-      export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-      {% endif %}
-
-      HADOOP_NFS3_OPTS="-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}"
-      HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
-
-
-      # On secure datanodes, user to run the datanode as after dropping privileges
-      export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
-
-      # Extra ssh options.  Empty by default.
-      export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
-
-      # Where log files are stored.  $HADOOP_HOME/logs by default.
-      export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
-
-      # History server logs
-      export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
-
-      # Where log files are stored in the secure data environment.
-      export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
-
-      # File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
-      # export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
-
-      # host:path where hadoop code should be rsync'd from.  Unset by default.
-      # export HADOOP_MASTER=master:/home/$USER/src/hadoop
-
-      # Seconds to sleep between slave commands.  Unset by default.  This
-      # can be useful in large clusters, where, e.g., slave rsyncs can
-      # otherwise arrive faster than the master can service them.
-      # export HADOOP_SLAVE_SLEEP=0.1
-
-      # The directory where pid files are stored. /tmp by default.
-      export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
-      export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
-
-      # History server pid
-      export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
-
-      YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
-
-      # A string representing this instance of hadoop. $USER by default.
-      export HADOOP_IDENT_STRING=$USER
-
-      # The scheduling priority for daemon processes.  See 'man nice'.
-
-      # export HADOOP_NICENESS=10
-
-      # Add database libraries
-      JAVA_JDBC_LIBS=""
-      if [ -d "/usr/share/java" ]; then
-      for jarFile in `ls /usr/share/java | grep -E "(mysql|ojdbc|postgresql|sqljdbc)" 2&gt;/dev/null`
-      do
-      JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-      done
-      fi
-
-      # Add libraries to the hadoop classpath - some may not need a colon as they already include it
-      export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}
-
-      # Setting path to hdfs command line
-      export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
-
-      # Mostly required for hadoop 2.0
-      export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}
-
-      export HADOOP_OPTS="-Dhdp.version=$HDP_VERSION $HADOOP_OPTS"
-
-
-      # Fix temporary bug, when ulimit from conf files is not picked up, without full relogin.
-      # Makes sense to fix only when runing DN as root
-      if [ "$command" == "datanode" ] &amp;&amp; [ "$EUID" -eq 0 ] &amp;&amp; [ -n "$HADOOP_SECURE_DN_USER" ]; then
-      {% if is_datanode_max_locked_memory_set %}
-      ulimit -l {{datanode_max_locked_memory}}
-      {% endif %}
-      ulimit -n {{hdfs_user_nofile_limit}}
-      fi
-    </value>
-    <value-attributes>
-      <type>content</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>


http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/YARN_widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/YARN_widgets.json b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/YARN_widgets.json
deleted file mode 100644
index 782f21d..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/YARN_widgets.json
+++ /dev/null
@@ -1,670 +0,0 @@
-{
-  "layouts": [
-    {
-      "layout_name": "default_yarn_dashboard",
-      "display_name": "Standard YARN Dashboard",
-      "section_name": "YARN_SUMMARY",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "Memory Utilization",
-          "description": "Percentage of total memory allocated to containers running in the cluster.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AllocatedMB",
-              "metric_path": "metrics/yarn/Queue/root/AllocatedMB",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AvailableMB",
-              "metric_path": "metrics/yarn/Queue/root/AvailableMB",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            }
-          ],
-          "values": [
-            {
-              "name": "Memory Utilization",
-              "value": "${(yarn.QueueMetrics.Queue=root.AllocatedMB / (yarn.QueueMetrics.Queue=root.AllocatedMB + yarn.QueueMetrics.Queue=root.AvailableMB)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "CPU Utilization",
-          "description": "Percentage of total virtual cores allocated to containers running in the cluster.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AllocatedVCores",
-              "metric_path": "metrics/yarn/Queue/root/AllocatedVCores",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AvailableVCores",
-              "metric_path": "metrics/yarn/Queue/root/AvailableVCores",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            }
-          ],
-          "values": [
-            {
-              "name": "Total Allocatable CPU Utilized across NodeManager",
-              "value": "${(yarn.QueueMetrics.Queue=root.AllocatedVCores / (yarn.QueueMetrics.Queue=root.AllocatedVCores + yarn.QueueMetrics.Queue=root.AvailableVCores)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Bad Local Disks",
-          "description": "Number of unhealthy local disks across all NodeManagers.",
-          "widget_type": "NUMBER",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.BadLocalDirs",
-              "metric_path": "metrics/yarn/BadLocalDirs",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.BadLogDirs",
-              "metric_path": "metrics/yarn/BadLogDirs",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Number of unhealthy local disks for NodeManager",
-              "value": "${yarn.NodeManagerMetrics.BadLocalDirs + yarn.NodeManagerMetrics.BadLogDirs}"
-            }
-          ],
-          "properties": {
-            "display_unit": ""
-          }
-        },
-        {
-          "widget_name": "Container Failures",
-          "description": "Percentage of all containers failing in the cluster.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersFailed._rate",
-              "metric_path": "metrics/yarn/ContainersFailed._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersCompleted._rate",
-              "metric_path": "metrics/yarn/ContainersCompleted._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersLaunched._rate",
-              "metric_path": "metrics/yarn/ContainersLaunched._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersIniting._sum",
-              "metric_path": "metrics/yarn/ContainersIniting._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersKilled._rate",
-              "metric_path": "metrics/yarn/ContainersKilled._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersRunning._sum",
-              "metric_path": "metrics/yarn/ContainersRunning._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Container Failures",
-              "value": "${(yarn.NodeManagerMetrics.ContainersFailed._rate/(yarn.NodeManagerMetrics.ContainersFailed._rate + yarn.NodeManagerMetrics.ContainersCompleted._rate + yarn.NodeManagerMetrics.ContainersLaunched._rate + yarn.NodeManagerMetrics.ContainersIniting._sum + yarn.NodeManagerMetrics.ContainersKilled._rate + yarn.NodeManagerMetrics.ContainersRunning._sum)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "App Failures",
-          "description": "Percentage of all launched applications failing in the cluster.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsFailed._rate",
-              "metric_path": "metrics/yarn/Queue/root/AppsFailed._rate",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsKilled._rate",
-              "metric_path": "metrics/yarn/Queue/root/AppsKilled._rate",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsPending",
-              "metric_path": "metrics/yarn/Queue/root/AppsPending",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsRunning",
-              "metric_path": "metrics/yarn/Queue/root/AppsRunning",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsSubmitted._rate",
-              "metric_path": "metrics/yarn/Queue/root/AppsSubmitted._rate",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsCompleted._rate",
-              "metric_path": "metrics/yarn/Queue/root/AppsCompleted._rate",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            }
-          ],
-          "values": [
-            {
-              "name": "App Failures",
-              "value": "${(yarn.QueueMetrics.Queue=root.AppsFailed._rate/(yarn.QueueMetrics.Queue=root.AppsFailed._rate + yarn.QueueMetrics.Queue=root.AppsKilled._rate + yarn.QueueMetrics.Queue=root.AppsPending + yarn.QueueMetrics.Queue=root.AppsRunning + yarn.QueueMetrics.Queue=root.AppsSubmitted._rate + yarn.QueueMetrics.Queue=root.AppsCompleted._rate)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Pending Apps",
-          "description": "Count of applications waiting for cluster resources to become available.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsPending",
-              "metric_path": "metrics/yarn/Queue/root/AppsPending",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            }
-          ],
-          "values": [
-            {
-              "name": "Pending Apps",
-              "value": "${yarn.QueueMetrics.Queue=root.AppsPending}"
-            }
-          ],
-          "properties": {
-            "display_unit": "Apps",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Cluster Memory",
-          "description": "Percentage of memory used across all NodeManager hosts.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "mem_total._sum",
-              "metric_path": "metrics/memory/mem_total._avg",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "mem_free._sum",
-              "metric_path": "metrics/memory/mem_free._avg",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Memory utilization",
-              "value": "${((mem_total._sum - mem_free._sum)/mem_total._sum) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Cluster Disk",
-          "description": "Sum of disk throughput for all NodeManager hosts.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "read_bps._sum",
-              "metric_path": "metrics/disk/read_bps._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "write_bps._sum",
-              "metric_path": "metrics/disk/write_bps._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Read throughput",
-              "value": "${read_bps._sum/1048576}"
-            },
-            {
-              "name": "Write throughput",
-              "value": "${write_bps._sum/1048576}"
-            }
-          ],
-          "properties": {
-            "display_unit": "Mbps",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Cluster Network",
-          "description": "Average of Network utilized across all NodeManager hosts.",
-          "default_section_name": "YARN_SUMMARY",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "pkts_in._avg",
-              "metric_path": "metrics/network/pkts_in._avg",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "pkts_out._avg",
-              "metric_path": "metrics/network/pkts_out._avg",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Packets In",
-              "value": "${pkts_in._avg}"
-            },
-            {
-              "name": "Packets Out",
-              "value": "${pkts_out._avg}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Cluster CPU",
-          "description": "Percentage of CPU utilized across all NodeManager hosts.",
-          "default_section_name": "YARN_SUMMARY",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "cpu_system._sum",
-              "metric_path": "metrics/cpu/cpu_system._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "cpu_user._sum",
-              "metric_path": "metrics/cpu/cpu_user._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "cpu_nice._sum",
-              "metric_path": "metrics/cpu/cpu_nice._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "cpu_idle._sum",
-              "metric_path": "metrics/cpu/cpu_idle._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "cpu_wio._sum",
-              "metric_path": "metrics/cpu/cpu_wio._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "CPU utilization",
-              "value": "${((cpu_system._sum + cpu_user._sum + cpu_nice._sum)/(cpu_system._sum + cpu_user._sum + cpu_nice._sum + cpu_idle._sum + cpu_wio._sum)) * 100}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1",
-            "display_unit": "%"
-          }
-        }
-      ]
-    },
-    {
-      "layout_name": "default_yarn_heatmap",
-      "display_name": "YARN Heatmaps",
-      "section_name": "YARN_HEATMAPS",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "YARN local disk space utilization per NodeManager",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc",
-              "metric_path": "metrics/yarn/GoodLocalDirsDiskUtilizationPerc",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc",
-              "metric_path": "metrics/yarn/GoodLogDirsDiskUtilizationPerc",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "YARN local disk space utilization per NodeManager",
-              "value": "${(yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc + yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc)/2}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Total Allocatable RAM Utilized per NodeManager",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedGB",
-              "metric_path": "metrics/yarn/AllocatedGB",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.AvailableGB",
-              "metric_path": "metrics/yarn/AvailableGB",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Total Allocatable RAM Utilized per NodeManager",
-              "value": "${(yarn.NodeManagerMetrics.AllocatedGB/(yarn.NodeManagerMetrics.AvailableGB + yarn.NodeManagerMetrics.AllocatedGB)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Total Allocatable CPU Utilized per NodeManager",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedVCores",
-              "metric_path": "metrics/yarn/AllocatedVCores",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.AvailableVCores",
-              "metric_path": "metrics/yarn/AvailableVCores",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Total Allocatable CPU Utilized per NodeManager",
-              "value": "${(yarn.NodeManagerMetrics.AllocatedVCores/(yarn.NodeManagerMetrics.AllocatedVCores + yarn.NodeManagerMetrics.AvailableVCores)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Container Failures",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersFailed._rate",
-              "metric_path": "metrics/yarn/ContainersFailed._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersCompleted._rate",
-              "metric_path": "metrics/yarn/ContainersCompleted._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersLaunched._rate",
-              "metric_path": "metrics/yarn/ContainersLaunched._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersIniting",
-              "metric_path": "metrics/yarn/ContainersIniting",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersKilled._rate",
-              "metric_path": "metrics/yarn/ContainersKilled._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersRunning",
-              "metric_path": "metrics/yarn/ContainersRunning",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Container Failures",
-              "value": "${(yarn.NodeManagerMetrics.ContainersFailed._rate/(yarn.NodeManagerMetrics.ContainersFailed._rate + yarn.NodeManagerMetrics.ContainersCompleted._rate + yarn.NodeManagerMetrics.ContainersLaunched._rate + yarn.NodeManagerMetrics.ContainersIniting + yarn.NodeManagerMetrics.ContainersKilled._rate + yarn.NodeManagerMetrics.ContainersRunning)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "NodeManager GC Time",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis",
-              "metric_path": "metrics/jvm/gcTimeMillis",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "NodeManager Garbage Collection Time",
-              "value": "${Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis}"
-            }
-          ],
-          "properties": {
-            "display_unit": "ms",
-            "max_limit": "10000"
-          }
-        },
-        {
-          "widget_name": "NodeManager JVM Heap Memory Used",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM",
-              "metric_path": "metrics/jvm/memHeapUsedM",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "NodeManager JVM Heap Memory Used",
-              "value": "${Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "max_limit": "512"
-          }
-        },
-        {
-          "widget_name": "Allocated Containers",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedContainers",
-              "metric_path": "metrics/yarn/AllocatedContainers",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Allocated Containers",
-              "value": "${yarn.NodeManagerMetrics.AllocatedContainers}"
-            }
-          ],
-          "properties": {
-            "display_unit": "",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "NodeManager RAM Utilized",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedGB",
-              "metric_path": "metrics/yarn/AllocatedGB",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "NodeManager RAM Utilized",
-              "value": "${yarn.NodeManagerMetrics.AllocatedGB}"
-            }
-          ],
-          "properties": {
-            "display_unit": "",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "NodeManager CPU Utilized",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedVCores",
-              "metric_path": "metrics/yarn/AllocatedVCores",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "NodeManager CPU Utilized",
-              "value": "${yarn.NodeManagerMetrics.AllocatedVCores}"
-            }
-          ],
-          "properties": {
-            "display_unit": "",
-            "max_limit": "100"
-          }
-        }
-      ]
-    }
-  ]
-}

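The widget "values" expressions in the layout above are plain arithmetic over the aggregated metric series; the "Cluster CPU" widget, for example, divides busy CPU time by total CPU time. A minimal Python sketch of how that expression resolves for a single data point, using made-up sample numbers rather than anything collected by Ambari Metrics:

# Sketch only: the "Cluster CPU" value expression
# ((cpu_system + cpu_user + cpu_nice) /
#  (cpu_system + cpu_user + cpu_nice + cpu_idle + cpu_wio)) * 100
# evaluated for one data point. The sample inputs are invented.
def cpu_utilization_pct(cpu_system, cpu_user, cpu_nice, cpu_idle, cpu_wio):
    busy = cpu_system + cpu_user + cpu_nice
    total = busy + cpu_idle + cpu_wio
    return busy / total * 100 if total else 0.0

print(cpu_utilization_pct(5.0, 20.0, 1.0, 70.0, 4.0))  # prints 26.0
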
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/alerts.json b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/alerts.json
deleted file mode 100644
index c4a58bb..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/alerts.json
+++ /dev/null
@@ -1,392 +0,0 @@
-{
-  "MAPREDUCE2": {
-    "service": [],
-    "HISTORYSERVER": [
-      {
-        "name": "mapreduce_history_server_webui",
-        "label": "History Server Web UI",
-        "description": "This host-level alert is triggered if the History Server Web UI is unreachable.",
-        "interval": 1,
-        "scope": "ANY",
-        "source": {
-          "type": "WEB",
-          "uri": {
-            "http": "{{mapred-site/mapreduce.jobhistory.webapp.address}}",
-            "https": "{{mapred-site/mapreduce.jobhistory.webapp.https.address}}",
-            "https_property": "{{mapred-site/mapreduce.jobhistory.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "kerberos_keytab": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-principal}}",
-            "connection_timeout": 5.0
-          },
-          "reporting": {
-            "ok": {
-              "text": "HTTP {0} response in {2:.3f}s"
-            },
-            "warning":{
-              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
-            },
-            "critical": {
-              "text": "Connection failed to {1} ({3})"
-            }
-          }
-        }
-      },
-      {
-        "name": "mapreduce_history_server_cpu",
-        "label": "History Server CPU Utilization",
-        "description": "This host-level alert is triggered if the percent of CPU utilization on the History Server exceeds the configured critical threshold. The threshold values are in percent.",
-        "interval": 5,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{mapred-site/mapreduce.jobhistory.webapp.address}}",
-            "kerberos_keytab": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-principal}}",
-            "https": "{{mapred-site/mapreduce.jobhistory.webapp.https.address}}",
-            "https_property": "{{mapred-site/mapreduce.jobhistory.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0
-          },
-          "reporting": {
-            "ok": {
-              "text": "{1} CPU, load {0:.1%}"
-            },
-            "warning": {
-              "text": "{1} CPU, load {0:.1%}",
-              "value": 200
-            },
-            "critical": {
-              "text": "{1} CPU, load {0:.1%}",
-              "value": 250
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          },
-          "jmx": {
-            "property_list": [
-              "java.lang:type=OperatingSystem/SystemCpuLoad",
-              "java.lang:type=OperatingSystem/AvailableProcessors"
-            ],
-            "value": "{0} * 100"
-          }
-        }
-      },
-      {
-        "name": "mapreduce_history_server_rpc_latency",
-        "label": "History Server RPC Latency",
-        "description": "This host-level alert is triggered if the History Server operations RPC latency exceeds the configured critical threshold. Typically an increase in the RPC processing time increases the RPC queue length, causing the average queue wait time to increase for operations. The threshold values are in milliseconds.",
-        "interval": 5,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{mapred-site/mapreduce.jobhistory.webapp.address}}",
-            "https": "{{mapred-site/mapreduce.jobhistory.webapp.https.address}}",
-            "kerberos_keytab": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-principal}}",
-            "https_property": "{{mapred-site/mapreduce.jobhistory.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0
-          },
-          "reporting": {
-            "ok": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]"
-            },
-            "warning": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
-              "value": 3000
-            },          
-            "critical": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
-              "value": 5000
-            },
-            "units" : "ms"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=JobHistoryServer,name=RpcActivityForPort*/RpcQueueTimeAvgTime",
-              "Hadoop:service=JobHistoryServer,name=RpcActivityForPort*/RpcProcessingTimeAvgTime"
-            ],
-            "value": "{0}"
-          }
-        }
-      }
-    ]
-  },
-  "YARN": {
-    "service": [
-      {
-        "name": "yarn_nodemanager_webui_percent",
-        "label": "Percent NodeManagers Available",
-        "description": "This alert is triggered if the number of down NodeManagers in the cluster is greater than the configured critical threshold. It aggregates the results of NodeManager process checks.",
-        "interval": 1,
-        "scope": "SERVICE",
-        "enabled": true,
-        "source": {
-          "type": "AGGREGATE",
-          "alert_name": "yarn_nodemanager_webui",
-          "reporting": {
-            "ok": {
-              "text": "affected: [{1}], total: [{0}]"
-            },
-            "warning": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 10
-            },
-            "critical": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 30
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          }
-        }
-      }
-    ],
-    "NODEMANAGER": [
-      {
-        "name": "yarn_nodemanager_webui",
-        "label": "NodeManager Web UI",
-        "description": "This host-level alert is triggered if the NodeManager Web UI is unreachable.",
-        "interval": 1,
-        "scope": "HOST",
-        "source": {
-          "type": "WEB",
-          "uri": {
-            "http": "{{yarn-site/yarn.nodemanager.webapp.address}}",
-            "https": "{{yarn-site/yarn.nodemanager.webapp.https.address}}",
-            "https_property": "{{yarn-site/yarn.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "default_port": 8042,
-            "kerberos_keytab": "{{yarn-site/yarn.nodemanager.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{yarn-site/yarn.nodemanager.webapp.spnego-principal}}",
-            "connection_timeout": 5.0
-          },
-          "reporting": {
-            "ok": {
-              "text": "HTTP {0} response in {2:.3f}s"
-            },
-            "warning":{
-              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
-            },
-            "critical": {
-              "text": "Connection failed to {1} ({3})"
-            }
-          }
-        }
-      },
-      {
-        "name": "yarn_nodemanager_health",
-        "label": "NodeManager Health",
-        "description": "This host-level alert checks the node health property available from the NodeManager component.",
-        "interval": 1,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "YARN/2.1.0.2.0/package/alerts/alert_nodemanager_health.py",
-          "parameters": [
-            {
-              "name": "connection.timeout",
-              "display_name": "Connection Timeout",
-              "value": 5.0,
-              "type": "NUMERIC",
-              "description": "The maximum time before this alert is considered to be CRITICAL",
-              "units": "seconds",
-              "threshold": "CRITICAL"
-            }
-          ]
-        }
-      }
-    ],
-    "RESOURCEMANAGER": [
-      {
-        "name": "yarn_resourcemanager_webui",
-        "label": "ResourceManager Web UI",
-        "description": "This host-level alert is triggered if the ResourceManager Web UI is unreachable.",
-        "interval": 1,
-        "scope": "ANY",
-        "source": {
-          "type": "WEB",
-          "uri": {
-            "http": "{{yarn-site/yarn.resourcemanager.webapp.address}}",
-            "https": "{{yarn-site/yarn.resourcemanager.webapp.https.address}}",
-            "https_property": "{{yarn-site/yarn.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "kerberos_keytab": "{{yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{yarn-site/yarn.resourcemanager.webapp.spnego-principal}}",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "alias_key" : "{{yarn-site/yarn.resourcemanager.ha.rm-ids}}",
-              "http_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.address.{{alias}}}}",
-              "https_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.https.address.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "HTTP {0} response in {2:.3f}s"
-            },
-            "warning":{
-              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
-            },
-            "critical": {
-              "text": "Connection failed to {1} ({3})"
-            }
-          }
-        }
-      },
-      {
-        "name": "yarn_resourcemanager_cpu",
-        "label": "ResourceManager CPU Utilization",
-        "description": "This host-level alert is triggered if CPU utilization of the ResourceManager exceeds certain warning and critical thresholds. It checks the ResourceManager JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
-        "interval": 5,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{yarn-site/yarn.resourcemanager.webapp.address}}",
-            "https": "{{yarn-site/yarn.resourcemanager.webapp.https.address}}",
-            "kerberos_keytab": "{{yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{yarn-site/yarn.resourcemanager.webapp.spnego-principal}}",
-            "https_property": "{{yarn-site/yarn.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "alias_key" : "{{yarn-site/yarn.resourcemanager.ha.rm-ids}}",
-              "http_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.address.{{alias}}}}",
-              "https_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.https.address.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "{1} CPU, load {0:.1%}"
-            },
-            "warning": {
-              "text": "{1} CPU, load {0:.1%}",
-              "value": 200
-            },
-            "critical": {
-              "text": "{1} CPU, load {0:.1%}",
-              "value": 250
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          },
-          "jmx": {
-            "property_list": [
-              "java.lang:type=OperatingSystem/SystemCpuLoad",
-              "java.lang:type=OperatingSystem/AvailableProcessors"
-            ],
-            "value": "{0} * 100"
-          }
-        }
-      },
-      {
-        "name": "yarn_resourcemanager_rpc_latency",
-        "label": "ResourceManager RPC Latency",
-        "description": "This host-level alert is triggered if the ResourceManager operations RPC latency exceeds the configured critical threshold. Typically an increase in the RPC processing time increases the RPC queue length, causing the average queue wait time to increase for ResourceManager operations. The threshold values are in milliseconds.",
-        "interval": 5,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{yarn-site/yarn.resourcemanager.webapp.address}}",
-            "https": "{{yarn-site/yarn.resourcemanager.webapp.https.address}}",
-            "kerberos_keytab": "{{yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{yarn-site/yarn.resourcemanager.webapp.spnego-principal}}",
-            "https_property": "{{yarn-site/yarn.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "alias_key" : "{{yarn-site/yarn.resourcemanager.ha.rm-ids}}",
-              "http_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.address.{{alias}}}}",
-              "https_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.https.address.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]"
-            },
-            "warning": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
-              "value": 3000
-            },          
-            "critical": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
-              "value": 5000
-            },
-            "units" : "ms"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=ResourceManager,name=RpcActivityForPort*/RpcQueueTimeAvgTime",
-              "Hadoop:service=ResourceManager,name=RpcActivityForPort*/RpcProcessingTimeAvgTime"
-            ],
-            "value": "{0}"
-          }
-        }
-      },
-      {
-        "name": "nodemanager_health_summary",
-        "label": "NodeManager Health Summary",
-        "description": "This service-level alert is triggered if there are unhealthy NodeManagers",
-        "interval": 1,
-        "scope": "SERVICE",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "YARN/2.1.0.2.0/package/alerts/alert_nodemanagers_summary.py",
-          "parameters": [
-            {
-              "name": "connection.timeout",
-              "display_name": "Connection Timeout",
-              "value": 5.0,
-              "type": "NUMERIC",
-              "description": "The maximum time before this alert is considered to be CRITICAL",
-              "units": "seconds",
-              "threshold": "CRITICAL"
-            }
-          ]
-        }
-      }
-    ],
-    "APP_TIMELINE_SERVER": [
-      {
-        "name": "yarn_app_timeline_server_webui",
-        "label": "App Timeline Web UI",
-        "description": "This host-level alert is triggered if the App Timeline Server Web UI is unreachable.",
-        "interval": 1,
-        "scope": "ANY",
-        "source": {
-          "type": "WEB",
-          "uri": {
-            "http": "{{yarn-site/yarn.timeline-service.webapp.address}}/ws/v1/timeline",
-            "https": "{{yarn-site/yarn.timeline-service.webapp.https.address}}/ws/v1/timeline",
-            "https_property": "{{yarn-site/yarn.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "kerberos_keytab": "{{yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{yarn-site/yarn.timeline-service.http-authentication.kerberos.principal}}",
-            "connection_timeout": 5.0
-          },
-          "reporting": {
-            "ok": {
-              "text": "HTTP {0} response in {2:.3f}s"
-            },
-            "warning":{
-              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
-            },
-            "critical": {
-              "text": "Connection failed to {1} ({3})"
-            }
-          }
-        }
-      }
-    ]
-  }
-}

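The METRIC alerts above read the listed JMX properties, apply the small "value" expression (for instance "{0} * 100" against java.lang:type=OperatingSystem/SystemCpuLoad), and compare the result with the warning/critical numbers in the "reporting" block. A hedged Python sketch of that comparison, reusing the 200/250 thresholds from the yarn_resourcemanager_cpu definition; the function name and sample inputs are illustrative, not Ambari's alert framework:

# Illustrative only: classify a CPU METRIC alert as the definition above
# describes. The 200/250 thresholds are copied from its reporting block.
def classify_cpu_alert(system_cpu_load, warning=200, critical=250):
    value = system_cpu_load * 100  # the "{0} * 100" JMX value expression
    if value >= critical:
        return "CRITICAL", value
    if value >= warning:
        return "WARNING", value
    return "OK", value

print(classify_cpu_alert(0.5))  # ('OK', 50.0)
print(classify_cpu_alert(2.5))  # ('CRITICAL', 250.0)
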
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-env.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-env.xml
deleted file mode 100644
index 2ac0bff..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-env.xml
+++ /dev/null
@@ -1,104 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_adding_forbidden="true">
-  <property>
-    <name>mapred_log_dir_prefix</name>
-    <value>/var/log/hadoop-mapreduce</value>
-    <display-name>Mapreduce Log Dir Prefix</display-name>
-    <description>Mapreduce Log Dir Prefix</description>
-    <value-attributes>
-      <type>directory</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapred_pid_dir_prefix</name>
-    <value>/var/run/hadoop-mapreduce</value>
-    <display-name>Mapreduce PID Dir Prefix</display-name>
-    <description>Mapreduce PID Dir Prefix</description>
-    <value-attributes>
-      <type>directory</type>
-      <overridable>false</overridable>
-      <editable-only-at-install>true</editable-only-at-install>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapred_user</name>
-    <display-name>Mapreduce User</display-name>
-    <value>mapred</value>
-    <property-type>USER</property-type>
-    <description>Mapreduce User</description>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>jobhistory_heapsize</name>
-    <display-name>History Server heap size</display-name>
-    <value>900</value>
-    <description>Value for JobHistoryServer heap_size variable in hadoop-env.sh</description>
-    <value-attributes>
-      <unit>MB</unit>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapred_user_nofile_limit</name>
-    <value>32768</value>
-    <description>Max open files limit setting for MAPREDUCE user.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapred_user_nproc_limit</name>
-    <value>65536</value>
-    <description>Max number of processes limit setting for MAPREDUCE user.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>mapred-env template</display-name>
-    <description>This is the jinja template for mapred-env.sh file</description>
-    <value>
-      # export JAVA_HOME=/home/y/libexec/jdk1.6.0/
-
-      export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}
-
-      export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
-
-      #export HADOOP_JOB_HISTORYSERVER_OPTS=
-      #export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.
-      #export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
-      #export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.
-      #export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default
-      #export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.
-    </value>
-    <value-attributes>
-      <type>content</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>

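The content property above is described as a jinja template for mapred-env.sh, so values such as jobhistory_heapsize are substituted into the exported shell variables when the file is rendered. A small sketch of that substitution, assuming the jinja2 package is available; the one-line template is trimmed down from the full content block:

from jinja2 import Template

# Trimmed-down line from the mapred-env.sh template above; 900 mirrors the
# jobhistory_heapsize default shipped in this file.
line = "export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}"
print(Template(line).render(jobhistory_heapsize=900))
# prints: export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=900
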
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-logsearch-conf.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-logsearch-conf.xml
deleted file mode 100644
index 3c0abbf..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-logsearch-conf.xml
+++ /dev/null
@@ -1,80 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>MapReduce</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>HISTORYSERVER:mapred_historyserver</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder Config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-      "type":"mapred_historyserver",
-      "rowtype":"service",
-      "path":"{{default('/configurations/mapred-env/mapred_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/mapred-env/mapred_user', 'mapred')}}/mapred-{{default('configurations/mapred-env/mapred_user', 'mapred')}}-historyserver*.log"
-    }
-   ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "mapred_historyserver"
-          ]
-         }
-       },
-      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     }
-   ]
- }
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>

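The Logfeeder input above pairs the history server's log4j layout ("%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n") with a grok message_pattern that splits each line into logtime, level, logger_name, file, method, line_number and log_message. A rough Python approximation of that split, using a made-up sample line and a plain regex instead of grok, only to show which fields the pattern extracts:

import re

# Made-up sample line in the %d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m layout.
line = "2016-12-19 17:48:23,123 INFO  hs.JobHistory (JobHistory.java:run(210)) - Starting scan"

# Plain-regex stand-in for the grok message_pattern (illustrative only).
pattern = re.compile(
    r"^(?P<logtime>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})\s+"
    r"(?P<level>[A-Z]+)\s+(?P<logger_name>\S+)\s+"
    r"\((?P<file>[^:]+):(?P<method>[^(]+)\((?P<line_number>\d+)\)\)\s+-\s+"
    r"(?P<log_message>.*)$"
)
print(pattern.match(line).groupdict()["level"])  # prints INFO
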
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-site.xml
deleted file mode 100644
index e51107a..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-site.xml
+++ /dev/null
@@ -1,540 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<!-- Put site-specific property overrides in this file. -->
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
-  <!-- i/o properties -->
-  <property>
-    <name>mapreduce.task.io.sort.mb</name>
-    <value>358</value>
-    <description>
-      The total amount of buffer memory to use while sorting files, in megabytes.
-      By default, gives each merge stream 1MB, which should minimize seeks.
-    </description>
-    <display-name>Sort Allocation Memory</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>2047</maximum>
-      <unit>MB</unit>
-      <increment-step>1</increment-step>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>mapred-site</type>
-        <name>mapreduce.map.memory.mb</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.map.sort.spill.percent</name>
-    <value>0.7</value>
-    <description>
-      The soft limit in the serialization buffer. Once reached, a thread will
-      begin to spill the contents to disk in the background. Note that
-      collection will not block if this threshold is exceeded while a spill
-      is already in progress, so spills may be larger than this threshold when
-      it is set to less than .5
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.task.io.sort.factor</name>
-    <value>100</value>
-    <description>
-      The number of streams to merge at once while sorting files.
-      This determines the number of open file handles.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- map/reduce properties -->
-  <property>
-    <name>mapreduce.cluster.administrators</name>
-    <value> hadoop</value>
-    <description>
-      Administrators for MapReduce applications.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.shuffle.parallelcopies</name>
-    <value>30</value>
-    <description>
-      The default number of parallel transfers run by reduce during
-      the copy(shuffle) phase.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.map.speculative</name>
-    <value>false</value>
-    <description>
-      If true, then multiple instances of some map tasks
-      may be executed in parallel.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.speculative</name>
-    <value>false</value>
-    <description>
-      If true, then multiple instances of some reduce tasks may be
-      executed in parallel.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.job.reduce.slowstart.completedmaps</name>
-    <value>0.05</value>
-    <description>
-      Fraction of the number of maps in the job which should be complete before
-      reduces are scheduled for the job.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.job.counters.max</name>
-    <value>130</value>
-    <description>
-      Limit on the number of counters allowed per job.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.shuffle.merge.percent</name>
-    <value>0.66</value>
-    <description>
-      The usage threshold at which an in-memory merge will be
-      initiated, expressed as a percentage of the total memory allocated to
-      storing in-memory map outputs, as defined by
-      mapreduce.reduce.shuffle.input.buffer.percent.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
-    <value>0.7</value>
-    <description>
-      The percentage of memory to be allocated from the maximum heap
-      size to storing map outputs during the shuffle.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.output.fileoutputformat.compress.type</name>
-    <value>BLOCK</value>
-    <description>
-      If the job outputs are to be compressed as SequenceFiles, how should
-      they be compressed? Should be one of NONE, RECORD or BLOCK.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.input.buffer.percent</name>
-    <value>0.0</value>
-    <description>
-      The percentage of memory- relative to the maximum heap size- to
-      retain map outputs during the reduce. When the shuffle is concluded, any
-      remaining map outputs in memory must consume less than this threshold before
-      the reduce can begin.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- copied from kryptonite configuration -->
-  <property>
-    <name>mapreduce.map.output.compress</name>
-    <value>false</value>
-    <description>
-      Should the outputs of the maps be compressed before being sent across the network? Uses SequenceFile compression.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.task.timeout</name>
-    <value>300000</value>
-    <description>
-      The number of milliseconds before a task will be
-      terminated if it neither reads an input, writes an output, nor
-      updates its status string.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.map.memory.mb</name>
-    <value>512</value>
-    <description>Virtual memory for single Map task</description>
-    <display-name>Map Memory</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>512</minimum>
-      <maximum>5120</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.scheduler.maximum-allocation-mb</name>
-      </property>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.scheduler.minimum-allocation-mb</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.memory.mb</name>
-    <value>1024</value>
-    <description>Virtual memory for single Reduce task</description>
-    <display-name>Reduce Memory</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>512</minimum>
-      <maximum>5120</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.scheduler.maximum-allocation-mb</name>
-      </property>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.scheduler.minimum-allocation-mb</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.shuffle.port</name>
-    <value>13562</value>
-    <description>
-      Default port that the ShuffleHandler will run on.
-      ShuffleHandler is a service run at the NodeManager to facilitate
-      transfers of intermediate Map outputs to requesting Reducers.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.intermediate-done-dir</name>
-    <value>/mr-history/tmp</value>
-    <description>
-      Directory where history files are written by MapReduce jobs.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.done-dir</name>
-    <value>/mr-history/done</value>
-    <description>
-      Directory where history files are managed by the MR JobHistory Server.
-    </description>
-    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.address</name>
-    <value>localhost:10020</value>
-    <description>Enter your JobHistoryServer hostname.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.webapp.address</name>
-    <value>localhost:19888</value>
-    <description>Enter your JobHistoryServer hostname.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>mapreduce.framework.name</name>
-    <value>yarn</value>
-    <description>
-      The runtime framework for executing MapReduce jobs. Can be one of local,
-      classic or yarn.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.app.mapreduce.am.staging-dir</name>
-    <value>/user</value>
-    <description>
-      The staging dir used while submitting jobs.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.app.mapreduce.am.resource.mb</name>
-    <value>512</value>
-    <description>The amount of memory the MR AppMaster needs.</description>
-    <display-name>AppMaster Memory</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>512</minimum>
-      <maximum>5120</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.scheduler.maximum-allocation-mb</name>
-      </property>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.scheduler.minimum-allocation-mb</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.app.mapreduce.am.command-opts</name>
-    <value>-Xmx410m</value>
-    <description>
-      Java opts for the MR App Master processes.
-      The following symbol, if present, will be interpolated: @taskid@ is replaced
-      by current TaskID. Any other occurrences of '@' will go unchanged.
-      For example, to enable verbose gc logging to a file named for the taskid in
-      /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
-      -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
-
-      Usage of -Djava.library.path can cause programs to no longer function if
-      hadoop native libraries are used. These values should instead be set as part
-      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
-      mapreduce.reduce.env config settings.
-    </description>
-    <display-name>MR AppMaster Java Heap Size</display-name>
-    <depends-on>
-      <property>
-        <type>mapred-site</type>
-        <name>yarn.app.mapreduce.am.resource.mb</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.app.mapreduce.am.admin-command-opts</name>
-    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-    <description>
-      Java opts for the MR App Master processes for admin purposes.
-      It will appear before the opts set by yarn.app.mapreduce.am.command-opts and
-      thus its options can be overridden by the user.
-
-      Usage of -Djava.library.path can cause programs to no longer function if
-      hadoop native libraries are used. These values should instead be set as part
-      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
-      mapreduce.reduce.env config settings.
-    </description>
-    <display-name>MR AppMaster Java Heap Size</display-name>
-    <depends-on>
-      <property>
-        <type>mapred-site</type>
-        <name>yarn.app.mapreduce.am.resource.mb</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.app.mapreduce.am.log.level</name>
-    <value>INFO</value>
-    <description>MR App Master process log level.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.admin.map.child.java.opts</name>
-    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-    <description>This property stores Java options for map tasks.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.admin.reduce.child.java.opts</name>
-    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-    <description>This property stores Java options for reduce tasks.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.application.classpath</name>
-    <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
-    <description>
-      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
-      entries.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.am.max-attempts</name>
-    <value>2</value>
-    <description>
-      The maximum number of application attempts. It is an
-      application-specific setting. It should not be larger than the global number
-      set by the resourcemanager. Otherwise, it will be overridden. The default number is
-      set to 2, to allow at least one retry for the AM.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.map.java.opts</name>
-    <value>-Xmx410m</value>
-    <description>
-      Larger heap-size for child jvms of maps.
-    </description>
-    <display-name>MR Map Java Heap Size</display-name>
-    <depends-on>
-      <property>
-        <type>mapred-site</type>
-        <name>mapreduce.map.memory.mb</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.java.opts</name>
-    <value>-Xmx756m</value>
-    <description>
-      Larger heap-size for child jvms of reduces.
-    </description>
-    <display-name>MR Reduce Java Heap Size</display-name>
-    <depends-on>
-      <property>
-        <type>mapred-site</type>
-        <name>mapreduce.reduce.memory.mb</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.map.log.level</name>
-    <value>INFO</value>
-    <description>
-      The logging level for the map task. The allowed levels are:
-      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.log.level</name>
-    <value>INFO</value>
-    <description>
-      The logging level for the reduce task. The allowed levels are:
-      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.admin.user.env</name>
-    <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/Linux-amd64-64</value>
-    <description>
-      Additional execution environment entries for map and reduce task processes.
-      This is not an additive property. You must preserve the original value if
-      you want your map and reduce tasks to have access to native libraries (compression, etc)
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.output.fileoutputformat.compress</name>
-    <value>false</value>
-    <description>
-      Should the job outputs be compressed?
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.http.policy</name>
-    <value>HTTP_ONLY</value>
-    <description>
-      This configures the HTTP endpoint for the JobHistoryServer web UI. The
-      following values are supported: HTTP_ONLY (service is provided only on
-      HTTP) and HTTPS_ONLY (service is provided only on HTTPS).
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.job.queuename</name>
-    <value>default</value>
-    <description>
-      Queue to which a job is submitted.
-    </description>
-    <depends-on>
-      <property>
-        <type>capacity-scheduler</type>
-        <name>yarn.scheduler.capacity.root.queues</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- These configs were inherited from HDP 2.2 -->
-  <property>
-    <name>mapreduce.reduce.shuffle.fetch.retry.enabled</name>
-    <value>1</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.shuffle.fetch.retry.interval-ms</name>
-    <value>1000</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.shuffle.fetch.retry.timeout-ms</name>
-    <value>30000</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.job.emit-timeline-data</name>
-    <value>false</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.bind-host</name>
-    <value>0.0.0.0</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- These configs were inherited from HDP 2.3 -->
-  <property>
-    <name>mapreduce.jobhistory.recovery.enable</name>
-    <value>true</value>
-    <description>Enable the history server to store server state and recover
-      server state upon startup.  If enabled then
-      mapreduce.jobhistory.recovery.store.class must be specified.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.recovery.store.class</name>
-    <value>org.apache.hadoop.mapreduce.v2.hs.HistoryServerLeveldbStateStoreService</value>
-    <description>The HistoryServerStateStoreService class to store history server
-      state for recovery.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.recovery.store.leveldb.path</name>
-    <value>/hadoop/mapreduce/jhs</value>
-    <description>The URI where history server state will be stored if HistoryServerLeveldbStateStoreService
-      is configured as the recovery storage class.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
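
The mapred-site properties above declare that mapreduce.map.java.opts and mapreduce.reduce.java.opts depend on mapreduce.map.memory.mb and mapreduce.reduce.memory.mb. As a rough illustration of that relationship (not Ambari's actual stack-advisor code), a heap size can be derived from the container size as sketched below; the 0.8 heap fraction, the 512 MB container, and the helper name are assumptions made only for this sketch.

# Illustrative sketch only: derive an -Xmx option from a container memory
# setting, mirroring the depends-on between mapreduce.map.java.opts and
# mapreduce.map.memory.mb above. The 0.8 heap fraction is an assumed
# heuristic, not necessarily what Ambari's stack advisor applies.

def heap_opts(container_mb, heap_fraction=0.8):
    """Return a -Xmx option sized as a fraction of the container memory."""
    heap_mb = max(1, int(round(container_mb * heap_fraction)))
    return "-Xmx{}m".format(heap_mb)

if __name__ == "__main__":
    # With an assumed 512 MB map container this yields -Xmx410m, which
    # happens to line up with the default value shown in the diff above.
    print(heap_opts(512))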

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/capacity-scheduler.xml
deleted file mode 100644
index 320a629..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/capacity-scheduler.xml
+++ /dev/null
@@ -1,183 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>yarn.scheduler.capacity.maximum-applications</name>
-    <value>10000</value>
-    <description>
-      Maximum number of applications that can be pending and running.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
-    <value>0.2</value>
-    <description>
-      Maximum percent of resources in the cluster which can be used to run
-      application masters, i.e. it controls the number of concurrently running
-      applications.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.queues</name>
-    <value>default</value>
-    <description>
-      The queues at this level (root is the root queue).
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.capacity</name>
-    <value>100</value>
-    <description>
-      The total capacity as a percentage out of 100 for this queue.
-      If it has child queues then this includes their capacity as well.
-      The child queues' combined capacity should add up to no more than their
-      parent queue's capacity.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.capacity</name>
-    <value>100</value>
-    <description>Default queue target capacity.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
-    <value>1</value>
-    <description>
-      Default queue user limit, as a percentage from 0.0 to 1.0.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
-    <value>100</value>
-    <description>
-      The maximum capacity of the default queue. 
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.state</name>
-    <value>RUNNING</value>
-    <description>
-      The state of the default queue. State can be one of RUNNING or STOPPED.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
-    <value>*</value>
-    <description>
-      The ACL of who can submit jobs to the default queue.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
-    <value>*</value>
-    <description>
-      The ACL of who can administer jobs on the default queue.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.acl_administer_queue</name>
-    <value>*</value>
-    <description>
-      The ACL for who can administer this queue i.e. change sub-queue 
-      allocations.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.node-locality-delay</name>
-    <value>40</value>
-    <description>
-      Number of missed scheduling opportunities after which the CapacityScheduler
-      attempts to schedule rack-local containers.
-      Typically this should be set to the number of nodes in the cluster; the default
-      of 40 approximates the number of nodes in one rack.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.default.minimum-user-limit-percent</name>
-    <value>100</value>
-    <description>
-      Default minimum queue resource limit depends on the number of users who have submitted applications.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- These configs were inherited from HDP 2.2 -->
-  <property>
-    <name>yarn.scheduler.capacity.resource-calculator</name>
-    <description>
-      The ResourceCalculator implementation to be used to compare Resources in the scheduler.
-      The default, i.e. org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator, only uses
-      Memory, while DominantResourceCalculator uses the dominant resource to compare multi-dimensional
-      resources such as Memory, CPU, etc. A Java ResourceCalculator class name is expected.
-    </description>
-    <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
-    <display-name>CPU Scheduling</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>org.apache.hadoop.yarn.util.resource.DominantResourceCalculator</value>
-          <label>Enabled</label>
-        </entry>
-        <entry>
-          <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
-          <label>Disabled</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.accessible-node-labels</name>
-    <value>*</value>
-    <description/>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- In HDP 2.3, yarn.scheduler.capacity.root.default-node-label-expression was deleted -->
-
-  <!-- These configs were inherited from HDP 2.5 -->
-  <property>
-    <name>capacity-scheduler</name>
-    <description>Enter key=value (one per line) for all properties of capacity-scheduler.xml</description>
-    <depends-on>
-      <property>
-        <type>hive-interactive-env</type>
-        <name>enable_hive_interactive</name>
-      </property>
-      <property>
-        <type>hive-interactive-env</type>
-        <name>llap_queue_capacity</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
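
The last property in the deleted capacity-scheduler.xml is Ambari's bulk field that expects "key=value (one per line)" for all capacity-scheduler settings. A minimal sketch of producing that blob from individual settings follows; the dict only reuses default values that appear in the diff above, and the helper name is made up for illustration.

# Sketch: collapse capacity-scheduler settings into the "key=value, one per
# line" text expected by the bulk capacity-scheduler property above. The
# values are the defaults from the deleted file; this is not Ambari code.

defaults = {
    "yarn.scheduler.capacity.maximum-applications": "10000",
    "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2",
    "yarn.scheduler.capacity.root.queues": "default",
    "yarn.scheduler.capacity.root.capacity": "100",
    "yarn.scheduler.capacity.root.default.capacity": "100",
    "yarn.scheduler.capacity.root.default.state": "RUNNING",
}

def to_bulk_text(props):
    """Join properties into one key=value pair per line."""
    return "\n".join("{}={}".format(k, v) for k, v in sorted(props.items()))

if __name__ == "__main__":
    print(to_bulk_text(defaults))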

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/ranger-yarn-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/ranger-yarn-audit.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/ranger-yarn-audit.xml
deleted file mode 100644
index a6b1baa..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/ranger-yarn-audit.xml
+++ /dev/null
@@ -1,177 +0,0 @@
-<?xml version="1.0"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>xasecure.audit.is.enabled</name>
-    <value>true</value>
-    <description>Is Audit enabled?</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db</name>
-    <value>false</value>
-    <display-name>Audit to DB</display-name>
-    <description>Is Audit to DB enabled?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.db</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.jdbc.url</name>
-    <value>{{audit_jdbc_url}}</value>
-    <description>Audit DB JDBC URL</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.user</name>
-    <value>{{xa_audit_db_user}}</value>
-    <description>Audit DB JDBC User</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.password</name>
-    <value>crypted</value>
-    <property-type>PASSWORD</property-type>
-    <description>Audit DB JDBC Password</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.jdbc.driver</name>
-    <value>{{jdbc_driver}}</value>
-    <description>Audit DB JDBC Driver</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.credential.provider.file</name>
-    <value>jceks://file{{credential_file}}</value>
-    <description>Credential file store</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.batch.filespool.dir</name>
-    <value>/var/log/hadoop/yarn/audit/db/spool</value>
-    <description>/var/log/hadoop/yarn/audit/db/spool</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.hdfs</name>
-    <value>true</value>
-    <display-name>Audit to HDFS</display-name>
-    <description>Is Audit to HDFS enabled?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.hdfs</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.hdfs.dir</name>
-    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
-    <description>HDFS folder to write audit to; make sure the service user has the required permissions</description>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.hdfs.dir</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
-    <value>/var/log/hadoop/yarn/audit/hdfs/spool</value>
-    <description>/var/log/hadoop/yarn/audit/hdfs/spool</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr</name>
-    <value>false</value>
-    <display-name>Audit to SOLR</display-name>
-    <description>Is Solr audit enabled?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.solr</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr.urls</name>
-    <value/>
-    <description>Solr URL</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-admin-site</type>
-        <name>ranger.audit.solr.urls</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr.zookeepers</name>
-    <value>NONE</value>
-    <description>Solr Zookeeper string</description>
-    <depends-on>
-      <property>
-        <type>ranger-admin-site</type>
-        <name>ranger.audit.solr.zookeepers</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
-    <value>/var/log/hadoop/yarn/audit/solr/spool</value>
-    <description>/var/log/hadoop/yarn/audit/solr/spool</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.provider.summary.enabled</name>
-    <value>false</value>
-    <display-name>Audit provider summary enabled</display-name>
-    <description>Enable Summary audit?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
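
Several ranger-yarn-audit values above are placeholders such as {{audit_jdbc_url}}, {{xa_audit_db_user}} and jceks://file{{credential_file}} that get filled in at deploy time. A minimal, assumption-heavy sketch of that kind of substitution is shown below; the regex renderer and the sample variable values are illustrative only and do not reflect Ambari's own templating mechanism.

# Sketch of {{...}} placeholder substitution for values such as
# xasecure.audit.destination.db.jdbc.url above. The renderer and the sample
# variable values are assumptions for illustration, not Ambari's mechanism.

import re

def render(value, variables):
    """Replace {{name}} tokens with entries from the variables dict."""
    return re.sub(r"\{\{(\w+)\}\}", lambda m: str(variables[m.group(1)]), value)

if __name__ == "__main__":
    variables = {
        "audit_jdbc_url": "jdbc:mysql://example-host:3306/ranger_audit",  # made-up URL
        "credential_file": "/etc/ranger/yarn/cred.jceks",                 # made-up path
    }
    print(render("{{audit_jdbc_url}}", variables))
    print(render("jceks://file{{credential_file}}", variables))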

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/ranger-yarn-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/ranger-yarn-plugin-properties.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/ranger-yarn-plugin-properties.xml
deleted file mode 100644
index 97867cc..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/ranger-yarn-plugin-properties.xml
+++ /dev/null
@@ -1,82 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="true">
-  <property>
-    <name>policy_user</name>
-    <value>ambari-qa</value>
-    <display-name>Policy user for YARN</display-name>
-    <description>This user must be a system user and must also be present in the Ranger admin portal</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>hadoop.rpc.protection</name>
-    <value/>
-    <description>Used for repository creation on ranger admin</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>common.name.for.certificate</name>
-    <value/>
-    <description>Common name for certificate, this value should match what is specified in repo within ranger admin</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger-yarn-plugin-enabled</name>
-    <value>No</value>
-    <display-name>Enable Ranger for YARN</display-name>
-    <description>Enable the Ranger YARN plugin?</description>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>ranger-yarn-plugin-enabled</name>
-      </property>
-    </depends-on>
-    <value-attributes>
-      <type>boolean</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>REPOSITORY_CONFIG_USERNAME</name>
-    <value>yarn</value>
-    <display-name>Ranger repository config user</display-name>
-    <description>Used for repository creation on ranger admin</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>REPOSITORY_CONFIG_PASSWORD</name>
-    <value>yarn</value>
-    <display-name>Ranger repository config password</display-name>
-    <property-type>PASSWORD</property-type>
-    <description>Used for repository creation on ranger admin</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>


[14/19] ambari git commit: AMBARI-19229. Remove HDP-3.0.0 stack definition from Ambari-2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/balancer-emulator/balancer-err.log
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/balancer-emulator/balancer-err.log b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/balancer-emulator/balancer-err.log
deleted file mode 100644
index d7c6704..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/balancer-emulator/balancer-err.log
+++ /dev/null
@@ -1,1032 +0,0 @@
-14/07/28 17:01:48 INFO balancer.Balancer: Using a threshold of 5.0
-14/07/28 17:01:48 INFO balancer.Balancer: namenodes = [hdfs://evhubudsd1aae.budapest.epam.com:8020]
-14/07/28 17:01:48 INFO balancer.Balancer: p         = Balancer.Parameters[BalancingPolicy.Node, threshold=5.0]
-14/07/28 17:01:49 INFO balancer.Balancer: Block token params received from NN: keyUpdateInterval=600 min(s), tokenLifetime=600 min(s)
-14/07/28 17:01:49 INFO block.BlockTokenSecretManager: Setting block keys
-14/07/28 17:01:49 INFO balancer.Balancer: Balancer will update its block keys every 150 minute(s)
-14/07/28 17:01:49 INFO block.BlockTokenSecretManager: Setting block keys
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:01:49 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.887235026238486]]
-14/07/28 17:01:49 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.178140109955496]]
-14/07/28 17:01:49 INFO balancer.Balancer: Need to move 5.74 GB to make the cluster balanced.
-14/07/28 17:01:49 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:01:49 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:01:57 INFO balancer.Balancer: Moving block 1073950748 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:01:58 INFO balancer.Balancer: Moving block 1073939272 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:02:06 INFO balancer.Balancer: Moving block 1073863504 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:02:13 INFO balancer.Balancer: Moving block 1073863516 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:02:31 INFO balancer.Balancer: Moving block 1073743089 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:03:00 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.803451571241915]]
-14/07/28 17:03:00 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.262867215362437]]
-14/07/28 17:03:00 INFO balancer.Balancer: Need to move 5.58 GB to make the cluster balanced.
-14/07/28 17:03:00 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:03:00 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073937443 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073926003 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073916372 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073926002 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073920016 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:03:05 INFO balancer.Balancer: Moving block 1073937461 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:03:11 INFO balancer.Balancer: Moving block 1073743437 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:03:20 INFO balancer.Balancer: Moving block 1073743443 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:03:31 INFO balancer.Balancer: Moving block 1073743449 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:03:34 INFO balancer.Balancer: Moving block 1073743440 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:04:07 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.70875539052811]]
-14/07/28 17:04:07 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.35756339607624]]
-14/07/28 17:04:07 INFO balancer.Balancer: Need to move 5.40 GB to make the cluster balanced.
-14/07/28 17:04:07 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:04:07 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:04:07 INFO balancer.Balancer: Moving block 1073743776 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:04:08 INFO balancer.Balancer: Moving block 1073915941 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:04:08 INFO balancer.Balancer: Moving block 1073930160 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:04:08 INFO balancer.Balancer: Moving block 1073930161 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:04:08 INFO balancer.Balancer: Moving block 1073908316 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:04:09 INFO balancer.Balancer: Moving block 1073930163 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:04:51 INFO balancer.Balancer: Moving block 1073947549 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:05:04 INFO balancer.Balancer: Moving block 1073863141 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:05:06 INFO balancer.Balancer: Moving block 1073863139 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:05:14 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.53815392807349]]
-14/07/28 17:05:14 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.528164858530864]]
-14/07/28 17:05:14 INFO balancer.Balancer: Need to move 5.06 GB to make the cluster balanced.
-14/07/28 17:05:14 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:05:14 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073945158 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073918874 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073918873 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073945162 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073918867 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073945160 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073914540 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073918868 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073931861 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:05:50 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.538117645568114]]
-14/07/28 17:05:50 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.52820114103624]]
-14/07/28 17:05:50 INFO balancer.Balancer: Need to move 5.06 GB to make the cluster balanced.
-14/07/28 17:05:50 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:05:50 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073916888 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073925481 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073920767 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073908143 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073911961 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073929306 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:06:09 INFO balancer.Balancer: Moving block 1073863170 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:06:33 INFO balancer.Balancer: Moving block 1073929250 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:06:35 INFO balancer.Balancer: Moving block 1073863186 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:06:56 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.407811418798076]]
-14/07/28 17:06:56 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.658507367806276]]
-14/07/28 17:06:56 INFO balancer.Balancer: Need to move 4.81 GB to make the cluster balanced.
-14/07/28 17:06:56 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:06:56 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:06:56 INFO balancer.Balancer: Moving block 1073919724 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:06:56 INFO balancer.Balancer: Moving block 1073915864 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073910902 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073949844 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073926217 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073919721 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073926320 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073946575 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073949843 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:07:33 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.4068167244793]]
-14/07/28 17:07:33 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.659502062125057]]
-14/07/28 17:07:33 INFO balancer.Balancer: Need to move 4.80 GB to make the cluster balanced.
-14/07/28 17:07:33 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:07:33 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:07:33 INFO balancer.Balancer: Moving block 1073948620 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:07:33 INFO balancer.Balancer: Moving block 1073917051 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:07:34 INFO balancer.Balancer: Moving block 1073924651 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:07:40 INFO balancer.Balancer: Moving block 1073742834 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:08:55 INFO balancer.Balancer: Moving block 1073894040 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:08:56 INFO balancer.Balancer: Moving block 1073932476 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:08:59 INFO balancer.Balancer: Moving block 1073742598 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:09:00 INFO balancer.Balancer: Moving block 1073893997 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:09:11 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.144332676814294]]
-14/07/28 17:09:11 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.92198610979006]]
-14/07/28 17:09:11 INFO balancer.Balancer: Need to move 4.29 GB to make the cluster balanced.
-14/07/28 17:09:11 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:09:11 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073920127 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073743556 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073743557 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073929950 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073942945 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073920115 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073743559 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073947343 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073920075 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:09:47 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.14396676101451]]
-14/07/28 17:09:47 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.92215625345692]]
-14/07/28 17:09:47 INFO balancer.Balancer: Need to move 4.29 GB to make the cluster balanced.
-14/07/28 17:09:47 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:09:47 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:09:47 INFO balancer.Balancer: Moving block 1073951772 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:47 INFO balancer.Balancer: Moving block 1073951752 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.225:50010 is succeeded.
-14/07/28 17:09:47 INFO balancer.Balancer: Moving block 1073951754 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:09:47 INFO balancer.Balancer: Moving block 1073951766 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:52 INFO balancer.Balancer: Moving block 1073951747 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:56 INFO balancer.Balancer: Moving block 1073951765 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:10:53 INFO balancer.Balancer: Moving block 1073951746 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:10:54 INFO balancer.Balancer: Moving block 1073951745 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:10:54 INFO balancer.Balancer: Moving block 1073951744 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:11:24 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.9413931647133]]
-14/07/28 17:11:24 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:11:24 INFO balancer.Balancer: Need to move 3.89 GB to make the cluster balanced.
-14/07/28 17:11:24 INFO balancer.Balancer: Decided to move 5.84 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
-14/07/28 17:11:24 INFO balancer.Balancer: Decided to move 2.64 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
-14/07/28 17:11:24 INFO balancer.Balancer: Decided to move 1.31 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
-14/07/28 17:11:24 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073940539 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073940537 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073927798 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073935420 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073927775 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073923954 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073918163 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073949253 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.3:50010 is succeeded.
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073931581 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:11:25 INFO balancer.Balancer: Moving block 1073923922 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.3:50010 is succeeded.
-14/07/28 17:11:25 INFO balancer.Balancer: Moving block 1073931532 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:11:25 INFO balancer.Balancer: Moving block 1073949248 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073923928 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073927787 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.129.225:50010 is succeeded.
-14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073949252 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.129.225:50010 is succeeded.
-14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073906578 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073914353 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.6:50010 is succeeded.
-14/07/28 17:11:30 INFO balancer.Balancer: Moving block 1073931557 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:11:30 INFO balancer.Balancer: Moving block 1073910459 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:12:00 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.923538618186065]]
-14/07/28 17:12:00 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:12:00 INFO balancer.Balancer: Need to move 3.86 GB to make the cluster balanced.
-14/07/28 17:12:00 INFO balancer.Balancer: Decided to move 2.61 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
-14/07/28 17:12:00 INFO balancer.Balancer: Decided to move 7.18 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:12:00 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073949133 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.7:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073945194 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073927453 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073923118 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073905689 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073914494 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073905688 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073923119 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073914488 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073905681 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073905677 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073927648 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073945235 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073945226 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073910053 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073927664 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.6:50010 is succeeded.
-14/07/28 17:12:29 INFO balancer.Balancer: Moving block 1073905173 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:13:19 INFO balancer.Balancer: Moving block 1073905177 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:19 INFO balancer.Balancer: Moving block 1073905171 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:21 INFO balancer.Balancer: Moving block 1073905175 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:27 INFO balancer.Balancer: Moving block 1073905172 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:13:37 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.60177342833359]]
-14/07/28 17:13:37 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:13:37 INFO balancer.Balancer: Need to move 3.23 GB to make the cluster balanced.
-14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 1.73 GB bytes from 10.253.130.9:50010 to 10.253.129.223:50010
-14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 375.17 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
-14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 1.00 GB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
-14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 3.66 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
-14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 3.03 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
-14/07/28 17:13:37 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073914692 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073927391 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073927383 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073923582 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073905952 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073914693 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073923467 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073918495 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.10:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073923466 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.225:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073948829 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073945548 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073948902 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.10:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073945546 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073905987 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073945549 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.129.225:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073918570 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.129.225:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073945542 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073927370 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.3:50010 is succeeded.
-14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073914708 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.8:50010 is succeeded.
-14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073948908 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.1:50010 is succeeded.
-14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073918565 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073923572 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:13:46 INFO balancer.Balancer: Moving block 1073936056 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:49 INFO balancer.Balancer: Moving block 1073936057 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:52 INFO balancer.Balancer: Moving block 1073936063 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:14:09 INFO balancer.Balancer: Moving block 1073936045 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:14:09 INFO balancer.Balancer: Moving block 1073936034 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:14:40 INFO balancer.Balancer: Moving block 1073936032 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.10:50010 is succeeded.
-14/07/28 17:14:40 INFO balancer.Balancer: Moving block 1073936033 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:14:41 INFO balancer.Balancer: Moving block 1073936036 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.6:50010 is succeeded.
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:15:13 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.2458785989085]]
-14/07/28 17:15:13 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:15:13 INFO balancer.Balancer: Need to move 2.53 GB to make the cluster balanced.
-14/07/28 17:15:13 INFO balancer.Balancer: Decided to move 5.46 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
-14/07/28 17:15:13 INFO balancer.Balancer: Decided to move 3.66 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
-14/07/28 17:15:13 INFO balancer.Balancer: Decided to move 683.02 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
-14/07/28 17:15:13 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073934407 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073926699 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073907624 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073930612 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.6:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073950332 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073934387 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073930508 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073934414 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073945924 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073922816 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:15:14 INFO balancer.Balancer: Moving block 1073934411 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:14 INFO balancer.Balancer: Moving block 1073926698 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:15:14 INFO balancer.Balancer: Moving block 1073922838 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:15:15 INFO balancer.Balancer: Moving block 1073919113 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:15 INFO balancer.Balancer: Moving block 1073922843 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:15:15 INFO balancer.Balancer: Moving block 1073907649 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:15 INFO balancer.Balancer: Moving block 1073950223 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:15:49 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.23893576243181]]
-14/07/28 17:15:49 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:15:49 INFO balancer.Balancer: Need to move 2.52 GB to make the cluster balanced.
-14/07/28 17:15:49 INFO balancer.Balancer: Decided to move 375.06 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
-14/07/28 17:15:49 INFO balancer.Balancer: Decided to move 3.66 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
-14/07/28 17:15:49 INFO balancer.Balancer: Decided to move 4.44 GB bytes from 10.253.130.9:50010 to 10.253.130.10:50010
-14/07/28 17:15:49 INFO balancer.Balancer: Decided to move 1.33 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
-14/07/28 17:15:49 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073931740 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073927810 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073923141 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073910191 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073905793 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.3:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073940704 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073949348 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936134 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073914594 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073949356 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.8:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936148 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936164 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936158 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073949359 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073918912 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073914616 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936151 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073923999 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:15:50 INFO balancer.Balancer: Moving block 1073940722 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:15:51 INFO balancer.Balancer: Moving block 1073927855 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:51 INFO balancer.Balancer: Moving block 1073906497 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:51 INFO balancer.Balancer: Moving block 1073949350 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.129.224:50010 is succeeded.
-14/07/28 17:15:51 INFO balancer.Balancer: Moving block 1073945051 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:16:25 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.236639727566796]]
-14/07/28 17:16:25 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:16:25 INFO balancer.Balancer: Need to move 2.51 GB to make the cluster balanced.
-14/07/28 17:16:25 INFO balancer.Balancer: Decided to move 2.36 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
-14/07/28 17:16:25 INFO balancer.Balancer: Decided to move 1.53 GB bytes from 10.253.130.9:50010 to 10.253.129.223:50010
-14/07/28 17:16:25 INFO balancer.Balancer: Decided to move 5.45 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
-14/07/28 17:16:25 INFO balancer.Balancer: Decided to move 463.99 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
-14/07/28 17:16:25 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073942946 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073947339 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073912361 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073926131 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073947341 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073929961 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743570 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073916254 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743604 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743581 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073926130 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073920078 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073916287 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073933727 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073908503 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743586 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743580 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073937539 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073908497 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073942916 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743590 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073947329 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743599 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.6:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743600 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.6:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073895265 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073937542 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073916258 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.225:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073916286 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.1:50010 is succeeded.
-14/07/28 17:16:47 INFO balancer.Balancer: Moving block 1073862841 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:17:01 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.1720712908457]]
-14/07/28 17:17:01 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:17:01 INFO balancer.Balancer: Need to move 2.39 GB to make the cluster balanced.
-14/07/28 17:17:01 INFO balancer.Balancer: Decided to move 3.66 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
-14/07/28 17:17:01 INFO balancer.Balancer: Decided to move 5.45 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
-14/07/28 17:17:01 INFO balancer.Balancer: Decided to move 698.32 MB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
-14/07/28 17:17:01 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915689 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073946573 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915690 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915841 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073919491 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915694 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.10:50010 is succeeded.
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915842 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073949829 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.3:50010 is succeeded.
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073895888 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073949830 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073922418 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073931011 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073949848 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073904475 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073946583 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073904561 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073949813 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073915703 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073926226 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:17:37 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.17123487505752]]
-14/07/28 17:17:37 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:17:37 INFO balancer.Balancer: Need to move 2.38 GB to make the cluster balanced.
-14/07/28 17:17:37 INFO balancer.Balancer: Decided to move 2.23 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
-14/07/28 17:17:37 INFO balancer.Balancer: Decided to move 373.37 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
-14/07/28 17:17:37 INFO balancer.Balancer: Decided to move 4.43 GB bytes from 10.253.130.9:50010 to 10.253.130.10:50010
-14/07/28 17:17:37 INFO balancer.Balancer: Decided to move 2.76 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
-14/07/28 17:17:37 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951505 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951406 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951465 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951428 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951479 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951294 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951363 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951445 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951368 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951466 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951325 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.129.224:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951296 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951333 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951315 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951502 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951383 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951489 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951504 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.3:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951313 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951326 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951310 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:44 INFO balancer.Balancer: Moving block 1073951520 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.1:50010 is succeeded.
-14/07/28 17:17:44 INFO balancer.Balancer: Moving block 1073864141 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:18:14 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.13074467796647]]
-14/07/28 17:18:14 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:18:14 INFO balancer.Balancer: Need to move 2.31 GB to make the cluster balanced.
-14/07/28 17:18:14 INFO balancer.Balancer: Decided to move 9.08 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:18:14 INFO balancer.Balancer: Decided to move 729.65 MB bytes from 10.253.130.9:50010 to 10.253.129.223:50010
-14/07/28 17:18:14 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935830 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073931492 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073931497 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073913899 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073910416 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073928121 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073931496 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073927763 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935825 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935414 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073928117 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073928114 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935419 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073927766 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935418 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073910423 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073949598 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:18:50 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.1305062958578]]
-14/07/28 17:18:50 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:18:50 INFO balancer.Balancer: Need to move 2.30 GB to make the cluster balanced.
-14/07/28 17:18:50 INFO balancer.Balancer: Decided to move 895.07 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
-14/07/28 17:18:50 INFO balancer.Balancer: Decided to move 1.53 GB bytes from 10.253.130.9:50010 to 10.253.129.223:50010
-14/07/28 17:18:50 INFO balancer.Balancer: Decided to move 7.38 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:18:50 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073930642 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073950456 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.10:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934505 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073950457 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.8:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934524 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073930646 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073915219 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934502 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073930640 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073926854 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934510 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.129.225:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934503 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:18:51 INFO balancer.Balancer: Moving block 1073926851 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:18:51 INFO balancer.Balancer: Moving block 1073926857 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.6:50010 is succeeded.
-14/07/28 17:18:51 INFO balancer.Balancer: Moving block 1073930652 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.3:50010 is succeeded.
-14/07/28 17:18:52 INFO balancer.Balancer: Moving block 1073930651 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.6:50010 is succeeded.
-14/07/28 17:19:02 INFO balancer.Balancer: Moving block 1073934496 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:19:03 INFO balancer.Balancer: Moving block 1073934497 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:19:26 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.07965400229293]]
-14/07/28 17:19:26 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:19:26 INFO balancer.Balancer: Need to move 2.21 GB to make the cluster balanced.
-14/07/28 17:19:26 INFO balancer.Balancer: Decided to move 333.25 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
-14/07/28 17:19:26 INFO balancer.Balancer: Decided to move 4.43 GB bytes from 10.253.130.9:50010 to 10.253.130.10:50010
-14/07/28 17:19:26 INFO balancer.Balancer: Decided to move 881.78 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
-14/07/28 17:19:26 INFO balancer.Balancer: Decided to move 4.17 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
-14/07/28 17:19:26 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073931910 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073905704 from 10.253.130.9:50010 to 10.253.130.7:50010 thr

<TRUNCATED>

[16/19] ambari git commit: AMBARI-19229. Remove HDP-3.0.0 stack definition from Ambari-2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/metrics.json b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/metrics.json
deleted file mode 100644
index c66387d..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/metrics.json
+++ /dev/null
@@ -1,7905 +0,0 @@
-{
-  "NAMENODE": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/cpu/cpu_idle":{
-              "metric":"cpu_idle",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/cpu/cpu_nice":{
-              "metric":"cpu_nice",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/cpu/cpu_system":{
-              "metric":"cpu_system",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/cpu/cpu_user":{
-              "metric":"cpu_user",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/cpu/cpu_wio":{
-              "metric":"cpu_wio",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/disk/disk_free":{
-              "metric":"disk_free",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/disk/disk_total":{
-              "metric":"disk_total",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/load/load_fifteen":{
-              "metric":"load_fifteen",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/load/load_five":{
-              "metric":"load_five",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/load/load_one":{
-              "metric":"load_one",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/memory/mem_buffers":{
-              "metric":"mem_buffers",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/memory/mem_cached":{
-              "metric":"mem_cached",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/memory/mem_free":{
-              "metric":"mem_free",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/memory/mem_shared":{
-              "metric":"mem_shared",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/memory/mem_total":{
-              "metric":"mem_total",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/memory/swap_free":{
-              "metric":"swap_free",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/memory/swap_total":{
-              "metric":"swap_total",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/network/bytes_in":{
-              "metric":"bytes_in",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/network/bytes_out":{
-              "metric":"bytes_out",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/network/pkts_in":{
-              "metric":"pkts_in",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/network/pkts_out":{
-              "metric":"pkts_out",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/process/proc_run":{
-              "metric":"proc_run",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/process/proc_total":{
-              "metric":"proc_total",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/disk/read_count":{
-              "metric":"read_count",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/disk/write_count":{
-              "metric":"write_count",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/disk/read_bytes":{
-              "metric":"read_bytes",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/disk/write_bytes":{
-              "metric":"write_bytes",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/disk/read_time":{
-              "metric":"read_time",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/disk/write_time":{
-              "metric":"write_time",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/dfs/FSNamesystem/TotalLoad": {
-              "metric": "dfs.FSNamesystem.TotalLoad",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/CapacityTotal": {
-              "metric": "dfs.FSNamesystem.CapacityTotal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/CapacityUsed": {
-              "metric": "dfs.FSNamesystem.CapacityUsed",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/CapacityRemaining": {
-              "metric": "dfs.FSNamesystem.CapacityRemaining",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
-              "metric": "dfs.FSNamesystem.CapacityUsedNonDFS",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/BlockCapacity": {
-              "metric": "dfs.FSNamesystem.BlockCapacity",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/GetListingOps": {
-              "metric": "dfs.namenode.GetListingOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/FilesAppended": {
-              "metric": "dfs.namenode.FilesAppended",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/TotalFileOps": {
-              "metric": "dfs.namenode.TotalFileOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/fsync_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.FsyncAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/renewLease_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.RenewLeaseNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getFileInfo_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.GetFileInfoAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-              "unit": "MB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/complete_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.CompleteAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/setPermission_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.SetPermissionNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/CapacityTotalGB": {
-              "metric": "dfs.FSNamesystem.CapacityTotalGB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/setOwner_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.SetOwnerNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getBlockLocations_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/CapacityUsedGB": {
-              "metric": "dfs.FSNamesystem.CapacityUsedGB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/AddBlockOps": {
-              "metric": "dfs.namenode.AddBlockOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/FilesDeleted": {
-              "metric": "dfs.namenode.FilesDeleted",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/Syncs_avg_time": {
-              "metric": "dfs.namenode.SyncsAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "jvm.JvmMetrics.ThreadsBlocked",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "rpc.rpc.RpcQueueTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/blockReport_avg_time": {
-              "metric": "dfs.namenode.BlockReportAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/disk/part_max_used": {
-              "metric": "part_max_used",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getFileInfo_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.GetFileInfoNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getEditLogSize_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/blockReceived_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/versionRequest_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.VersionRequestAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/versionRequest_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.VersionRequestNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/addBlock_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.AddBlockNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/FilesCreated": {
-              "metric": "dfs.namenode.FilesCreated",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rename_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.RenameAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/setSafeMode_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.SetSafeModeNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/setPermission_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.SetPermissionAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/FilesRenamed": {
-              "metric": "dfs.namenode.FilesRenamed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/register_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/setReplication_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.SetReplicationNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/GetBlockLocations": {
-              "metric": "dfs.namenode.GetBlockLocations",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/fsync_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.FsyncNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/create_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.CreateAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
-              "metric": "dfs.FSNamesystem.PendingReplicationBlocks",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/delete_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.DeleteAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/FileInfoOps": {
-              "metric": "dfs.namenode.FileInfoOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/sendHeartbeat_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.SendHeartbeatNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/DeleteFileOps": {
-              "metric": "dfs.namenode.DeleteFileOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_avg_time": {
-              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/blockReport_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.BlockReportNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/setSafeMode_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.SetSafeModeAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthenticationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
-              "metric": "dfs.FSNamesystem.PendingDeletionBlocks",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationFailures": {
-              "metric": "rpc.rpc.RpcAuthenticationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getEditLogSize_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/FilesInGetListingOps": {
-              "metric": "dfs.namenode.FilesInGetListingOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "jvm.JvmMetrics.ThreadsRunnable",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/complete_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.CompleteNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "jvm.JvmMetrics.ThreadsNew",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationFailures": {
-              "metric": "rpc.rpc.RpcAuthorizationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/Syncs_num_ops": {
-              "metric": "dfs.namenode.SyncsNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/blockReceived_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/setReplication_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.SetReplicationAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rollEditLog_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.RollEditLogAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "rpc.rpc.SentBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/FilesTotal": {
-              "metric": "dfs.FSNamesystem.FilesTotal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "jvm.JvmMetrics.LogWarn",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/ExcessBlocks": {
-              "metric": "dfs.FSNamesystem.ExcessBlocks",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "jvm.JvmMetrics.GcCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "rpc.rpc.ReceivedBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/blockReport_num_ops": {
-              "metric": "dfs.namenode.BlockReportNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/SafemodeTime": {
-              "metric": "dfs.namenode.SafemodeTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/mkdirs_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.MkdirsAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "rpc.rpc.NumOpenConnections",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemHeapUsedM",
-              "unit": "MB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
-              "metric": "dfs.FSNamesystem.ScheduledReplicationBlocks",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/BlocksTotal": {
-              "metric": "dfs.FSNamesystem.BlocksTotal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "jvm.JvmMetrics.GcTimeMillis",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getBlockLocations_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/Transactions_num_ops": {
-              "metric": "dfs.namenode.TransactionsNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/create_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.CreateNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "jvm.JvmMetrics.ThreadsTerminated",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
-              "metric": "dfs.FSNamesystem.CapacityRemainingGB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/Transactions_avg_time": {
-              "metric": "dfs.namenode.TransactionsAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/boottime": {
-              "metric": "boottime",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/dfs/FSNamesystem/MissingBlocks": {
-              "metric": "dfs.FSNamesystem.MissingBlocks",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/callQueueLen": {
-              "metric": "rpc.rpc.CallQueueLength",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/delete_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.DeleteNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/CorruptBlocks": {
-              "metric": "dfs.FSNamesystem.CorruptBlocks",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rename_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.RenameNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/blockReport_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.BlockReportAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/mkdirs_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.MkdirsNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "jvm.JvmMetrics.LogInfo",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/fsImageLoadTime": {
-              "metric": "dfs.namenode.FsImageLoadTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getListing_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.GetListingNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rollEditLog_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.RollEditLogNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/addBlock_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.AddBlockAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/setOwner_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.SetOwnerAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_num_ops": {
-              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
-              "metric": "dfs.FSNamesystem.UnderReplicatedBlocks",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/sendHeartbeat_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.SendHeartbeatAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/CreateFileOps": {
-              "metric": "dfs.namenode.CreateFileOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logError": {
-              "metric": "jvm.JvmMetrics.LogError",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/register_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getListing_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.GetListingAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "jvm.JvmMetrics.LogFatal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/renewLease_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.RenewLeaseAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/default/StartupProgress/ElapsedTime": {
-              "metric": "default.StartupProgress.ElapsedTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/default/StartupProgress/LoadingEditsCount": {
-              "metric": "default.StartupProgress.LoadingEditsCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/default/StartupProgress/LoadingEditsElapsedTime": {
-              "metric": "default.StartupProgress.LoadingEditsElapsedTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/default/StartupProgress/LoadingEditsPercentComplete": {
-              "metric": "default.StartupProgress.LoadingEditsPercentComplete",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/default/StartupProgress/LoadingEditsTotal": {
-              "metric": "default.StartupProgress.LoadingEditsTotal",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/default/StartupProgress/LoadingFsImageCount": {
-              "metric": "default.StartupProgress.LoadingFsImageCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/default/StartupProgress/LoadingFsImageElapsedTime": {
-              "metric": "default.StartupProgress.LoadingFsImageElapsedTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/default/StartupProgress/LoadingFsImagePercentComplete": {
-              "metric": "default.StartupProgress.LoadingFsImagePercentComplete",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/default/StartupProgress/LoadingFsImageTotal": {
-              "metric": "default.StartupProgress.LoadingFsImageTotal",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/default/StartupProgress/PercentComplete": {
-              "metric": "default.StartupProgress.PercentComplete",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/default/StartupProgress/SafeModeCount": {
-              "metric": "default.StartupProgress.SafeModeCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/default/StartupProgress/SafeModeElapsedTime": {
-              "metric": "default.StartupProgress.SafeModeElapsedTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/default/StartupProgress/SafeModePercentComplete": {
-              "metric": "default.StartupProgress.SafeModePercentComplete",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/default/StartupProgress/SafeModeTotal": {
-              "metric": "default.StartupProgress.SafeModeTotal",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/default/StartupProgress/SavingCheckpointCount": {
-              "metric": "default.StartupProgress.SavingCheckpointCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/default/StartupProgress/SavingCheckpointElapsedTime": {
-              "metric": "default.StartupProgress.SavingCheckpointElapsedTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/default/StartupProgress/SavingCheckpointPercentComplete": {
-              "metric": "default.StartupProgress.SavingCheckpointPercentComplete",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/default/StartupProgress/SavingCheckpointTotal": {
-              "metric": "default.StartupProgress.SavingCheckpointTotal",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/ExpiredHeartbeats": {
-              "metric": "dfs.FSNamesystem.ExpiredHeartbeats",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/LastCheckpointTime": {
-              "metric": "dfs.FSNamesystem.LastCheckpointTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/LastWrittenTransactionId": {
-              "metric": "dfs.FSNamesystem.LastWrittenTransactionId",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/MillisSinceLastLoadedEdits": {
-              "metric": "dfs.FSNamesystem.MillisSinceLastLoadedEdits",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/MissingReplOneBlocks": {
-              "metric": "dfs.FSNamesystem.MissingReplOneBlocks",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/PendingDataNodeMessageCount": {
-              "metric": "dfs.FSNamesystem.PendingDataNodeMessageCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/PostponedMisreplicatedBlocks": {
-              "metric": "dfs.FSNamesystem.PostponedMisreplicatedBlocks",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/Snapshots": {
-              "metric": "dfs.FSNamesystem.Snapshots",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/SnapshottableDirectories": {
-              "metric": "dfs.FSNamesystem.SnapshottableDirectories",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/StaleDataNodes": {
-              "metric": "dfs.FSNamesystem.StaleDataNodes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/TotalFiles": {
-              "metric": "dfs.FSNamesystem.TotalFiles",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/TransactionsSinceLastCheckpoint": {
-              "metric": "dfs.FSNamesystem.TransactionsSinceLastCheckpoint",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/TransactionsSinceLastLogRoll": {
-              "metric": "dfs.FSNamesystem.TransactionsSinceLastLogRoll",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/AllowSnapshotOps": {
-              "metric": "dfs.namenode.AllowSnapshotOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/BlockReceivedAndDeletedOps": {
-              "metric": "dfs.namenode.BlockReceivedAndDeletedOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/CacheReportAvgTime": {
-              "metric": "dfs.namenode.CacheReportAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/CacheReportNumOps": {
-              "metric": "dfs.namenode.CacheReportNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/CreateSnapshotOps": {
-              "metric": "dfs.namenode.CreateSnapshotOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/CreateSymlinkOps": {
-              "metric": "dfs.namenode.CreateSymlinkOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/DeleteSnapshotOps": {
-              "metric": "dfs.namenode.DeleteSnapshotOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/DisallowSnapshotOps": {
-              "metric": "dfs.namenode.DisallowSnapshotOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/FilesTruncated": {
-              "metric": "dfs.namenode.FilesTruncated",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/GetAdditionalDatanodeOps": {
-              "metric": "dfs.namenode.GetAdditionalDatanodeOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/GetEditAvgTime": {
-              "metric": "dfs.namenode.GetEditAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/GetEditNumOps": {
-              "metric": "dfs.namenode.GetEditNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/GetImageAvgTime": {
-              "metric": "dfs.namenode.GetImageAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/GetImageNumOps": {
-              "metric": "dfs.namenode.GetImageNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/GetLinkTargetOps": {
-              "metric": "dfs.namenode.GetLinkTargetOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/ListSnapshottableDirOps": {
-              "metric": "dfs.namenode.ListSnapshottableDirOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/PutImageAvgTime": {
-              "metric": "dfs.namenode.PutImageAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/PutImageNumOps": {
-              "metric": "dfs.namenode.PutImageNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/RenameSnapshotOps": {
-              "metric": "dfs.namenode.RenameSnapshotOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/SnapshotDiffReportOps": {
-              "metric": "dfs.namenode.SnapshotDiffReportOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/StorageBlockReportOps": {
-              "metric": "dfs.namenode.StorageBlockReportOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/TransactionsBatchedInSync": {
-              "metric": "dfs.namenode.TransactionsBatchedInSync",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcCountConcurrentMarkSweep": {
-              "metric": "jvm.JvmMetrics.GcCountConcurrentMarkSweep",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcCountParNew": {
-              "metric": "jvm.JvmMetrics.GcCountParNew",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcNumInfoThresholdExceeded": {
-              "metric": "jvm.JvmMetrics.GcNumInfoThresholdExceeded",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcNumWarnThresholdExceeded": {
-              "metric": "jvm.JvmMetrics.GcNumWarnThresholdExceeded",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcTimeMillisConcurrentMarkSweep": {
-              "metric": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcTimeMillisParNew": {
-              "metric": "jvm.JvmMetrics.GcTimeMillisParNew",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcTotalExtraSleepTime": {
-              "metric": "jvm.JvmMetrics.GcTotalExtraSleepTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/MemHeapMaxM": {
-              "metric": "jvm.JvmMetrics.MemHeapMaxM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/MemMaxM": {
-              "metric": "jvm.JvmMetrics.MemMaxM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/MemNonHeapMaxM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/DroppedPubAll": {
-              "metric": "metricssystem.MetricsSystem.DroppedPubAll",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumActiveSinks": {
-              "metric": "metricssystem.MetricsSystem.NumActiveSinks",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumActiveSources": {
-              "metric": "metricssystem.MetricsSystem.NumActiveSources",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumAllSinks": {
-              "metric": "metricssystem.MetricsSystem.NumAllSinks",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumAllSources": {
-              "metric": "metricssystem.MetricsSystem.NumAllSources",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/PublishAvgTime": {
-              "metric": "metricssystem.MetricsSystem.PublishAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/PublishNumOps": {
-              "metric": "metricssystem.MetricsSystem.PublishNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineAvgTime": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineDropped": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineDropped",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineNumOps": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineQsize": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineQsize",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/SnapshotAvgTime": {
-              "metric": "metricssystem.MetricsSystem.SnapshotAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/SnapshotNumOps": {
-              "metric": "metricssystem.MetricsSystem.SnapshotNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/RetryCache/NameNodeRetryCache/CacheCleared": {
-              "metric": "rpc.RetryCache.NameNodeRetryCache.CacheCleared",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/RetryCache/NameNodeRetryCache/CacheHit": {
-              "metric": "rpc.RetryCache.NameNodeRetryCache.CacheHit",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/RetryCache/NameNodeRetryCache/CacheUpdated": {
-              "metric": "rpc.RetryCache.NameNodeRetryCache.CacheUpdated",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetServerDefaultsAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.GetServerDefaultsAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetServerDefaultsNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.GetServerDefaultsNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetTransactionIdAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.GetTransactionIdAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetTransactionIdNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.GetTransactionIdNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/IOExceptionAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.IOExceptionAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/IOExceptionNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.IOExceptionNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/PathIsNotEmptyDirectoryExceptionAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.PathIsNotEmptyDirectoryExceptionAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/PathIsNotEmptyDirectoryExceptionNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.PathIsNotEmptyDirectoryExceptionNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/RecoverLeaseAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.RecoverLeaseAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/RecoverLeaseNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.RecoverLeaseNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/Rename2AvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.Rename2AvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/Rename2NumOps": {
-              "metric": "rpcdetailed.rpcdetailed.Rename2NumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/SetTimesAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.SetTimesAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/SetTimesNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.SetTimesNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/UgiMetrics/GetGroupsAvgTime": {
-              "metric": "ugi.UgiMetrics.GetGroupsAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/UgiMetrics/GetGroupsNumOps": {
-              "metric": "ugi.UgiMetrics.GetGroupsNumOps",
-              "pointInTime": true,
-              "temporal": true
-            }
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "default": {
-            "metrics/dfs/namenode/Used": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/TotalLoad": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.TotalLoad",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/TransactionsSinceLastCheckpoint": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.TransactionsSinceLastCheckpoint",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/TransactionsSinceLastLogRoll": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.TransactionsSinceLastLogRoll",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/ExpiredHeartbeats": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.ExpiredHeartbeats",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/LastCheckpointTime": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.LastCheckpointTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/LastWrittenTransactionId": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.LastWrittenTransactionId",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/MillisSinceLastLoadedEdits": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.MillisSinceLastLoadedEdits",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/MissingReplOneBlocks": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.MissingReplOneBlocks",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/PendingDataNodeMessageCount": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.PendingDataNodeMessageCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/PostponedMisreplicatedBlocks": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.PostponedMisreplicatedBlocks",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/Snapshots": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.Snapshots",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/SnapshottableDirectories": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.SnapshottableDirectories",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/StaleDataNodes": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.StaleDataNodes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/TotalFiles": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.TotalFiles",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memMaxM": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemMaxM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/BlockCapacity": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlockCapacity",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/TotalFiles": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/HostName": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.HostName",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/GetListingOps": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.GetListingOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/UpgradeFinalized": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/getProtocolVersion_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/fsync_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/Safemode": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/CorruptBlocks": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CorruptBlocks",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/LiveNodes": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/renewLease_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/getFileInfo_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/CapacityRemaining": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemaining",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/PercentRemaining": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapUsedM",
-              "unit": "MB",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/complete_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/CapacityTotalGB": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotalGB",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/getBlockLocations_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/AddBlockOps": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.AddBlockOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/CapacityUsedGB": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsedGB",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/Syncs_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsBlocked",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/PercentUsed": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/DecomNodes": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/blockReport_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/NonDfsUsedSpace": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/UpgradeFinalized": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/getFileInfo_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/getEditLogSize_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/blockReceived_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/Safemode": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/FilesCreated": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.FilesCreated",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/addBlock_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/DecomNodes": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/CapacityUsed": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsed",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/NonHeapMemoryUsed": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/DeadNodes": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/PercentUsed": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/Free": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Free",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/Total": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/GetBlockLocations": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.GetBlockLocations",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/fsync_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/HeapMemoryMax": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/create_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.PendingReplicationBlocks",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/UnderReplicatedBlocks": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.UnderReplicatedBlocks",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/FileInfoOps": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.FileInfoOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/MissingBlocks": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.MissingBlocks",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/sendHeartbeat_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcProcessingTime_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/blockReport_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/CapacityRemaining": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystemState.CapacityRemaining",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthenticationSuccesses": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationSuccesses",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.PendingDeletionBlocks",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthenticationFailures": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationFailures",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/getEditLogSize_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapCommittedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/FilesInGetListingOps": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.FilesInGetListingOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsRunnable",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/BlocksTotal": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlocksTotal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/CapacityTotal": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/complete_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/LiveNodes": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsNew",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/rollFsImage_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthorizationFailures": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationFailures",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/Syncs_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/StartTime": {
-              "metric": "java.lang:type=Runtime.StartTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/blockReceived_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/rollEditLog_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/DeadNodes": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.SentBytes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/HeapMemoryUsed": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/FilesTotal": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.FilesTotal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/Version": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogWarn",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/ExcessBlocks": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.ExcessBlocks",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/PercentRemaining": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.ReceivedBytes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/blockReport_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/NonHeapMemoryMax": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/rollFsImage_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.NumOpenConnections",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapUsedM",
-              "unit": "MB",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.ScheduledReplicationBlocks",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsWaiting",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/BlocksTotal": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlocksTotal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcTimeMillis",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/getBlockLocations_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/Transactions_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/create_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/CapacityTotal": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTerminated",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemainingGB",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/Transactions_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/MissingBlocks": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.MissingBlocks",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/Threads": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Threads",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/callQueueLen": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.callQueueLen",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/CorruptBlocks": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/blockReport_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/TotalFiles": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogInfo",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/NameDirStatuses": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NameDirStatuses",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/getListing_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/rollEditLog_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/addBlock_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcProcessingTime_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/CapacityUsed": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/sendHeartbeat_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/CreateFileOps": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.CreateFileOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logError": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogError",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/getProtocolVersion_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthorizationSuccesses": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationSuccesses",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/Version": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/getListing_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogFatal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/NonDfsUsedSpace": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/renewLease_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/TotalBlocks": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalBlocks",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityNonDFSUsed",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcTimeMillisConcurrentMarkSweep": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcTimeMillisConcurrentMarkSweep",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcCountConcurrentMarkSweep": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcCountConcurrentMarkSweep",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTimeAvgTime": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTimeAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcProcessingTimeAvgTime": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTimeAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/CorruptFiles": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.CorruptFiles",
-              "pointInTime": true,
-              "temporal": false
-            }
-          }
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/cpu/cpu_idle":{
-              "metric":"cpu_idle",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/cpu/cpu_nice":{
-              "metric":"cpu_nice",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/cpu/cpu_system":{
-              "metric":"cpu_system",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/cpu/cpu_user":{
-              "metric":"cpu_user",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/cpu/cpu_wio":{
-              "metric":"cpu_wio",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/disk/disk_free":{
-              "metric":"disk_free",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/disk/disk_total":{
-              "metric":"disk_total",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/load/load_fifteen":{
-              "metric":"load_fifteen",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/load/load_five":{
-              "metric":"load_five",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/load/load_one":{
-              "metric":"load_one",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/memory/mem_buffers":{
-              "metric":"mem_buffers",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/memory/mem_cached":{
-              "metric":"mem_cached",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/memory/mem_free":{
-              "metric":"mem_free",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/memory/mem_shared":{
-              "metric":"mem_shared",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/memory/mem_total":{
-              "metric":"mem_total",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/memory/swap_free":{
-              "metric":"swap_free",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/memory/swap_total":{
-              "metric":"swap_total",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/network/bytes_in":{
-              "metric":"bytes_in",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/network/bytes_out":{
-              "metric":"bytes_out",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/network/pkts_in":{
-              "metric":"pkts_in",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/network/pkts_out":{
-              "metric":"pkts_out",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/process/proc_run":{
-              "metric":"proc_run",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetr

<TRUNCATED>
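
Each entry in the (truncated) metrics definition above maps an Ambari metric path to a JMX bean attribute published by the NameNode, for example Hadoop:service=NameNode,name=FSNamesystem.CapacityRemaining. For reference, a minimal Python 2 sketch of reading one such attribute from the NameNode's /jmx endpoint follows; the host, port, and helper name are illustrative assumptions, not values taken from the removed stack files.

# Illustrative sketch only: fetch a single NameNode JMX attribute of the kind
# mapped in the metrics JSON above. Host, port, and function name are assumptions.
import json
import urllib2

def fetch_namenode_jmx_attribute(host, port, bean, attribute):
    url = "http://%s:%d/jmx?qry=%s" % (host, port, bean)
    data = json.load(urllib2.urlopen(url))
    for entry in data.get("beans", []):
        if attribute in entry:
            return entry[attribute]
    return None

# Example (hypothetical host and port):
# remaining = fetch_namenode_jmx_attribute("nn-host.example.com", 50070,
#                                          "Hadoop:service=NameNode,name=FSNamesystem",
#                                          "CapacityRemaining")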

[12/19] ambari git commit: AMBARI-19229. Remove HDP-3.0.0 stack definition from Ambari-2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/journalnode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/journalnode_upgrade.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/journalnode_upgrade.py
deleted file mode 100644
index 7585107..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/journalnode_upgrade.py
+++ /dev/null
@@ -1,152 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import time
-
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Execute
-from resource_management.libraries.functions.default import default
-from resource_management.core.exceptions import Fail
-import utils
-from resource_management.libraries.functions.jmx import get_value_from_jmx
-import namenode_ha_state
-from namenode_ha_state import NAMENODE_STATE, NamenodeHAState
-from utils import get_dfsadmin_base_command
-
-
-def post_upgrade_check():
-  """
-  Ensure all journal nodes are up and quorum is established during Rolling Upgrade.
-  :return:
-  """
-  import params
-  Logger.info("Ensuring Journalnode quorum is established")
-
-  if params.security_enabled:
-    # We establish the HDFS identity instead of the JN Kerberos identity,
-    # since this is an administrative HDFS call that must be performed as the HDFS administrator user.
-    Execute(params.hdfs_kinit_cmd, user=params.hdfs_user)
-
-  time.sleep(5)
-  hdfs_roll_edits()
-  time.sleep(5)
-
-  all_journal_node_hosts = default("/clusterHostInfo/journalnode_hosts", [])
-
-  if len(all_journal_node_hosts) < 3:
-    raise Fail("Need at least 3 Journalnodes to maintain a quorum")
-
-  try:
-    namenode_ha = namenode_ha_state.NamenodeHAState()
-  except ValueError, err:
-    raise Fail("Could not retrieve Namenode HA addresses. Error: " + str(err))
-
-  Logger.info(str(namenode_ha))
-  nn_address = namenode_ha.get_address(NAMENODE_STATE.ACTIVE)
-
-  nn_data = utils.get_jmx_data(nn_address, 'org.apache.hadoop.hdfs.server.namenode.FSNamesystem', 'JournalTransactionInfo',
-                         namenode_ha.is_encrypted(), params.security_enabled)
-  if not nn_data:
-    raise Fail("Could not retrieve JournalTransactionInfo from JMX")
-
-  try:
-    last_txn_id = int(nn_data['LastAppliedOrWrittenTxId'])
-    success = ensure_jns_have_new_txn(all_journal_node_hosts, last_txn_id)
-
-    if not success:
-      raise Fail("Could not ensure that all Journal nodes have a new log transaction id")
-  except KeyError:
-    raise Fail("JournalTransactionInfo does not have key LastAppliedOrWrittenTxId from JMX info")
-
-
-def hdfs_roll_edits():
-  """
-  HDFS_CLIENT needs to be a dependency of JOURNALNODE
-  Roll the logs so that Namenode will be able to connect to the Journalnode.
-  Must kinit before calling this command.
-  """
-  import params
-
-  # TODO, this will need to be doc'ed since existing clusters will need HDFS_CLIENT on all JOURNALNODE hosts
-  dfsadmin_base_command = get_dfsadmin_base_command('hdfs')
-  command = dfsadmin_base_command + ' -rollEdits'
-  Execute(command, user=params.hdfs_user, tries=1)
-
-
-def ensure_jns_have_new_txn(nodelist, last_txn_id):
-  """
-  :param nodelist: List of Journalnodes
-  :param last_txn_id: Integer of last transaction id
-  :return: Return true on success, false otherwise
-  """
-  import params
-
-  jn_uri = default("/configurations/hdfs-site/dfs.namenode.shared.edits.dir", None)
-
-  if jn_uri is None:
-    raise Fail("No JournalNode URI found at hdfs-site/dfs.namenode.shared.edits.dir")
-
-  nodes = []
-  for node in nodelist:
-    if node in jn_uri:
-      nodes.append(node)
-
-  num_of_jns = len(nodes)
-  actual_txn_ids = {}
-  jns_updated = 0
-
-  if params.journalnode_address is None:
-    raise Fail("Could not retrieve JournalNode address")
-
-  if params.journalnode_port is None:
-    raise Fail("Could not retrieve JournalNode port")
-
-  time_out_secs = 3 * 60
-  step_time_secs = 10
-  iterations = int(time_out_secs/step_time_secs)
-
-  protocol = "https" if params.https_only else "http"
-
-  Logger.info("Checking if all JournalNodes are updated.")
-  for i in range(iterations):
-    Logger.info('Try %d out of %d' % (i+1, iterations))
-    for node in nodes:
-      # if all JNS are updated break
-      if jns_updated == num_of_jns:
-        Logger.info("All journal nodes are updated")
-        return True
-
-      # JN already meets condition, skip it
-      if node in actual_txn_ids and actual_txn_ids[node] and actual_txn_ids[node] >= last_txn_id:
-        continue
-
-      url = '%s://%s:%s' % (protocol, node, params.journalnode_port)
-      data = utils.get_jmx_data(url, 'Journal-', 'LastWrittenTxId', params.https_only, params.security_enabled)
-      if data:
-        actual_txn_ids[node] = int(data)
-        if actual_txn_ids[node] >= last_txn_id:
-          Logger.info("JournalNode %s has a higher transaction id: %s" % (node, str(data)))
-          jns_updated += 1
-        else:
-          Logger.info("JournalNode %s is still on transaction id: %s" % (node, str(data)))
-
-    Logger.info("Sleeping for %d secs" % step_time_secs)
-    time.sleep(step_time_secs)
-
-  return jns_updated == num_of_jns
\ No newline at end of file

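For readers tracking what the removed journalnode_upgrade.py did: after rolling the edit logs, it reads the active NameNode's LastAppliedOrWrittenTxId over JMX and then polls every JournalNode's LastWrittenTxId until each one has caught up, with a 3-minute timeout in 10-second steps. A minimal sketch of that polling loop is below; the get_last_written_txid callable is a hypothetical stand-in for the JMX lookup that utils.get_jmx_data performed in the removed script.

# Sketch of the JournalNode catch-up loop; names and the lookup helper are illustrative.
import time

def wait_for_journalnodes(jn_urls, last_txn_id, get_last_written_txid,
                          timeout_secs=180, step_secs=10):
    # Return True once every JournalNode reports a txid >= last_txn_id,
    # or False if the timeout expires first.
    caught_up = set()
    deadline = time.time() + timeout_secs
    while time.time() < deadline:
        for url in jn_urls:
            if url in caught_up:
                continue
            txid = get_last_written_txid(url)  # hypothetical JMX lookup
            if txid is not None and int(txid) >= last_txn_id:
                caught_up.add(url)
        if len(caught_up) == len(jn_urls):
            return True
        time.sleep(step_secs)
    return False
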
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
deleted file mode 100644
index 86f68e5..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
+++ /dev/null
@@ -1,424 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-import os
-import time
-import json
-import tempfile
-from datetime import datetime
-import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same set of functions.
-
-from resource_management.libraries.script.script import Script
-from resource_management.core.resources.system import Execute, File
-from resource_management.core import shell
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import Direction
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.security_commons import build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
-  FILE_TYPE_XML
-
-from resource_management.core.exceptions import Fail
-from resource_management.core.shell import as_user
-from resource_management.core.logger import Logger
-
-
-from ambari_commons.os_family_impl import OsFamilyImpl
-from ambari_commons import OSConst
-
-
-import namenode_upgrade
-from hdfs_namenode import namenode, wait_for_safemode_off
-from hdfs import hdfs
-import hdfs_rebalance
-from utils import initiate_safe_zkfc_failover, get_hdfs_binary, get_dfsadmin_base_command
-
-
-
-# hashlib is supplied as of Python 2.5 as the replacement interface for md5
-# and other secure hashes.  In 2.6, md5 is deprecated.  Import hashlib if
-# available, avoiding a deprecation warning under 2.6.  Import md5 otherwise,
-# preserving 2.4 compatibility.
-try:
-  import hashlib
-  _md5 = hashlib.md5
-except ImportError:
-  import md5
-  _md5 = md5.new
-
-class NameNode(Script):
-
-  def get_component_name(self):
-    return "hadoop-hdfs-namenode"
-
-  def get_hdfs_binary(self):
-    """
-    Get the name or path to the hdfs binary depending on the component name.
-    """
-    component_name = self.get_component_name()
-    return get_hdfs_binary(component_name)
-
-  def install(self, env):
-    import params
-    env.set_params(params)
-    self.install_packages(env)
-    #TODO we need this for HA because of manual steps
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    hdfs("namenode")
-    hdfs_binary = self.get_hdfs_binary()
-    namenode(action="configure", hdfs_binary=hdfs_binary, env=env)
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    self.configure(env)
-    hdfs_binary = self.get_hdfs_binary()
-    namenode(action="start", hdfs_binary=hdfs_binary, upgrade_type=upgrade_type,
-      upgrade_suspended=params.upgrade_suspended, env=env)
-
-    # after starting NN in an upgrade, touch the marker file
-    if upgrade_type is not None:
-      # place a file on the system indicating that we've submitted the command that
-      # instructs NN that it is now part of an upgrade
-      namenode_upgrade.create_upgrade_marker()
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    hdfs_binary = self.get_hdfs_binary()
-    if upgrade_type == "rolling" and params.dfs_ha_enabled:
-      if params.dfs_ha_automatic_failover_enabled:
-        initiate_safe_zkfc_failover()
-      else:
-        raise Fail("Rolling Upgrade - dfs.ha.automatic-failover.enabled must be enabled to perform a rolling restart")
-    namenode(action="stop", hdfs_binary=hdfs_binary, upgrade_type=upgrade_type, env=env)
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    namenode(action="status", env=env)
-
-  def decommission(self, env):
-    import params
-    env.set_params(params)
-    hdfs_binary = self.get_hdfs_binary()
-    namenode(action="decommission", hdfs_binary=hdfs_binary)
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class NameNodeDefault(NameNode):
-
-  def restore_snapshot(self, env):
-    """
-    Restore the snapshot during a Downgrade.
-    """
-    print "TODO AMBARI-12698"
-    pass
-
-  def prepare_express_upgrade(self, env):
-    """
-    During an Express Upgrade.
-    If in HA, on the Active NameNode only, examine the directory dfs.namenode.name.dir and
-    make sure that there is no "/previous" directory.
-
-    Create a list of all the DataNodes in the cluster.
-    hdfs dfsadmin -report > dfs-old-report-1.log
-
-    hdfs dfsadmin -safemode enter
-    hdfs dfsadmin -saveNamespace
-
-    Copy the checkpoint files located in ${dfs.namenode.name.dir}/current into a backup directory.
-
-    Finalize any prior HDFS upgrade,
-    hdfs dfsadmin -finalizeUpgrade
-
-    Prepare for a NameNode rolling upgrade in order to not lose any data.
-    hdfs dfsadmin -rollingUpgrade prepare
-    """
-    import params
-    Logger.info("Preparing the NameNodes for a NonRolling (aka Express) Upgrade.")
-
-    if params.security_enabled:
-      kinit_command = format("{params.kinit_path_local} -kt {params.hdfs_user_keytab} {params.hdfs_principal_name}")
-      Execute(kinit_command, user=params.hdfs_user, logoutput=True)
-
-    hdfs_binary = self.get_hdfs_binary()
-    namenode_upgrade.prepare_upgrade_check_for_previous_dir()
-    namenode_upgrade.prepare_upgrade_enter_safe_mode(hdfs_binary)
-    namenode_upgrade.prepare_upgrade_save_namespace(hdfs_binary)
-    namenode_upgrade.prepare_upgrade_backup_namenode_dir()
-    namenode_upgrade.prepare_upgrade_finalize_previous_upgrades(hdfs_binary)
-
-    # Call -rollingUpgrade prepare
-    namenode_upgrade.prepare_rolling_upgrade(hdfs_binary)
-
-  def prepare_rolling_upgrade(self, env):
-    hdfs_binary = self.get_hdfs_binary()
-    namenode_upgrade.prepare_rolling_upgrade(hdfs_binary)
-
-  def wait_for_safemode_off(self, env):
-    wait_for_safemode_off(self.get_hdfs_binary(), 30, True)
-
-  def finalize_non_rolling_upgrade(self, env):
-    hdfs_binary = self.get_hdfs_binary()
-    namenode_upgrade.finalize_upgrade("nonrolling", hdfs_binary)
-
-  def finalize_rolling_upgrade(self, env):
-    hdfs_binary = self.get_hdfs_binary()
-    namenode_upgrade.finalize_upgrade("rolling", hdfs_binary)
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing Stack Upgrade pre-restart")
-    import params
-    env.set_params(params)
-
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      # When downgrading an Express Upgrade, the first thing we do is to revert the symlinks.
-      # Therefore, we cannot call this code in that scenario.
-      call_if = [("rolling", "upgrade"), ("rolling", "downgrade"), ("nonrolling", "upgrade")]
-      for e in call_if:
-        if (upgrade_type, params.upgrade_direction) == e:
-          conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-hdfs-namenode", params.version)
-
-  def post_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing Stack Upgrade post-restart")
-    import params
-    env.set_params(params)
-
-    hdfs_binary = self.get_hdfs_binary()
-    dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
-    dfsadmin_cmd = dfsadmin_base_command + " -report -live"
-    Execute(dfsadmin_cmd,
-            user=params.hdfs_user,
-            tries=60,
-            try_sleep=10
-    )
-
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    props_value_check = None
-    props_empty_check = ['dfs.namenode.kerberos.internal.spnego.principal',
-                         'dfs.namenode.keytab.file',
-                         'dfs.namenode.kerberos.principal']
-    props_read_check = ['dfs.namenode.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-    hdfs_expectations.update(hdfs_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML,
-                                                  'hdfs-site.xml': FILE_TYPE_XML})
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'hdfs-site' not in security_params
-               or 'dfs.namenode.keytab.file' not in security_params['hdfs-site']
-               or 'dfs.namenode.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set properly."})
-            return
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['dfs.namenode.keytab.file'],
-                                security_params['hdfs-site']['dfs.namenode.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-  def rebalancehdfs(self, env):
-    import params
-    env.set_params(params)
-
-    name_node_parameters = json.loads( params.name_node_params )
-    threshold = name_node_parameters['threshold']
-    _print("Starting balancer with threshold = %s\n" % threshold)
-
-    rebalance_env = {'PATH': params.hadoop_bin_dir}
-
-    if params.security_enabled:
-      # Create the kerberos credentials cache (ccache) file and set it in the environment to use
-      # when executing HDFS rebalance command. Use the md5 hash of the combination of the principal and keytab file
-      # to generate a (relatively) unique cache filename so that we can use it as needed.
-      # TODO: params.tmp_dir=/var/lib/ambari-agent/tmp. However hdfs user doesn't have access to this path.
-      # TODO: Hence using /tmp
-      ccache_file_name = "hdfs_rebalance_cc_" + _md5(format("{hdfs_principal_name}|{hdfs_user_keytab}")).hexdigest()
-      ccache_file_path = os.path.join(tempfile.gettempdir(), ccache_file_name)
-      rebalance_env['KRB5CCNAME'] = ccache_file_path
-
-      # If there are no tickets in the cache or they are expired, perform a kinit, else use what
-      # is in the cache
-      klist_cmd = format("{klist_path_local} -s {ccache_file_path}")
-      kinit_cmd = format("{kinit_path_local} -c {ccache_file_path} -kt {hdfs_user_keytab} {hdfs_principal_name}")
-      if shell.call(klist_cmd, user=params.hdfs_user)[0] != 0:
-        Execute(kinit_cmd, user=params.hdfs_user)
-
-    def calculateCompletePercent(first, current):
-      # avoid division by zero
-      try:
-        division_result = current.bytesLeftToMove/first.bytesLeftToMove
-      except ZeroDivisionError:
-        Logger.warning("Division by zero. Bytes Left To Move = {0}. Return 1.0".format(first.bytesLeftToMove))
-        return 1.0
-      return 1.0 - division_result
-
-
-    def startRebalancingProcess(threshold, rebalance_env):
-      rebalanceCommand = format('hdfs --config {hadoop_conf_dir} balancer -threshold {threshold}')
-      return as_user(rebalanceCommand, params.hdfs_user, env=rebalance_env)
-
-    command = startRebalancingProcess(threshold, rebalance_env)
-
-    basedir = os.path.join(env.config.basedir, 'scripts')
-    if(threshold == 'DEBUG'): #FIXME TODO remove this on PROD
-      basedir = os.path.join(env.config.basedir, 'scripts', 'balancer-emulator')
-      command = ['ambari-python-wrap','hdfs-command.py']
-
-    _print("Executing command %s\n" % command)
-
-    parser = hdfs_rebalance.HdfsParser()
-
-    def handle_new_line(line, is_stderr):
-      if is_stderr:
-        return
-
-      _print('[balancer] %s' % (line))
-      pl = parser.parseLine(line)
-      if pl:
-        res = pl.toJson()
-        res['completePercent'] = calculateCompletePercent(parser.initialLine, pl)
-
-        self.put_structured_out(res)
-      elif parser.state == 'PROCESS_FINISED' :
-        _print('[balancer] %s' % ('Process is finished' ))
-        self.put_structured_out({'completePercent' : 1})
-        return
-
-    Execute(command,
-            on_new_line = handle_new_line,
-            logoutput = False,
-    )
-
-    if params.security_enabled:
-      # Delete the kerberos credentials cache (ccache) file
-      File(ccache_file_path,
-           action = "delete",
-      )
-      
-  def get_log_folder(self):
-    import params
-    return params.hdfs_log_dir
-  
-  def get_user(self):
-    import params
-    return params.hdfs_user
-
-  def get_pid_files(self):
-    import status_params
-    return [status_params.namenode_pid_file]
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class NameNodeWindows(NameNode):
-  def install(self, env):
-    import install_params
-    self.install_packages(env)
-    #TODO we need this for HA because of manual steps
-    self.configure(env)
-
-  def rebalancehdfs(self, env):
-    from ambari_commons.os_windows import UserHelper, run_os_command_impersonated
-    import params
-    env.set_params(params)
-
-    hdfs_username, hdfs_domain = UserHelper.parse_user_name(params.hdfs_user, ".")
-
-    name_node_parameters = json.loads( params.name_node_params )
-    threshold = name_node_parameters['threshold']
-    _print("Starting balancer with threshold = %s\n" % threshold)
-
-    def calculateCompletePercent(first, current):
-      return 1.0 - current.bytesLeftToMove/first.bytesLeftToMove
-
-    def startRebalancingProcess(threshold):
-      rebalanceCommand = 'hdfs balancer -threshold %s' % threshold
-      return ['cmd', '/C', rebalanceCommand]
-
-    command = startRebalancingProcess(threshold)
-    basedir = os.path.join(env.config.basedir, 'scripts')
-
-    _print("Executing command %s\n" % command)
-
-    parser = hdfs_rebalance.HdfsParser()
-    returncode, stdout, err = run_os_command_impersonated(' '.join(command), hdfs_username, Script.get_password(params.hdfs_user), hdfs_domain)
-
-    for line in stdout.split('\n'):
-      _print('[balancer] %s %s' % (str(datetime.now()), line ))
-      pl = parser.parseLine(line)
-      if pl:
-        res = pl.toJson()
-        res['completePercent'] = calculateCompletePercent(parser.initialLine, pl)
-
-        self.put_structured_out(res)
-      elif parser.state == 'PROCESS_FINISED' :
-        _print('[balancer] %s %s' % (str(datetime.now()), 'Process is finished' ))
-        self.put_structured_out({'completePercent' : 1})
-        break
-
-    if returncode != None and returncode != 0:
-      raise Fail('Hdfs rebalance process exited with error. See the log output')
-
-def _print(line):
-  sys.stdout.write(line)
-  sys.stdout.flush()
-
-if __name__ == "__main__":
-  NameNode().execute()

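One pattern worth noting in the removed namenode.py is how rebalancehdfs handles Kerberos: it derives a dedicated credential cache name from the md5 of the principal and keytab, points KRB5CCNAME at that cache, and runs kinit only when klist -s finds no valid ticket there. A condensed sketch of that pattern follows; the function name, paths, and defaults are placeholders rather than values from the stack files, and the real script additionally ran these commands as the hdfs user.

# Condensed sketch of the dedicated-credential-cache pattern; names are illustrative.
import hashlib
import os
import subprocess
import tempfile

def kerberos_env_for_balancer(principal, keytab, kinit_path="kinit", klist_path="klist"):
    # Build a (relatively) unique cache file name from the principal/keytab pair.
    cache_name = "hdfs_rebalance_cc_" + hashlib.md5(principal + "|" + keytab).hexdigest()
    cache_path = os.path.join(tempfile.gettempdir(), cache_name)
    # Re-kinit only if the cache holds no valid (non-expired) tickets.
    if subprocess.call([klist_path, "-s", cache_path]) != 0:
        subprocess.check_call([kinit_path, "-c", cache_path, "-kt", keytab, principal])
    return {"KRB5CCNAME": cache_path}
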
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode_ha_state.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode_ha_state.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode_ha_state.py
deleted file mode 100644
index 259af2e..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode_ha_state.py
+++ /dev/null
@@ -1,219 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.core import shell
-from resource_management.core.logger import Logger
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions.jmx import get_value_from_jmx
-
-
-class NAMENODE_STATE:
-  ACTIVE = "active"
-  STANDBY = "standby"
-  UNKNOWN = "unknown"
-
-
-class NamenodeHAState:
-  """
-  Represents the current state of the Namenode Hosts in High Availability Mode
-  """
-
-  def __init__(self):
-    """
-    Initializes all fields by querying the Namenode state.
-    Raises a ValueError if unable to construct the object.
-    """
-    import params
-
-    self.name_service = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
-    if self.name_service is None:
-      self.name_service = default('/configurations/hdfs-site/dfs.nameservices', None)
-
-    if not self.name_service:
-      raise ValueError("Could not retrieve property dfs.nameservices or dfs.internal.nameservices")
-
-    nn_unique_ids_key = "dfs.ha.namenodes." + str(self.name_service)
-    # List of the nn unique ids
-    self.nn_unique_ids = default("/configurations/hdfs-site/" + nn_unique_ids_key, None)
-    if not self.nn_unique_ids:
-      raise ValueError("Could not retrieve property " + nn_unique_ids_key)
-
-    self.nn_unique_ids = self.nn_unique_ids.split(",")
-    self.nn_unique_ids = [x.strip() for x in self.nn_unique_ids]
-
-    policy = default("/configurations/hdfs-site/dfs.http.policy", "HTTP_ONLY")
-    self.encrypted = policy.upper() == "HTTPS_ONLY"
-
-    jmx_uri_fragment = ("https" if self.encrypted else "http") + "://{0}/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem"
-    namenode_http_fragment = "dfs.namenode.http-address.{0}.{1}"
-    namenode_https_fragment = "dfs.namenode.https-address.{0}.{1}"
-
-    # Dictionary where the key is the Namenode State (e.g., ACTIVE), and the value is a set of hostnames
-    self.namenode_state_to_hostnames = {}
-
-    # Dictionary from nn unique id name to a tuple of (http address, https address)
-    self.nn_unique_id_to_addresses = {}
-    for nn_unique_id in self.nn_unique_ids:
-      http_key = namenode_http_fragment.format(self.name_service, nn_unique_id)
-      https_key = namenode_https_fragment.format(self.name_service, nn_unique_id)
-
-      http_value = default("/configurations/hdfs-site/" + http_key, None)
-      https_value = default("/configurations/hdfs-site/" + https_key, None)
-      actual_value = https_value if self.encrypted else http_value
-      hostname = actual_value.split(":")[0].strip() if actual_value and ":" in actual_value else None
-
-      self.nn_unique_id_to_addresses[nn_unique_id] = (http_value, https_value)
-      try:
-        if not hostname:
-          raise Exception("Could not retrieve hostname from address " + actual_value)
-
-        jmx_uri = jmx_uri_fragment.format(actual_value)
-        state = get_value_from_jmx(jmx_uri, "tag.HAState", params.security_enabled, params.hdfs_user, params.is_https_enabled)
-
-        # If JMX parsing failed
-        if not state:
-          run_user = default("/configurations/hadoop-env/hdfs_user", "hdfs")
-          check_service_cmd = "hdfs haadmin -ns {0} -getServiceState {1}".format(self.name_service, nn_unique_id)
-          code, out = shell.call(check_service_cmd, logoutput=True, user=run_user)
-          if code == 0 and out:
-            if NAMENODE_STATE.STANDBY in out:
-              state = NAMENODE_STATE.STANDBY
-            elif NAMENODE_STATE.ACTIVE in out:
-              state = NAMENODE_STATE.ACTIVE
-
-        if not state:
-          raise Exception("Could not retrieve Namenode state from URL " + jmx_uri)
-
-        state = state.lower()
-
-        if state not in [NAMENODE_STATE.ACTIVE, NAMENODE_STATE.STANDBY]:
-          state = NAMENODE_STATE.UNKNOWN
-
-        if state in self.namenode_state_to_hostnames:
-          self.namenode_state_to_hostnames[state].add(hostname)
-        else:
-          hostnames = set([hostname, ])
-          self.namenode_state_to_hostnames[state] = hostnames
-      except:
-        Logger.error("Could not get namenode state for " + nn_unique_id)
-
-  def __str__(self):
-    return "Namenode HA State: {\n" + \
-           ("IDs: %s\n"       % ", ".join(self.nn_unique_ids)) + \
-           ("Addresses: %s\n" % str(self.nn_unique_id_to_addresses)) + \
-           ("States: %s\n"    % str(self.namenode_state_to_hostnames)) + \
-           ("Encrypted: %s\n" % str(self.encrypted)) + \
-           ("Healthy: %s\n"   % str(self.is_healthy())) + \
-           "}"
-
-  def is_encrypted(self):
-    """
-    :return: Returns a bool indicating if HTTPS is enabled
-    """
-    return self.encrypted
-
-  def get_nn_unique_ids(self):
-    """
-    :return Returns a list of the nn unique ids
-    """
-    return self.nn_unique_ids
-
-  def get_nn_unique_id_to_addresses(self):
-    """
-    :return Returns a dictionary where the key is the nn unique id, and the value is a tuple of (http address, https address)
-    Each address is of the form, hostname:port
-    """
-    return self.nn_unique_id_to_addresses
-
-  def get_address_for_nn_id(self, id):
-    """
-    :param id: Namenode ID
-    :return: Returns the appropriate address (HTTP if no encryption, HTTPS otherwise) for the given namenode id.
-    """
-    if id in self.nn_unique_id_to_addresses:
-      addresses = self.nn_unique_id_to_addresses[id]
-      if addresses and len(addresses) == 2:
-        return addresses[1] if self.encrypted else addresses[0]
-    return None
-
-  def get_address_for_host(self, hostname):
-    """
-    :param hostname: Host name
-    :return: Returns the appropriate address (HTTP if no encryption, HTTPS otherwise) for the given host.
-    """
-    for id, addresses in self.nn_unique_id_to_addresses.iteritems():
-      if addresses and len(addresses) == 2:
-        if ":" in addresses[0]:
-          nn_hostname = addresses[0].split(":")[0].strip()
-          if nn_hostname == hostname:
-            # Found the host
-            return addresses[1] if self.encrypted else addresses[0]
-    return None
-
-  def get_namenode_state_to_hostnames(self):
-    """
-    :return Return a dictionary where the key is a member of NAMENODE_STATE, and the value is a set of hostnames.
-    """
-    return self.namenode_state_to_hostnames
-
-  def get_address(self, namenode_state):
-    """
-    @param namenode_state: Member of NAMENODE_STATE
-    :return Get the address that corresponds to the first host with the given state
-    """
-    hosts = self.namenode_state_to_hostnames[namenode_state] if namenode_state in self.namenode_state_to_hostnames else []
-    if hosts and len(hosts) > 0:
-      hostname = list(hosts)[0]
-      return self.get_address_for_host(hostname)
-    return None
-
-  def is_active(self, host_name):
-    """
-    :param host_name: Host name
-    :return: Return True if this is the active NameNode, otherwise, False.
-    """
-    return self._is_in_state(host_name, NAMENODE_STATE.ACTIVE)
-
-  def is_standby(self, host_name):
-    """
-    :param host_name: Host name
-    :return: Return True if this is the standby NameNode, otherwise, False.
-    """
-    return self._is_in_state(host_name, NAMENODE_STATE.STANDBY)
-
-  def _is_in_state(self, host_name, state):
-    """
-    :param host_name: Host name
-    :param state: State to check
-    :return: Return True if this NameNode is in the specified state, otherwise, False.
-    """
-    mapping = self.get_namenode_state_to_hostnames()
-    if state in mapping:
-      hosts_in_state = mapping[state]
-      if hosts_in_state is not None and len(hosts_in_state) == 1 and next(iter(hosts_in_state)).lower() == host_name.lower():
-        return True
-    return False
-
-  def is_healthy(self):
-    """
-    :return: Returns a bool indicating if exactly one ACTIVE and one STANDBY host exist.
-    """
-    active_hosts = self.namenode_state_to_hostnames[NAMENODE_STATE.ACTIVE] if NAMENODE_STATE.ACTIVE in self.namenode_state_to_hostnames else []
-    standby_hosts = self.namenode_state_to_hostnames[NAMENODE_STATE.STANDBY] if NAMENODE_STATE.STANDBY in self.namenode_state_to_hostnames else []
-    return len(active_hosts) == 1 and len(standby_hosts) == 1
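
For readers skimming the removal: namenode_ha_state.py resolves each NameNode's HA role by querying the FSNamesystem JMX bean and reading "tag.HAState". Below is a minimal standalone sketch of that lookup, assuming an unsecured cluster (the deleted module delegates SPNEGO/Kerberos handling to get_value_from_jmx) and a hypothetical NameNode address; it is an illustration, not the removed implementation.

import json
try:
    from urllib.request import urlopen   # Python 3
except ImportError:
    from urllib2 import urlopen          # Python 2 (the Ambari agent runtime)

def get_namenode_ha_state(namenode_http_address, encrypted=False):
    # Same JMX query string the deleted class builds in jmx_uri_fragment.
    scheme = "https" if encrypted else "http"
    url = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem".format(
        scheme, namenode_http_address)
    data = json.loads(urlopen(url, timeout=10).read().decode("utf-8"))
    beans = data.get("beans", [])
    # "tag.HAState" is "active" or "standby" on an HA-enabled NameNode.
    return beans[0].get("tag.HAState", "unknown").lower() if beans else "unknown"

if __name__ == "__main__":
    # Hypothetical address; the deleted code builds it from
    # dfs.namenode.http-address.<nameservice>.<nn_id> (or the https variant).
    print(get_namenode_ha_state("nn1.example.com:50070"))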

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode_upgrade.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode_upgrade.py
deleted file mode 100644
index f683dcc..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode_upgrade.py
+++ /dev/null
@@ -1,322 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import re
-import os
-
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Execute
-from resource_management.core.resources.system import File
-from resource_management.core import shell
-from resource_management.core.shell import as_user
-from resource_management.core.exceptions import Fail
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions import get_unique_id_and_date
-from resource_management.libraries.functions import Direction, SafeMode
-from utils import get_dfsadmin_base_command
-
-from namenode_ha_state import NamenodeHAState
-
-
-safemode_to_instruction = {SafeMode.ON: "enter",
-                           SafeMode.OFF: "leave"}
-
-NAMENODE_UPGRADE_IN_PROGRESS_MARKER_FILE = "namenode-upgrade-in-progress"
-
-def prepare_upgrade_check_for_previous_dir():
-  """
-  During a NonRolling (aka Express Upgrade), preparing the NameNode requires backing up some data.
-  Check that there is no "previous" folder inside the NameNode Name Dir.
-  """
-  import params
-
-  if params.dfs_ha_enabled:
-    namenode_ha = NamenodeHAState()
-    if namenode_ha.is_active(params.hostname):
-      Logger.info("NameNode High Availability is enabled and this is the Active NameNode.")
-
-      problematic_previous_namenode_dirs = set()
-      nn_name_dirs = params.dfs_name_dir.split(',')
-      for nn_dir in nn_name_dirs:
-        if os.path.isdir(nn_dir):
-          # Check for a previous folder, which is not allowed.
-          previous_dir = os.path.join(nn_dir, "previous")
-          if os.path.isdir(previous_dir):
-            problematic_previous_namenode_dirs.add(previous_dir)
-
-      if len(problematic_previous_namenode_dirs) > 0:
-        message = 'WARNING. The following NameNode Name Dir(s) have a "previous" folder from an older version.\n' \
-                  'Please back it up first, and then delete it, OR Finalize (E.g., "hdfs dfsadmin -finalizeUpgrade").\n' \
-                  'NameNode Name Dir(s): {0}\n' \
-                  '***** Then, retry this step. *****'.format(", ".join(problematic_previous_namenode_dirs))
-        Logger.error(message)
-        raise Fail(message)
-
-def prepare_upgrade_enter_safe_mode(hdfs_binary):
-  """
-  During a NonRolling (aka Express Upgrade), preparing the NameNode requires first entering Safemode.
-  :param hdfs_binary: name/path of the HDFS binary to use
-  """
-  import params
-
-  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
-  safe_mode_enter_cmd = dfsadmin_base_command + " -safemode enter"
-  try:
-    # Safe to call if already in Safe Mode
-    desired_state = SafeMode.ON
-    safemode_transition_successful, original_state = reach_safemode_state(params.hdfs_user, desired_state, params.dfs_ha_enabled, hdfs_binary)
-    Logger.info("Transition successful: {0}, original state: {1}".format(str(safemode_transition_successful), str(original_state)))
-    if not safemode_transition_successful:
-      raise Fail("Could not transition to safemode state %s. Please check logs to make sure namenode is up." % str(desired_state))
-  except Exception, e:
-    message = "Could not enter safemode. Error: {0}. As the HDFS user, call this command: {1}".format(str(e), safe_mode_enter_cmd)
-    Logger.error(message)
-    raise Fail(message)
-
-def prepare_upgrade_save_namespace(hdfs_binary):
-  """
-  During a NonRolling (aka Express Upgrade), preparing the NameNode requires saving the namespace.
-  :param hdfs_binary: name/path of the HDFS binary to use
-  """
-  import params
-
-  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
-  save_namespace_cmd = dfsadmin_base_command + " -saveNamespace"
-  try:
-    Logger.info("Checkpoint the current namespace.")
-    as_user(save_namespace_cmd, params.hdfs_user, env={'PATH': params.hadoop_bin_dir})
-  except Exception, e:
-    message = format("Could not save the namespace. As the HDFS user, call this command: {save_namespace_cmd}")
-    Logger.error(message)
-    raise Fail(message)
-
-def prepare_upgrade_backup_namenode_dir():
-  """
-  During a NonRolling (aka Express Upgrade), preparing the NameNode requires backing up the NameNode Name Dirs.
-  """
-  import params
-
-  i = 0
-  failed_paths = []
-  nn_name_dirs = params.dfs_name_dir.split(',')
-  backup_destination_root_dir = "{0}/{1}".format(params.namenode_backup_dir, params.stack_version_unformatted)
-  if len(nn_name_dirs) > 0:
-    Logger.info("Backup the NameNode name directory's CURRENT folder.")
-  for nn_dir in nn_name_dirs:
-    i += 1
-    namenode_current_image = os.path.join(nn_dir, "current")
-    unique = get_unique_id_and_date() + "_" + str(i)
-    # Note that /tmp may not be writeable.
-    backup_current_folder = "{0}/namenode_{1}/".format(backup_destination_root_dir, unique)
-
-    if os.path.isdir(namenode_current_image) and not os.path.isdir(backup_current_folder):
-      try:
-        os.makedirs(backup_current_folder)
-        Execute(('cp', '-ar', namenode_current_image, backup_current_folder),
-                sudo=True
-        )
-      except Exception, e:
-        failed_paths.append(namenode_current_image)
-  if len(failed_paths) > 0:
-    Logger.error("Could not backup the NameNode Name Dir(s) to {0}, make sure that the destination path is "
-                 "writeable and copy the directories on your own. Directories: {1}".format(backup_destination_root_dir,
-                                                                                           ", ".join(failed_paths)))
-
-def prepare_upgrade_finalize_previous_upgrades(hdfs_binary):
-  """
-  During a NonRolling (aka Express Upgrade), preparing the NameNode requires Finalizing any upgrades that are in progress.
-  :param hdfs_binary: name/path of the HDFS binary to use
-  """
-  import params
-
-  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
-  finalize_command = dfsadmin_base_command + " -rollingUpgrade finalize"
-  try:
-    Logger.info("Attempt to Finalize if there are any in-progress upgrades. "
-                "This will return 255 if no upgrades are in progress.")
-    code, out = shell.checked_call(finalize_command, logoutput=True, user=params.hdfs_user)
-    if out:
-      expected_substring = "there is no rolling upgrade in progress"
-      if expected_substring not in out.lower():
-        Logger.warning('Finalize command output did not contain the expected substring: %s' % expected_substring)
-    else:
-      Logger.warning("Finalize command did not return any output.")
-  except Exception, e:
-    Logger.warning("Ensure no upgrades are in progress.")
-
-def reach_safemode_state(user, safemode_state, in_ha, hdfs_binary):
-  """
-  Enter or leave safemode for the Namenode.
-  :param user: user to perform action as
-  :param safemode_state: Desired state of ON or OFF
-  :param in_ha: bool indicating if Namenode High Availability is enabled
-  :param hdfs_binary: name/path of the HDFS binary to use
-  :return: Returns a tuple of (transition success, original state). If no change is needed, the indicator of
-  success will be True
-  """
-  Logger.info("Prepare to transition into safemode state %s" % safemode_state)
-  import params
-  original_state = SafeMode.UNKNOWN
-
-  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
-  safemode_base_command = dfsadmin_base_command + " -safemode "
-  safemode_check_cmd = safemode_base_command + " get"
-
-  grep_pattern = format("Safe mode is {safemode_state}")
-  safemode_check_with_grep = format("{safemode_check_cmd} | grep '{grep_pattern}'")
-
-  code, out = shell.call(safemode_check_cmd, user=user, logoutput=True)
-  Logger.info("Command: %s\nCode: %d." % (safemode_check_cmd, code))
-  if code == 0 and out is not None:
-    Logger.info(out)
-    re_pattern = r"Safe mode is (\S*)"
-    Logger.info("Pattern to search: {0}".format(re_pattern))
-    m = re.search(re_pattern, out, re.IGNORECASE)
-    if m and len(m.groups()) >= 1:
-      original_state = m.group(1).upper()
-
-      if original_state == safemode_state:
-        return (True, original_state)
-      else:
-        # Make a transition
-        command = safemode_base_command + safemode_to_instruction[safemode_state]
-        Execute(command,
-                user=user,
-                logoutput=True,
-                path=[params.hadoop_bin_dir])
-
-        code, out = shell.call(safemode_check_with_grep, user=user)
-        Logger.info("Command: %s\nCode: %d. Out: %s" % (safemode_check_with_grep, code, out))
-        if code == 0:
-          return (True, original_state)
-  return (False, original_state)
-
-
-def prepare_rolling_upgrade(hdfs_binary):
-  """
-  This can be called during either Rolling Upgrade or Express Upgrade (aka nonrolling)
-
-  Rolling Upgrade for HDFS Namenode requires the following.
-  0. Namenode must be up
-  1. If HA: leave safemode if the safemode status is not OFF
-  2. Execute a rolling upgrade "prepare"
-  3. Execute a rolling upgrade "query"
-  :param hdfs_binary: name/path of the HDFS binary to use
-  """
-  import params
-
-  if not params.upgrade_direction or params.upgrade_direction not in [Direction.UPGRADE, Direction.DOWNGRADE]:
-    raise Fail("Could not retrieve upgrade direction: %s" % str(params.upgrade_direction))
-  Logger.info(format("Performing a(n) {params.upgrade_direction} of HDFS"))
-
-  if params.security_enabled:
-    kinit_command = format("{params.kinit_path_local} -kt {params.hdfs_user_keytab} {params.hdfs_principal_name}") 
-    Execute(kinit_command, user=params.hdfs_user, logoutput=True)
-
-  if params.upgrade_direction == Direction.UPGRADE:
-    if params.dfs_ha_enabled:
-      Logger.info('High Availability is enabled, must leave safemode before calling "-rollingUpgrade prepare"')
-      desired_state = SafeMode.OFF
-      safemode_transition_successful, original_state = reach_safemode_state(params.hdfs_user, desired_state, True, hdfs_binary)
-      if not safemode_transition_successful:
-        raise Fail("Could not transition to safemode state %s. Please check logs to make sure namenode is up." % str(desired_state))
-
-    dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
-    prepare = dfsadmin_base_command + " -rollingUpgrade prepare"
-    query = dfsadmin_base_command + " -rollingUpgrade query"
-    Execute(prepare,
-            user=params.hdfs_user,
-            logoutput=True)
-    Execute(query,
-            user=params.hdfs_user,
-            logoutput=True)
-
-def finalize_upgrade(upgrade_type, hdfs_binary):
-  """
-  Finalize the Namenode upgrade, at which point it cannot be downgraded.
-  :param upgrade_type rolling or nonrolling
-  :param hdfs_binary: name/path of the HDFS binary to use
-  """
-  Logger.info("Executing Rolling Upgrade finalize")
-  import params
-
-  if params.security_enabled:
-    kinit_command = format("{params.kinit_path_local} -kt {params.hdfs_user_keytab} {params.hdfs_principal_name}") 
-    Execute(kinit_command, user=params.hdfs_user, logoutput=True)
-
-  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
-  finalize_cmd = dfsadmin_base_command + " -rollingUpgrade finalize"
-  query_cmd = dfsadmin_base_command + " -rollingUpgrade query"
-
-  Execute(query_cmd,
-        user=params.hdfs_user,
-        logoutput=True)
-  Execute(finalize_cmd,
-          user=params.hdfs_user,
-          logoutput=True)
-  Execute(query_cmd,
-          user=params.hdfs_user,
-          logoutput=True)
-
-  # upgrade is finalized; remove the upgrade marker
-  delete_upgrade_marker()
-
-
-def get_upgrade_in_progress_marker():
-  """
-  Gets the full path of the file which indicates that NameNode has begun its stack upgrade.
-  :return:
-  """
-  from resource_management.libraries.script.script import Script
-  return os.path.join(Script.get_tmp_dir(), NAMENODE_UPGRADE_IN_PROGRESS_MARKER_FILE)
-
-
-def create_upgrade_marker():
-  """
-  Creates the marker file indicating that NameNode has begun participating in a stack upgrade.
-  If the file already exists, nothing will be done. This will silently log exceptions on failure.
-  :return:
-  """
-  # create the marker file which indicates that a stack upgrade has started
-  try:
-    namenode_upgrade_in_progress_marker = get_upgrade_in_progress_marker()
-    if not os.path.isfile(namenode_upgrade_in_progress_marker):
-      File(namenode_upgrade_in_progress_marker)
-  except:
-    Logger.warning("Unable to create NameNode upgrade marker file {0}".format(namenode_upgrade_in_progress_marker))
-
-
-def delete_upgrade_marker():
-  """
-  Removes the marker file indicating that NameNode has begun participating in a stack upgrade.
-  If the file does not exist, then nothing will be done.
-  Failure to remove this file could cause problems with restarts in the future. That's why
-  checking to see if there is a suspended upgrade is also advised. This function will raise
-  an exception if the file can't be removed.
-  :return:
-  """
-  # remove the marker file which indicates that a stack upgrade has started
-  try:
-    namenode_upgrade_in_progress_marker = get_upgrade_in_progress_marker()
-    if os.path.isfile(namenode_upgrade_in_progress_marker):
-      File(namenode_upgrade_in_progress_marker, action='delete')
-  except:
-    error_message = "Unable to remove NameNode upgrade marker file {0}".format(namenode_upgrade_in_progress_marker)
-    Logger.error(error_message)
-    raise Fail(error_message)
-
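
The core of reach_safemode_state() above is parsing "Safe mode is <STATE>" out of `hdfs dfsadmin -safemode get` and issuing enter/leave only when a transition is actually needed. A minimal sketch of that flow using plain subprocess (rather than resource_management's shell/Execute wrappers) follows; the dfsadmin command string is an assumption standing in for get_dfsadmin_base_command().

import re
import subprocess

SAFEMODE_TO_INSTRUCTION = {"ON": "enter", "OFF": "leave"}

def reach_safemode_state(desired_state, dfsadmin_cmd="hdfs dfsadmin"):
    """Return (transition_success, original_state); desired_state is 'ON' or 'OFF'."""
    base = dfsadmin_cmd.split()
    out = subprocess.check_output(base + ["-safemode", "get"]).decode("utf-8")
    m = re.search(r"Safe mode is (\S*)", out, re.IGNORECASE)
    if not m:
        return (False, "UNKNOWN")
    original_state = m.group(1).upper()
    if original_state == desired_state:
        return (True, original_state)            # nothing to do
    # Make the transition, then re-check, mirroring the deleted function.
    subprocess.check_call(base + ["-safemode", SAFEMODE_TO_INSTRUCTION[desired_state]])
    out = subprocess.check_output(base + ["-safemode", "get"]).decode("utf-8")
    return (("Safe mode is " + desired_state) in out, original_state)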

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/nfsgateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/nfsgateway.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/nfsgateway.py
deleted file mode 100644
index 7ba1f96..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/nfsgateway.py
+++ /dev/null
@@ -1,151 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.script import Script
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions.security_commons import build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
-  FILE_TYPE_XML
-from hdfs_nfsgateway import nfsgateway
-from hdfs import hdfs
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-
-
-class NFSGateway(Script):
-
-  def get_component_name(self):
-    return "hadoop-hdfs-nfs3"
-
-  def install(self, env):
-    import params
-
-    env.set_params(params)
-
-    self.install_packages(env)
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    if params.stack_version_formatted and check_stack_feature(StackFeature.NFS, params.stack_version_formatted):
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-hdfs-nfs3", params.version)
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    self.configure(env)
-    nfsgateway(action="start")
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    nfsgateway(action="stop")
-
-  def configure(self, env):
-    import params
-
-    env.set_params(params)
-    hdfs()
-    nfsgateway(action="configure")
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    check_process_status(status_params.nfsgateway_pid_file)
-
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    props_value_check = None
-    props_empty_check = ['nfs.keytab.file',
-                         'nfs.kerberos.principal']
-    props_read_check = ['nfs.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-    hdfs_expectations.update(hdfs_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML,
-                                                  'hdfs-site.xml': FILE_TYPE_XML})
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('hdfs-site' not in security_params or
-                'nfs.keytab.file' not in security_params['hdfs-site'] or
-                'nfs.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['nfs.keytab.file'],
-                                security_params['hdfs-site'][
-                                  'nfs.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-      
-  def get_log_folder(self):
-    import params
-    return params.hdfs_log_dir
-  
-  def get_user(self):
-    import params
-    return params.hdfs_user
-
-  def get_pid_files(self):
-    import status_params
-    return [status_params.nfsgateway_pid_file]
-
-if __name__ == "__main__":
-  NFSGateway().execute()
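
NFSGateway.status() above reduces to check_process_status(status_params.nfsgateway_pid_file): read the pid file and verify the process is still alive. The sketch below is similar in spirit to, not a copy of, the resource_management helper, and the pid-file path in the usage line is hypothetical.

import errno
import os

def process_is_running(pid_file):
    """Return True if pid_file exists and names a live process."""
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
    except (IOError, OSError, ValueError):
        return False
    try:
        os.kill(pid, 0)                    # signal 0: existence check only
        return True
    except OSError as e:
        return e.errno == errno.EPERM      # alive, but owned by another user

# Hypothetical path; the real one comes from status_params.nfsgateway_pid_file.
print(process_is_running("/var/run/hadoop/root/hadoop_privileged_nfs3.pid"))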

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params.py
deleted file mode 100644
index 25231f9..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from ambari_commons import OSCheck
-from resource_management.libraries.functions.default import default
-
-if OSCheck.is_windows_family():
-  from params_windows import *
-else:
-  from params_linux import *
-
-nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
-retryAble = default("/commandParams/command_retry_enabled", False)
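
These params modules lean heavily on default("/a/b/c", fallback) lookups into the command JSON (for example "/commandParams/command_retry_enabled" just above). The sketch below illustrates that path-style lookup over a plain nested dict; the real default() reads from Script.get_config(), so this reimplementation is an assumption for illustration, not the library code.

def default_lookup(config, path, fallback=None):
    """Walk a '/'-separated path through nested dicts, returning fallback if absent."""
    node = config
    for key in path.strip("/").split("/"):
        if not isinstance(node, dict) or key not in node:
            return fallback
        node = node[key]
    return node

config = {"commandParams": {"command_retry_enabled": "true"}}
print(default_lookup(config, "/commandParams/command_retry_enabled", False))  # -> "true"
print(default_lookup(config, "/commandParams/upgrade_direction", None))       # -> None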

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
deleted file mode 100644
index 55544e0..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
+++ /dev/null
@@ -1,527 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import status_params
-import utils
-import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
-import os
-import re
-
-from ambari_commons.os_check import OSCheck
-
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.stack_features import get_stack_feature_version
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions.expect import expect
-from resource_management.libraries.functions import get_klist_path
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions.format_jvm_option import format_jvm_option
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
-from resource_management.libraries.functions.hdfs_utils import is_https_enabled_in_hdfs
-from resource_management.libraries.functions import is_empty
-
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-stack_name = status_params.stack_name
-stack_root = Script.get_stack_root()
-upgrade_direction = default("/commandParams/upgrade_direction", None)
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
-agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
-
-# there is a stack upgrade which has not yet been finalized; it's currently suspended
-upgrade_suspended = default("roleParams/upgrade_suspended", False)
-
-# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
-version = default("/commandParams/version", None)
-
-# The desired role is only available during a Non-Rolling Upgrade in HA.
-# The server calculates which of the two NameNodes will be active and which will be standby, since they
-# are started using different commands.
-desired_namenode_role = default("/commandParams/desired_namenode_role", None)
-
-# get the correct version to use for checking stack features
-version_for_stack_feature_checks = get_stack_feature_version(config)
-
-stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
-stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
-
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-hdfs_user = status_params.hdfs_user
-root_user = "root"
-hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
-namenode_pid_file = status_params.namenode_pid_file
-zkfc_pid_file = status_params.zkfc_pid_file
-datanode_pid_file = status_params.datanode_pid_file
-
-# Some datanode settings
-dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
-dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
-dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
-dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
-dfs_dn_ipc_address = config['configurations']['hdfs-site']['dfs.datanode.ipc.address']
-secure_dn_ports_are_in_use = False
-
-hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
-namenode_backup_dir = default("/configurations/hadoop-env/namenode_backup_dir", "/tmp/upgrades")
-
-# hadoop default parameters
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
-hadoop_bin = stack_select.get_hadoop_dir("sbin")
-hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
-hadoop_home = stack_select.get_hadoop_dir("home")
-hadoop_secure_dn_user = hdfs_user
-hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
-hadoop_lib_home = stack_select.get_hadoop_dir("lib")
-
-# hadoop parameters for stacks that support rolling_upgrade
-if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
-  mapreduce_libs_path = format("{stack_root}/current/hadoop-mapreduce-client/*")
-
-  if not security_enabled:
-    hadoop_secure_dn_user = '""'
-  else:
-    dfs_dn_port = utils.get_port(dfs_dn_addr)
-    dfs_dn_http_port = utils.get_port(dfs_dn_http_addr)
-    dfs_dn_https_port = utils.get_port(dfs_dn_https_addr)
-    # We try to avoid inability to start datanode as a plain user due to usage of root-owned ports
-    if dfs_http_policy == "HTTPS_ONLY":
-      secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_https_port)
-    elif dfs_http_policy == "HTTP_AND_HTTPS":
-      secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port) or utils.is_secure_port(dfs_dn_https_port)
-    else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
-      secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port)
-    if secure_dn_ports_are_in_use:
-      hadoop_secure_dn_user = hdfs_user
-    else:
-      hadoop_secure_dn_user = '""'
-
-ambari_libs_dir = "/var/lib/ambari-agent/lib"
-limits_conf_dir = "/etc/security/limits.d"
-
-hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
-hdfs_user_nproc_limit = default("/configurations/hadoop-env/hdfs_user_nproc_limit", "65536")
-
-create_lib_snappy_symlinks = check_stack_feature(StackFeature.SNAPPY, stack_version_formatted)
-jsvc_path = "/usr/lib/bigtop-utils"
-
-execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
-ulimit_cmd = "ulimit -c unlimited ; "
-
-snappy_so = "libsnappy.so"
-so_target_dir_x86 = format("{hadoop_lib_home}/native/Linux-i386-32")
-so_target_dir_x64 = format("{hadoop_lib_home}/native/Linux-amd64-64")
-so_target_x86 = format("{so_target_dir_x86}/{snappy_so}")
-so_target_x64 = format("{so_target_dir_x64}/{snappy_so}")
-so_src_dir_x86 = format("{hadoop_home}/lib")
-so_src_dir_x64 = format("{hadoop_home}/lib64")
-so_src_x86 = format("{so_src_dir_x86}/{snappy_so}")
-so_src_x64 = format("{so_src_dir_x64}/{snappy_so}")
-
-#security params
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-falcon_user = config['configurations']['falcon-env']['falcon_user']
-
-#exclude file
-hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
-exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
-command_phase = default("/commandParams/phase","")
-
-klist_path_local = get_klist_path(default('/configurations/kerberos-env/executable_search_paths', None))
-kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-#hosts
-hostname = config["hostname"]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-nm_host = default("/clusterHostInfo/nm_host", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
-zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
-falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
-
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_namenodes = not len(namenode_host) == 0
-has_jobtracker = not len(jtnode_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_histroryserver = not len(hs_host) == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_slaves = not len(slave_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_journalnode_hosts = not len(journalnode_hosts)  == 0
-has_zkfc_hosts = not len(zkfc_hosts)  == 0
-has_falcon_host = not len(falcon_host)  == 0
-
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-#users and groups
-yarn_user = config['configurations']['yarn-env']['yarn_user']
-hbase_user = config['configurations']['hbase-env']['hbase_user']
-oozie_user = config['configurations']['oozie-env']['oozie_user']
-webhcat_user = config['configurations']['hive-env']['webhcat_user']
-hive_user = config['configurations']['hive-env']['hive_user']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-smokeuser_principal =  config['configurations']['cluster-env']['smokeuser_principal_name']
-mapred_user = config['configurations']['mapred-env']['mapred_user']
-hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
-
-user_group = config['configurations']['cluster-env']['user_group']
-root_group = "root"
-proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
-
-#hadoop params
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
-nfs_file_dump_dir = config['configurations']['hdfs-site']['nfs.file.dump.dir']
-
-dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
-dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
-
-jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']
-
-dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
-
-hdfs_log_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
-namenode_dirs_created_stub_dir = hdfs_log_dir
-namenode_dirs_stub_filename = "namenode_dirs_created"
-
-smoke_hdfs_user_dir = format("/user/{smoke_user}")
-smoke_hdfs_user_mode = 0770
-
-hdfs_namenode_format_disabled = default("/configurations/cluster-env/hdfs_namenode_format_disabled", False)
-hdfs_namenode_formatted_mark_suffix = "/namenode-formatted/"
-hdfs_namenode_bootstrapped_mark_suffix = "/namenode-bootstrapped/"
-namenode_formatted_old_mark_dirs = ["/var/run/hadoop/hdfs/namenode-formatted", 
-  format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted"),
-  "/var/lib/hdfs/namenode/formatted"]
-dfs_name_dirs = dfs_name_dir.split(",")
-namenode_formatted_mark_dirs = []
-namenode_bootstrapped_mark_dirs = []
-for dn_dir in dfs_name_dirs:
- tmp_format_mark_dir = format("{dn_dir}{hdfs_namenode_formatted_mark_suffix}")
- tmp_bootstrap_mark_dir = format("{dn_dir}{hdfs_namenode_bootstrapped_mark_suffix}")
- namenode_formatted_mark_dirs.append(tmp_format_mark_dir)
- namenode_bootstrapped_mark_dirs.append(tmp_bootstrap_mark_dir)
-
-# Use the namenode RPC address if configured, otherwise, fallback to the default file system
-namenode_address = None
-if 'dfs.namenode.rpc-address' in config['configurations']['hdfs-site']:
-  namenode_rpcaddress = config['configurations']['hdfs-site']['dfs.namenode.rpc-address']
-  namenode_address = format("hdfs://{namenode_rpcaddress}")
-else:
-  namenode_address = config['configurations']['core-site']['fs.defaultFS']
-
-fs_checkpoint_dirs = default("/configurations/hdfs-site/dfs.namenode.checkpoint.dir", "").split(',')
-
-dfs_data_dirs = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
-
-data_dir_mount_file = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
-
-# HDFS High Availability properties
-dfs_ha_enabled = False
-dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
-if dfs_ha_nameservices is None:
-  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
-dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-dfs_ha_automatic_failover_enabled = default("/configurations/hdfs-site/dfs.ha.automatic-failover.enabled", False)
-
-# hostname of the active HDFS HA Namenode (only used when HA is enabled)
-dfs_ha_namenode_active = default("/configurations/hadoop-env/dfs_ha_initial_namenode_active", None)
-# hostname of the standby HDFS HA Namenode (only used when HA is enabled)
-dfs_ha_namenode_standby = default("/configurations/hadoop-env/dfs_ha_initial_namenode_standby", None)
-
-# Values for the current Host
-namenode_id = None
-namenode_rpc = None
-
-dfs_ha_namemodes_ids_list = []
-other_namenode_id = None
-
-if dfs_ha_namenode_ids:
-  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
-  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
-  if dfs_ha_namenode_ids_array_len > 1:
-    dfs_ha_enabled = True
-if dfs_ha_enabled:
-  for nn_id in dfs_ha_namemodes_ids_list:
-    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
-    if hostname in nn_host:
-      namenode_id = nn_id
-      namenode_rpc = nn_host
-  # With HA enabled namenode_address is recomputed
-  namenode_address = format('hdfs://{dfs_ha_nameservices}')
-
-  # Calculate the namenode id of the other namenode. This is needed during RU to initiate an HA failover using ZKFC.
-  if namenode_id is not None and len(dfs_ha_namemodes_ids_list) == 2:
-    other_namenode_id = list(set(dfs_ha_namemodes_ids_list) - set([namenode_id]))[0]
-
-
-if dfs_http_policy is not None and dfs_http_policy.upper() == "HTTPS_ONLY":
-  https_only = True
-  journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.https-address', None)
-else:
-  https_only = False
-  journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.http-address', None)
-
-if journalnode_address:
-  journalnode_port = journalnode_address.split(":")[1]
-  
-  
-if security_enabled:
-  dn_principal_name = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
-  dn_keytab = config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
-  dn_principal_name = dn_principal_name.replace('_HOST',hostname.lower())
-  
-  dn_kinit_cmd = format("{kinit_path_local} -kt {dn_keytab} {dn_principal_name};")
-  
-  nn_principal_name = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
-  nn_keytab = config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
-  nn_principal_name = nn_principal_name.replace('_HOST',hostname.lower())
-  
-  nn_kinit_cmd = format("{kinit_path_local} -kt {nn_keytab} {nn_principal_name};")
-
-  jn_principal_name = default("/configurations/hdfs-site/dfs.journalnode.kerberos.principal", None)
-  if jn_principal_name:
-    jn_principal_name = jn_principal_name.replace('_HOST', hostname.lower())
-  jn_keytab = default("/configurations/hdfs-site/dfs.journalnode.keytab.file", None)
-  hdfs_kinit_cmd = format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name};")
-else:
-  dn_kinit_cmd = ""
-  nn_kinit_cmd = ""
-  hdfs_kinit_cmd = ""
-
-hdfs_site = config['configurations']['hdfs-site']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-
-dfs_type = default("/commandParams/dfs_type", "")
-
-import functools
-#create partial functions with common arguments for every HdfsResource call
-#to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code
-HdfsResource = functools.partial(
-  HdfsResource,
-  user=hdfs_user,
-  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir,
-  principal_name = hdfs_principal_name,
-  hdfs_site = hdfs_site,
-  default_fs = default_fs,
-  immutable_paths = get_not_managed_resources(),
-  dfs_type = dfs_type
-)
-
-
-# The logic for LZO also exists in OOZIE's params.py
-io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
-lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-lzo_packages = get_lzo_packages(stack_version_unformatted)
-  
-name_node_params = default("/commandParams/namenode", None)
-
-java_home = config['hostLevelParams']['java_home']
-java_version = expect("/hostLevelParams/java_version", int)
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-# ranger host
-ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
-has_ranger_admin = not len(ranger_admin_hosts) == 0
-xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-
-#ranger hdfs properties
-policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
-if 'admin-properties' in config['configurations'] and 'policymgr_external_url' in config['configurations']['admin-properties'] and policymgr_mgr_url.endswith('/'):
-  policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
-xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
-xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
-xa_db_host = config['configurations']['admin-properties']['db_host']
-repo_name = str(config['clusterName']) + '_hadoop'
-
-hadoop_security_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-hadoop_security_authorization = config['configurations']['core-site']['hadoop.security.authorization']
-fs_default_name = config['configurations']['core-site']['fs.defaultFS']
-hadoop_security_auth_to_local = config['configurations']['core-site']['hadoop.security.auth_to_local']
-hadoop_rpc_protection = config['configurations']['ranger-hdfs-plugin-properties']['hadoop.rpc.protection']
-common_name_for_certificate = config['configurations']['ranger-hdfs-plugin-properties']['common.name.for.certificate']
-
-repo_config_username = config['configurations']['ranger-hdfs-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
-
-if security_enabled:
-  sn_principal_name = default("/configurations/hdfs-site/dfs.secondary.namenode.kerberos.principal", "nn/_HOST@EXAMPLE.COM")
-  sn_principal_name = sn_principal_name.replace('_HOST',hostname.lower())
-
-ranger_env = config['configurations']['ranger-env']
-ranger_plugin_properties = config['configurations']['ranger-hdfs-plugin-properties']
-policy_user = config['configurations']['ranger-hdfs-plugin-properties']['policy_user']
-
-#For curl command in ranger plugin to get db connector
-jdk_location = config['hostLevelParams']['jdk_location']
-java_share_dir = '/usr/share/java'
-
-is_https_enabled = is_https_enabled_in_hdfs(config['configurations']['hdfs-site']['dfs.http.policy'],
-                                            config['configurations']['hdfs-site']['dfs.https.enable'])
-
-if has_ranger_admin:
-  enable_ranger_hdfs = (config['configurations']['ranger-hdfs-plugin-properties']['ranger-hdfs-plugin-enabled'].lower() == 'yes')
-  xa_audit_db_password = ''
-  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db:
-    xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
-  repo_config_password = unicode(config['configurations']['ranger-hdfs-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'])
-  xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
-  previous_jdbc_jar_name = None
-
-  if stack_supports_ranger_audit_db:
-
-    if xa_audit_db_flavor == 'mysql':
-      jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
-      jdbc_driver = "com.mysql.jdbc.Driver"
-    elif xa_audit_db_flavor == 'oracle':
-      jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
-      colon_count = xa_db_host.count(':')
-      if colon_count == 2 or colon_count == 0:
-        audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
-      else:
-        audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
-      jdbc_driver = "oracle.jdbc.OracleDriver"
-    elif xa_audit_db_flavor == 'postgres':
-      jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
-      jdbc_driver = "org.postgresql.Driver"
-    elif xa_audit_db_flavor == 'mssql':
-      jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
-      jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
-    elif xa_audit_db_flavor == 'sqla':
-      jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
-      jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
-
-  downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  driver_curl_target = format("{hadoop_lib_home}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  previous_jdbc_jar = format("{hadoop_lib_home}/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-
-  sql_connector_jar = ''
-
-  hdfs_ranger_plugin_config = {
-    'username': repo_config_username,
-    'password': repo_config_password,
-    'hadoop.security.authentication': hadoop_security_authentication,
-    'hadoop.security.authorization': hadoop_security_authorization,
-    'fs.default.name': fs_default_name,
-    'hadoop.security.auth_to_local': hadoop_security_auth_to_local,
-    'hadoop.rpc.protection': hadoop_rpc_protection,
-    'commonNameForCertificate': common_name_for_certificate,
-    'dfs.datanode.kerberos.principal': dn_principal_name if security_enabled else '',
-    'dfs.namenode.kerberos.principal': nn_principal_name if security_enabled else '',
-    'dfs.secondary.namenode.kerberos.principal': sn_principal_name if security_enabled else ''
-  }
-
-  hdfs_ranger_plugin_repo = {
-    'isActive': 'true',
-    'config': json.dumps(hdfs_ranger_plugin_config),
-    'description': 'hdfs repo',
-    'name': repo_name,
-    'repositoryType': 'hdfs',
-    'assetType': '1'
-  }
-  if stack_supports_ranger_kerberos and security_enabled:
-    hdfs_ranger_plugin_config['policy.download.auth.users'] = hdfs_user
-    hdfs_ranger_plugin_config['tag.download.auth.users'] = hdfs_user
-
-  if stack_supports_ranger_kerberos:
-    hdfs_ranger_plugin_config['ambari.service.check.user'] = policy_user
-
-    hdfs_ranger_plugin_repo = {
-      'isEnabled': 'true',
-      'configs': hdfs_ranger_plugin_config,
-      'description': 'hdfs repo',
-      'name': repo_name,
-      'type': 'hdfs'
-    }
-
-  xa_audit_db_is_enabled = False
-  ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
-  if xml_configurations_supported and stack_supports_ranger_audit_db:
-    xa_audit_db_is_enabled = config['configurations']['ranger-hdfs-audit']['xasecure.audit.destination.db']
-  xa_audit_hdfs_is_enabled = config['configurations']['ranger-hdfs-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
-  ssl_keystore_password = unicode(config['configurations']['ranger-hdfs-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
-  ssl_truststore_password = unicode(config['configurations']['ranger-hdfs-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
-  credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
-
-  #For SQLA explicitly disable audit to DB for Ranger
-  if xa_audit_db_flavor == 'sqla':
-    xa_audit_db_is_enabled = False

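For readers skimming the removal above: the deleted params.py chooses between two Ranger repository payloads for the HDFS plugin, a legacy shape that embeds the plugin config as a JSON string and a newer shape used when the stack supports Ranger kerberos. A minimal standalone sketch of that branching follows; the function name and sample values are illustrative only, not Ambari code.

    import json

    def build_hdfs_repo(repo_name, plugin_config, supports_ranger_kerberos):
        # Legacy payload: config is embedded as a JSON string and the
        # repository type lives under 'repositoryType'/'assetType'.
        repo = {
            'isActive': 'true',
            'config': json.dumps(plugin_config),
            'description': 'hdfs repo',
            'name': repo_name,
            'repositoryType': 'hdfs',
            'assetType': '1',
        }
        if supports_ranger_kerberos:
            # Kerberos-aware stacks send the config dict directly and use
            # 'isEnabled'/'configs'/'type' instead.
            repo = {
                'isEnabled': 'true',
                'configs': plugin_config,
                'description': 'hdfs repo',
                'name': repo_name,
                'type': 'hdfs',
            }
        return repo

    if __name__ == '__main__':
        # Sample config values are made up for the example.
        cfg = {'username': 'hadoop', 'fs.default.name': 'hdfs://nn1.example.com:8020'}
        print(build_hdfs_repo('cl1_hadoop', cfg, supports_ranger_kerberos=True))
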
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_windows.py
deleted file mode 100644
index 70d95a6..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_windows.py
+++ /dev/null
@@ -1,79 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-#Used in subsequent imports from params
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions.format import format
-from install_params import exclude_packages
-from status_params import *
-
-config = Script.get_config()
-hadoop_conf_dir = None
-hbase_conf_dir = None
-hadoop_home = None
-try:
-  hadoop_conf_dir = os.environ["HADOOP_CONF_DIR"]
-  hbase_conf_dir = os.environ["HBASE_CONF_DIR"]
-  hadoop_home = os.environ["HADOOP_HOME"]
-except:
-  pass
-#directories & files
-dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
-fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir']
-dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
-#decomission
-hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
-exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-# HDFS High Availability properties
-dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.internal.nameservices", None)
-dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-
-namenode_id = None
-namenode_rpc = None
-hostname = config["hostname"]
-if dfs_ha_namenode_ids:
-  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
-  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
-  if dfs_ha_namenode_ids_array_len > 1:
-    dfs_ha_enabled = True
-if dfs_ha_enabled:
-  for nn_id in dfs_ha_namemodes_ids_list:
-    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
-    if hostname in nn_host:
-      namenode_id = nn_id
-      namenode_rpc = nn_host
-
-hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
-hdfs_user = hadoop_user
-
-grep_exe = "findstr"
-
-name_node_params = default("/commandParams/namenode", None)
-
-service_map = {
-  "datanode" : datanode_win_service_name,
-  "journalnode" : journalnode_win_service_name,
-  "namenode" : namenode_win_service_name,
-  "secondarynamenode" : snamenode_win_service_name,
-  "zkfc_slave": zkfc_win_service_name
-}

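The params_windows.py removed above also carried the usual HA lookup: split the configured namenode ids, and match the local hostname against each dfs.namenode.rpc-address property to find this host's namenode id. A small self-contained sketch of that lookup, with an invented function name and sample hdfs-site values (not Ambari APIs):

    def resolve_namenode_id(hdfs_site, nameservice, hostname):
        # Comma-separated list of namenode ids for the nameservice, e.g. "nn1,nn2".
        ids_prop = hdfs_site.get('dfs.ha.namenodes.%s' % nameservice, '')
        nn_ids = [i.strip() for i in ids_prop.split(',') if i.strip()]
        if len(nn_ids) < 2:
            return None, None  # HA not enabled
        for nn_id in nn_ids:
            rpc = hdfs_site['dfs.namenode.rpc-address.%s.%s' % (nameservice, nn_id)]
            if hostname in rpc:
                return nn_id, rpc  # this host runs namenode nn_id
        return None, None

    if __name__ == '__main__':
        site = {
            'dfs.ha.namenodes.mycluster': 'nn1,nn2',
            'dfs.namenode.rpc-address.mycluster.nn1': 'host1.example.com:8020',
            'dfs.namenode.rpc-address.mycluster.nn2': 'host2.example.com:8020',
        }
        print(resolve_namenode_id(site, 'mycluster', 'host2.example.com'))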

[09/19] ambari git commit: AMBARI-19229. Remove HDP-3.0.0 stack definition from Ambari-2.5 (alejandro)

Posted by al...@apache.org.
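
The file removed below, MAPREDUCE2_metrics.json, maps Ambari metric paths (for example metrics/jvm/memHeapUsedM) to a Ganglia or JMX source name plus two flags, pointInTime and temporal. As a rough sketch of how such a definition can be consumed, the snippet below walks the per-component blocks and lists the time-series metrics; the file path and helper name are illustrative, not part of Ambari.

    import json

    def list_temporal_metrics(path, component):
        # Return (source type, metric path, source metric name) for every
        # metric in the given component that exposes time-series data.
        with open(path) as f:
            defs = json.load(f)
        result = []
        for block in defs.get(component, {}).get('Component', []):
            defaults = block.get('metrics', {}).get('default', {})
            for metric_path, spec in defaults.items():
                if spec.get('temporal'):
                    result.append((block.get('type'), metric_path, spec.get('metric')))
        return result

    # Example usage (path is illustrative):
    # for source, mpath, name in list_temporal_metrics('MAPREDUCE2_metrics.json', 'HISTORYSERVER'):
    #     print(source, mpath, '->', name)
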
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/MAPREDUCE2_metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/MAPREDUCE2_metrics.json b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/MAPREDUCE2_metrics.json
deleted file mode 100644
index f44e3b2..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/MAPREDUCE2_metrics.json
+++ /dev/null
@@ -1,2596 +0,0 @@
-{
-  "HISTORYSERVER": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "jvm.JvmMetrics.ThreadsRunnable",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "jvm.JvmMetrics.ThreadsNew",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationFailures": {
-              "metric": "rpc.metrics.RpcAuthorizationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "ugi.ugi.LoginSuccessAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "rpc.rpc.SentBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "jvm.JvmMetrics.LogWarn",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/process/proc_run": {
-              "metric": "proc_run",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "jvm.JvmMetrics.GcCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "rpc.rpc.ReceivedBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/swap_total": {
-              "metric": "swap_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "jvm.JvmMetrics.ThreadsBlocked",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "rpc.rpc.RpcQueueTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/process/proc_total": {
-              "metric": "proc_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/part_max_used": {
-              "metric": "part_max_used",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "rpc.rpc.NumOpenConnections",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/mem_buffers": {
-              "metric": "mem_buffers",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "ugi.ugi.LoginSuccessNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "jvm.JvmMetrics.GcTimeMillis",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "jvm.JvmMetrics.ThreadsTerminated",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/network/bytes_out": {
-              "metric": "bytes_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_aidle": {
-              "metric": "cpu_aidle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/bytes_in": {
-              "metric": "bytes_in",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/boottime": {
-              "metric": "boottime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/pkts_out": {
-              "metric": "pkts_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/callQueueLen": {
-              "metric": "rpc.rpc.CallQueueLength",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "jvm.JvmMetrics.LogInfo",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "ugi.ugi.LoginFailureNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_num_ops": {
-              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logError": {
-              "metric": "jvm.JvmMetrics.LogError",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "ugi.ugi.LoginFailureAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "jvm.JvmMetrics.LogFatal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_avg_time": {
-              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationSuccesses": {
-              "metric": "rpc.metrics.RpcAuthenticationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationFailures": {
-              "metric": "rpc.metrics.RpcAuthenticationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/network/pkts_in": {
-              "metric": "pkts_in",
-              "pointInTime": true,
-              "temporal": true
-            }
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "default": {
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.ReceivedBytes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.SentBytes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTimeNumOps": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcQueueTimeNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTimeAvgTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcQueueTimeAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcProcessingTimeNumOps": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcProcessingTimeNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcProcessingTimeAvgTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcProcessingTimeAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcAuthenticationFailures": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthenticationFailures",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcAuthenticationSuccesses": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthenticationSuccesses",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcAuthorizationFailures": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthorizationFailures",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcAuthorizationSuccesses": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthorizationSuccesses",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.NumOpenConnections",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/CallQueueLength": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.CallQueueLength",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/MemNonHeapUsedM": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/MemNonHeapCommittedM": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/MemHeapUsedM": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapUsedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/MemHeapCommittedM": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapCommittedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/MemMaxM": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemMaxM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcCountCopy": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCountCopy",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcTimeMillisCopy": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillisCopy",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcCountMarkSweepCompact": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCountMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcTimeMillisMarkSweepCompact": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillisMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcCount": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcTimeMillis": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillis",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/ThreadsNew": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsNew",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/ThreadsRunnable": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsRunnable",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/ThreadsBlocked": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsBlocked",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/ThreadsWaiting": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsWaiting",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/ThreadsTimedWaiting": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/ThreadsTerminated": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTerminated",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/LogFatal": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogFatal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/LogError": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogError",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/LogWarn": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogWarn",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/LogInfo": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogInfo",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/HeapMemoryMax": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/HeapMemoryUsed": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/HeapMemoryCommitted": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[committed]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/HeapMemoryInit": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[init]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/NonHeapMemoryMax": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/NonHeapMemoryUsed": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/NonHeapMemoryCommitted": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[committed]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/NonHeapMemoryInit": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[init]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/MBeanServerId": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.MBeanServerId",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/SpecificationName": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationName",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/SpecificationVersion": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationVersion",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/SpecificationVendor": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationVendor",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/ImplementationName": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationName",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/ImplementationVersion": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationVersion",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/ImplementationVendor": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationVendor",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/ElapsedTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.ElapsedTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/PercentComplete": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.PercentComplete",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingFsImageCount": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingFsImageElapsedTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageElapsedTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingFsImageTotal": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageTotal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingFsImagePercentComplete": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImagePercentComplete",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingEditsCount": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingEditsElapsedTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsElapsedTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingEditsTotal": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsTotal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingEditsPercentComplete": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsPercentComplete",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SavingCheckpointCount": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SavingCheckpointElapsedTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointElapsedTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SavingCheckpointTotal": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointTotal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SavingCheckpointPercentComplete": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointPercentComplete",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SafeModeCount": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SafeModeElapsedTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeElapsedTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SafeModeTotal": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeTotal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SafeModePercentComplete": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModePercentComplete",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/HotSpotDiagnostic/DiagnosticOptions": {
-              "metric": "com.sun.management:type=HotSpotDiagnostic.DiagnosticOptions",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryManager/MemoryPoolNames": {
-              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.MemoryPoolNames",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryManager/Name": {
-              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryManager/Valid": {
-              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.Valid",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryManager/ObjectName": {
-              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.ObjectName",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Logging/LoggerNames": {
-              "metric": "java.util.logging:type=Logging.LoggerNames",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/UgiMetrics/LoginSuccessNumOps": {
-              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginSuccessNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/UgiMetrics/LoginSuccessAvgTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginSuccessAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/UgiMetrics/LoginFailureNumOps": {
-              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginFailureNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/UgiMetrics/LoginFailureAvgTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginFailureAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/CollectionUsage": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThreshold": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThreshold",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdCount": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/MemoryManagerNames": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.MemoryManagerNames",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/PeakUsage": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.PeakUsage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/Usage": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Usage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdExceeded": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdExceeded",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdSupported": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/UsageThresholdSupported": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.UsageThresholdSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/Name": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/Type": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Type",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/Valid": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Valid",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadAllocatedMemoryEnabled": {
-              "metric": "java.lang:type=Threading.ThreadAllocatedMemoryEnabled",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadAllocatedMemorySupported": {
-              "metric": "java.lang:type=Threading.ThreadAllocatedMemorySupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/DaemonThreadCount": {
-              "metric": "java.lang:type=Threading.DaemonThreadCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/PeakThreadCount": {
-              "metric": "java.lang:type=Threading.PeakThreadCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/CurrentThreadCpuTimeSupported": {
-              "metric": "java.lang:type=Threading.CurrentThreadCpuTimeSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ObjectMonitorUsageSupported": {
-              "metric": "java.lang:type=Threading.ObjectMonitorUsageSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/SynchronizerUsageSupported": {
-              "metric": "java.lang:type=Threading.SynchronizerUsageSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadContentionMonitoringSupported": {
-              "metric": "java.lang:type=Threading.ThreadContentionMonitoringSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadCpuTimeEnabled": {
-              "metric": "java.lang:type=Threading.ThreadCpuTimeEnabled",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/CurrentThreadCpuTime": {
-              "metric": "java.lang:type=Threading.CurrentThreadCpuTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/CurrentThreadUserTime": {
-              "metric": "java.lang:type=Threading.CurrentThreadUserTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadCount": {
-              "metric": "java.lang:type=Threading.ThreadCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/TotalStartedThreadCount": {
-              "metric": "java.lang:type=Threading.TotalStartedThreadCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadCpuTimeSupported": {
-              "metric": "java.lang:type=Threading.ThreadCpuTimeSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadContentionMonitoringEnabled": {
-              "metric": "java.lang:type=Threading.ThreadContentionMonitoringEnabled",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/AllThreadIds": {
-              "metric": "java.lang:type=Threading.AllThreadIds",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ClassLoading/LoadedClassCount": {
-              "metric": "java.lang:type=ClassLoading.LoadedClassCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ClassLoading/UnloadedClassCount": {
-              "metric": "java.lang:type=ClassLoading.UnloadedClassCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ClassLoading/TotalLoadedClassCount": {
-              "metric": "java.lang:type=ClassLoading.TotalLoadedClassCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ClassLoading/Verbose": {
-              "metric": "java.lang:type=ClassLoading.Verbose",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/BootClassPath": {
-              "metric": "java.lang:type=Runtime.BootClassPath",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/LibraryPath": {
-              "metric": "java.lang:type=Runtime.LibraryPath",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/VmName": {
-              "metric": "java.lang:type=Runtime.VmName",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/VmVendor": {
-              "metric": "java.lang:type=Runtime.VmVendor",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/VmVersion": {
-              "metric": "java.lang:type=Runtime.VmVersion",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/BootClassPathSupported": {
-              "metric": "java.lang:type=Runtime.BootClassPathSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/StartTime": {
-              "metric": "java.lang:type=Runtime.StartTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/InputArguments": {
-              "metric": "java.lang:type=Runtime.InputArguments",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/ManagementSpecVersion": {
-              "metric": "java.lang:type=Runtime.ManagementSpecVersion",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/SpecName": {
-              "metric": "java.lang:type=Runtime.SpecName",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/SpecVendor": {
-              "metric": "java.lang:type=Runtime.SpecVendor",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/SpecVersion": {
-              "metric": "java.lang:type=Runtime.SpecVersion",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/SystemProperties": {
-              "metric": "java.lang:type=Runtime.SystemProperties",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/Uptime": {
-              "metric": "java.lang:type=Runtime.Uptime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/Name": {
-              "metric": "java.lang:type=Runtime.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/ClassPath": {
-              "metric": "java.lang:type=Runtime.ClassPath",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/MaxFileDescriptorCount": {
-              "metric": "java.lang:type=OperatingSystem.MaxFileDescriptorCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/OpenFileDescriptorCount": {
-              "metric": "java.lang:type=OperatingSystem.OpenFileDescriptorCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/CommittedVirtualMemorySize": {
-              "metric": "java.lang:type=OperatingSystem.CommittedVirtualMemorySize",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/FreePhysicalMemorySize": {
-              "metric": "java.lang:type=OperatingSystem.FreePhysicalMemorySize",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/FreeSwapSpaceSize": {
-              "metric": "java.lang:type=OperatingSystem.FreeSwapSpaceSize",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/ProcessCpuLoad": {
-              "metric": "java.lang:type=OperatingSystem.ProcessCpuLoad",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/ProcessCpuTime": {
-              "metric": "java.lang:type=OperatingSystem.ProcessCpuTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/SystemCpuLoad": {
-              "metric": "java.lang:type=OperatingSystem.SystemCpuLoad",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/TotalPhysicalMemorySize": {
-              "metric": "java.lang:type=OperatingSystem.TotalPhysicalMemorySize",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/TotalSwapSpaceSize": {
-              "metric": "java.lang:type=OperatingSystem.TotalSwapSpaceSize",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/AvailableProcessors": {
-              "metric": "java.lang:type=OperatingSystem.AvailableProcessors",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/Version": {
-              "metric": "java.lang:type=OperatingSystem.Version",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/Arch": {
-              "metric": "java.lang:type=OperatingSystem.Arch",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/SystemLoadAverage": {
-              "metric": "java.lang:type=OperatingSystem.SystemLoadAverage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/Name": {
-              "metric": "java.lang:type=OperatingSystem.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/CollectionUsage": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/CollectionUsageThreshold": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThreshold",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/CollectionUsageThresholdCount": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThresholdCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/MemoryManagerNames": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.MemoryManagerNames",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/PeakUsage": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.PeakUsage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/Usage": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.Usage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/UsageThreshold": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThreshold",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/UsageThresholdCount": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThresholdCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/CollectionUsageThresholdExceeded": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThresholdExceeded",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/CollectionUsageThresholdSupported": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThresholdSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/UsageThresholdExceeded": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThresholdExceeded",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/UsageThresholdSupported": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThresholdSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/Name": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/Type": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.Type",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/Valid": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.Valid",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/BufferPool/mapred/Count": {
-              "metric": "java.nio:type=BufferPool,name=mapped.Count",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/BufferPool/mapred/MemoryUsed": {
-              "metric": "java.nio:type=BufferPool,name=mapped.MemoryUsed",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/BufferPool/mapred/TotalCapacity": {
-              "metric": "java.nio:type=BufferPool,name=mapped.TotalCapacity",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/BufferPool/mapred/Name": {
-              "metric": "java.nio:type=BufferPool,name=mapped.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/BufferPool/direct/Count": {
-              "metric": "java.nio:type=BufferPool,name=direct.Count",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/BufferPool/direct/MemoryUsed": {
-              "metric": "java.nio:type=BufferPool,name=direct.MemoryUsed",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/BufferPool/direct/TotalCapacity": {
-              "metric": "java.nio:type=BufferPool,name=direct.TotalCapacity",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/BufferPool/direct/Name": {
-              "metric": "java.nio:type=BufferPool,name=direct.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/GarbageCollector/MarkSweepCompact/LastGcInfo": {
-              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.LastGcInfo",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/GarbageCollector/MarkSweepCompact/CollectionCount": {
-              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.CollectionCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/GarbageCollector/MarkSweepCompact/CollectionTime": {
-              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.CollectionTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/GarbageCollector/MarkSweepCompact/MemoryPoolNames": {
-              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.MemoryPoolNames",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/GarbageCollector/MarkSweepCompact/Name": {
-              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/GarbageCollector/MarkSweepCompact/Valid": {
-              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.Valid",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/GarbageCollector/Copy/LastGcInfo": {
-              "metric": "java.lang:type=GarbageCollector,name=Copy.LastGcInfo",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/GarbageCollector/Copy/CollectionCount": {
-              "metric": "java.lang:type=GarbageCollector,name=Copy.CollectionCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/GarbageCollector/Copy/CollectionTime": {
-              "metric": "java.lang:type=GarbageCollector,name=Copy.CollectionTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/GarbageCollector/Copy/MemoryPoolNames": {
-              "metric": "java.lang:type=GarbageCollector,name=Copy.MemoryPoolNames",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/GarbageCollector/Copy/Name": {
-              "metric": "java.lang:type=GarbageCollector,name=Copy.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/GarbageCollector/Copy/Valid": {
-              "metric": "java.lang:type=GarbageCollector,name=Copy.Valid",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/CodeCache/CollectionUsage": {
-              "metric": "java.lang:type=MemoryPool,name=Code Cache.CollectionUsage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/CodeCache/MemoryManagerNames": {
-              "metric": "java.lang:type=MemoryPool,name=Code Cache.MemoryManagerNames",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/CodeCache/PeakUsage": {
-              "metric": "java.lang:type=MemoryPool,name=Code Cache.PeakUsage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/CodeCache/Usage": {
-              "metric": "java.lang:type=MemoryPool,name=Code Cache.Usage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/CodeCache/UsageThreshold": {
-              "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThreshold",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/CodeCache/UsageThresholdCount": {
-              "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThresholdCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/CodeCache/CollectionUsageThresholdSupported": {
-              "metric": "java.lang:type=MemoryPool,name=Code Cache.CollectionUsageThresholdSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/CodeCache/UsageThresholdExceeded": {
-              "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThresholdExceeded",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/CodeCache/UsageThresholdSupported": {
-              "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThresholdSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/CodeCache/Name": {
-              "metric": "java.lang:type=MemoryPool,name=Code Cache.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/CodeCache/Type": {
-              "metric": "java.lang:type=MemoryPool,name=Code Cache.Type",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/CodeCache/Valid": {
-              "metric": "java.lang:type=MemoryPool,name=Code Cache.Valid",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/EdenSpace/CollectionUsage": {
-              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/EdenSpace/CollectionUsageThreshold": {
-              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThreshold",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/EdenSpace/CollectionUsageThresholdCount": {
-              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThresholdCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/EdenSpace/MemoryManagerNames": {
-              "metric": "java.lang:type=MemoryPool,name=Eden Space.MemoryManagerNames",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/EdenSpace/PeakUsage": {
-              "metric": "java.lang:type=MemoryPool,name=Eden Space.PeakUsage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/EdenSpace/Usage": {
-              "metric": "java.lang:type=MemoryPool,name=Eden Space.Usage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/EdenSpace/CollectionUsageThresholdSupported": {
-              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThresholdSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/EdenSpace/CollectionUsageThresholdExceeded": {
-              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThresholdExceeded",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/EdenSpace/UsageThresholdSupported": {
-              "metric": "java.lang:type=MemoryPool,name=Eden Space.UsageThresholdSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/EdenSpace/Name": {
-              "metric": "java.lang:type=MemoryPool,name=Eden Space.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/EdenSpace/Type": {
-              "metric": "java.lang:type=MemoryPool,name=Eden Space.Type",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/EdenSpace/Valid": {
-              "metric": "java.lang:type=MemoryPool,name=Eden Space.Valid",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/CollectionUsage": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/CollectionUsageThreshold": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThreshold",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/CollectionUsageThresholdCount": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThresholdCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/MemoryManagerNames": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.MemoryManagerNames",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/PeakUsage": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.PeakUsage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/Usage": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Usage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/UsageThreshold": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThreshold",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/UsageThresholdCount": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThresholdCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/CollectionUsageThresholdSupported": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThresholdSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/CollectionUsageThresholdExceeded": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThresholdExceeded",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/UsageThresholdSupported": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThresholdSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/UsageThresholdExceeded": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThresholdExceeded",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/Name": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/Type": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Type",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/Valid": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Valid",
-              "pointInTime": true,
-              "temporal": false
-            }
-          }
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "jvm.JvmMetrics.ThreadsRunnable",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "jvm.JvmMetrics.ThreadsNew",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationFailures": {
-              "metric": "rpc.metrics.RpcAuthorizationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "ugi.ugi.LoginSuccessAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "rpc.rpc.SentBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "jvm.JvmMetrics.LogWarn",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/process/proc_run": {
-              "metric": "proc_run",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "jvm.JvmMetrics.GcCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "rpc.rpc.ReceivedBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/swap_total": {
-              "metric": "swap_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "jvm.JvmMetrics.ThreadsBlocked",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "rpc.rpc.RpcQueueTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/process/proc_total": {
-              "metric": "proc_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/part_max_used": {
-              "metric": "part_max_used",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "rpc.rpc.NumOpenConnections",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/mem_buffers": {
-              "metric": "mem_buffers",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "ugi.ugi.LoginSuccessNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "jvm.JvmMetrics.GcTimeMillis",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "jvm.JvmMetrics.ThreadsTerminated",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/network/bytes_out": {
-              "metric": "bytes_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_aidle": {
-              "metric": "cpu_aidle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/bytes_in": {
-              "metric": "bytes_in",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/boottime": {
-              "metric": "boottime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/pkts_out": {
-              "metric": "pkts_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/callQueueLen": {
-              "metric": "rpc.rpc.CallQueueLength",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "jvm.JvmMetrics.LogInfo",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "ugi.ugi.LoginFailureNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_num_ops": {
-              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logError": {
-              "metric": "jvm.JvmMetrics.LogError",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "ugi.ugi.LoginFailureAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "jvm.JvmMetrics.LogFatal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_avg_time": {
-              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationSuccesses": {
-              "metric": "rpc.metrics.RpcAuthenticationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationFailures": {
-              "metric": "rpc.metrics.RpcAuthenticationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/network/pkts_in": {
-              "metric": "pkts_in",
-              "pointInTime": true,
-              "temporal": true
-            }
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "default": {
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.ReceivedBytes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.SentBytes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTimeNumOps": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcQueueTimeNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTimeAvgTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcQueueTimeAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcProcessingTimeNumOps": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcProcessingTimeNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcProcessingTimeAvgTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcProcessingTimeAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcAuthenticationFailures": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthenticationFailures",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcAuthenticationSuccesses": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthenticationSuccesses",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcAuthorizationFailures": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthorizationFailures",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcAuthorizationSuccesses": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthorizationSuccesses",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.NumOpenConnections",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/CallQueueLength": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.CallQueueLength",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/MemNonHeapUsedM": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/MemNonHeapCommittedM": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/MemHeapUsedM": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapUsedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/MemHeapCommittedM": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapCommittedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/MemMaxM": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemMaxM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcCountCopy": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCountCopy",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcTimeMillisCopy": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillisCopy",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcCountMarkSweepCompact": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCountMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcTimeMillisMarkSweepCompact": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillisMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcCount": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcTimeMillis": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillis",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/ThreadsNew": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsNew",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/ThreadsRunnable": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsRunnable",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/ThreadsBlocked": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsBlocked",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/ThreadsWaiting": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsWaiting",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/ThreadsTimedWaiting": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/ThreadsTerminated": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTerminated",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/LogFatal": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogFatal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/LogError": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogError",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/LogWarn": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogWarn",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/LogInfo": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogInfo",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/HeapMemoryMax": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/HeapMemoryUsed": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/HeapMemoryCommitted": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[committed]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/HeapMemoryInit": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[init]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/NonHeapMemoryMax": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/NonHeapMemoryUsed": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/NonHeapMemoryCommitted": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[committed]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/NonHeapMemoryInit": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[init]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/MBeanServerId": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.MBeanServerId",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/SpecificationName": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationName",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/SpecificationVersion": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationVersion",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/SpecificationVendor": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationVendor",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/ImplementationName": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationName",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/ImplementationVersion": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationVersion",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/ImplementationVendor": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationVendor",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/ElapsedTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.ElapsedTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/PercentComplete": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.PercentComplete",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingFsImageCount": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingFsImageElapsedTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageElapsedTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingFsImageTotal": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageTotal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingFsImagePercentComplete": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImagePercentComplete",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingEditsCount": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingEditsElapsedTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsElapsedTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingEditsTotal": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsTotal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingEditsPercentComplete": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsPercentComplete",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SavingCheckpointCount": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SavingCheckpointElapsedTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointElapsedTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SavingCheckpointTotal": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointTotal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SavingCheckpointPercentComplete": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointPercentComplete",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SafeModeCount": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SafeModeElapsedTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeElapsedTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SafeModeTotal": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeTotal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SafeModePercentComplete": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModePercentComplete",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/HotSpotDiagnostic/DiagnosticOptions": {
-              "metric": "com.sun.management:type=HotSpotDiagnostic.DiagnosticOptions",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryManager/MemoryPoolNames": {
-              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.MemoryPoolNames",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryManager/Name": {
-              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryManager/Valid": {
-              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.Valid",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryManager/ObjectName": {
-              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.ObjectName",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Logging/LoggerNames": {
-              "metric": "java.util.logging:type=Logging.LoggerNames",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/UgiMetrics/LoginSuccessNumOps": {
-              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginSuccessNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/UgiMetrics/LoginSuccessAvgTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginSuccessAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/UgiMetrics/LoginFailureNumOps": {
-              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginFailureNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/UgiMetrics/LoginFailureAvgTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginFailureAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/CollectionUsage": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThreshold": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThreshold",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdCount": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/MemoryManagerNames": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.MemoryManagerNames",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/PeakUsage": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.PeakUsage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/Usage": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Usage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdExceeded": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdExceeded",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdSupported": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/UsageThresholdSupported": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.UsageThresholdSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/Name": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/Type": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Type",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/Valid": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Valid",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadAllocatedMemoryEnabled": {
-              "metric": "java.lang:type=Threading.ThreadAllocatedMemoryEnabled",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadAllocatedMemorySupported": {
-              "metric": "java.lang:type=Threading.ThreadAllocatedMemorySupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/DaemonThreadCount": {
-              "metric": "java.lang:type=Threading.DaemonThreadCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/PeakThreadCount": {
-              "metric": "java.lang:type=Threading.PeakThreadCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/CurrentThreadCpuTimeSupported": {
-              "metric": "java.lang:type=Threading.CurrentThreadCpuTimeSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ObjectMonitorUsageSupported": {
-              "metric": "java.lang:type=Threading.ObjectMonitorUsageSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/SynchronizerUsageSupported": {
-              "metric": "java.lang:type=Threading.SynchronizerUsageSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadContentionMonitoringSupported": {
-              "metric": "java.lang:type=Threading.ThreadContentionMonitoringSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadCpuTimeEnabled": {
-              "metric": "java.lang:type=Threading.ThreadCpuTimeEnabled",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/CurrentThreadCpuTime": {
-              "metric": "java.lang:type=Threading.CurrentThreadCpuTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/CurrentThreadUserTime": {
-              "metric": "java.lang:type=Threading.CurrentThreadUserTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadCount": {
-              "metric": "java.lang:type=Threading.ThreadCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/TotalStartedThreadCount": {
-              "metric": "java.lang:type=Threading.TotalStartedThreadCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadCpuTimeSupported": {
-              "metric": "java.lang:type=Threading.ThreadCpuTimeSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadContentionMonitoringEnabled": {
-              "metric": "java.lang:type=Threading.ThreadContentionMonitoringEnabled",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/AllThreadIds": {
-              "metric": "java.lang:type=Threading.AllThrea

<TRUNCATED>
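
Each entry in the metric definition JSON above maps an Ambari metric path to a JMX MBean attribute, with the "pointInTime" and "temporal" flags indicating whether the value is exposed as a current snapshot and/or as a time series. The following is a minimal sketch of how such a pointInTime entry could be resolved against a daemon's /jmx servlet, written in the same Python 2 style as the deleted alert scripts later in this patch; the host, port, and function name are illustrative and not part of the removed stack definition.

  # Hypothetical helper; only the metric definition format and the /jmx query
  # pattern are taken from the removed files, everything else is an assumption.
  import json
  import urllib2

  METRIC_DEFS = {
      "metrics/jvm/MemHeapUsedM": {
          "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapUsedM",
          "pointInTime": True,
          "temporal": False
      }
  }

  def read_point_in_time_metric(host, port, metric_path, defs=METRIC_DEFS, timeout=5):
      """Fetch the current value of a pointInTime metric from the daemon's /jmx servlet."""
      bean, attr = defs[metric_path]["metric"].rsplit(".", 1)
      response = urllib2.urlopen("http://{0}:{1}/jmx?qry={2}".format(host, port, bean), timeout=timeout)
      try:
          beans = json.loads(response.read())["beans"]
          return beans[0][attr] if beans else None
      finally:
          response.close()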

[15/19] ambari git commit: AMBARI-19229. Remove HDP-3.0.0 stack definition from Ambari-2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/alerts/alert_checkpoint_time.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/alerts/alert_checkpoint_time.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/alerts/alert_checkpoint_time.py
deleted file mode 100644
index 26127c3..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/alerts/alert_checkpoint_time.py
+++ /dev/null
@@ -1,255 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import time
-import urllib2
-import ambari_simplejson as json # simplejson is much faster compared with the Python 2.6 json module and provides the same set of functions.
-import logging
-import traceback
-
-from resource_management.libraries.functions.namenode_ha_utils import get_all_namenode_addresses
-from resource_management.libraries.functions.curl_krb_request import curl_krb_request
-from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
-from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
-from resource_management.core.environment import Environment
-
-LABEL = 'Last Checkpoint: [{h} hours, {m} minutes, {tx} transactions]'
-HDFS_SITE_KEY = '{{hdfs-site}}'
-
-RESULT_STATE_UNKNOWN = 'UNKNOWN'
-RESULT_STATE_SKIPPED = 'SKIPPED'
-
-NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
-NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
-NN_HTTP_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
-NN_CHECKPOINT_TX_KEY = '{{hdfs-site/dfs.namenode.checkpoint.txns}}'
-NN_CHECKPOINT_PERIOD_KEY = '{{hdfs-site/dfs.namenode.checkpoint.period}}'
-
-PERCENT_WARNING_KEY = 'checkpoint.time.warning.threshold'
-PERCENT_WARNING_DEFAULT = 200
-
-PERCENT_CRITICAL_KEY = 'checkpoint.time.critical.threshold'
-PERCENT_CRITICAL_DEFAULT = 200
-
-CHECKPOINT_TX_MULTIPLIER_WARNING_KEY = 'checkpoint.txns.multiplier.warning.threshold'
-CHECKPOINT_TX_MULTIPLIER_WARNING_DEFAULT = 2
-
-CHECKPOINT_TX_MULTIPLIER_CRITICAL_KEY = 'checkpoint.txns.multiplier.critical.threshold'
-CHECKPOINT_TX_MULTIPLIER_CRITICAL_DEFAULT = 4
-
-CHECKPOINT_TX_DEFAULT = 1000000
-CHECKPOINT_PERIOD_DEFAULT = 21600
-
-CONNECTION_TIMEOUT_KEY = 'connection.timeout'
-CONNECTION_TIMEOUT_DEFAULT = 5.0
-
-KERBEROS_KEYTAB = '{{hdfs-site/dfs.web.authentication.kerberos.keytab}}'
-KERBEROS_PRINCIPAL = '{{hdfs-site/dfs.web.authentication.kerberos.principal}}'
-SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
-SMOKEUSER_KEY = "{{cluster-env/smokeuser}}"
-EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
-
-logger = logging.getLogger('ambari_alerts')
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (HDFS_SITE_KEY, NN_HTTP_ADDRESS_KEY, NN_HTTPS_ADDRESS_KEY, NN_HTTP_POLICY_KEY, EXECUTABLE_SEARCH_PATHS,
-      NN_CHECKPOINT_TX_KEY, NN_CHECKPOINT_PERIOD_KEY, KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY, SMOKEUSER_KEY)
-  
-
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations (dictionary): a mapping of configuration key to value
-  parameters (dictionary): a mapping of script parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-
-  if configurations is None:
-    return (('UNKNOWN', ['There were no configurations supplied to the script.']))
-  
-  uri = None
-  scheme = 'http'  
-  http_uri = None
-  https_uri = None
-  http_policy = 'HTTP_ONLY'
-  checkpoint_tx = CHECKPOINT_TX_DEFAULT
-  checkpoint_period = CHECKPOINT_PERIOD_DEFAULT
-
-  # hdfs-site is required
-  if not HDFS_SITE_KEY in configurations:
-    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)])
-
-  if NN_HTTP_POLICY_KEY in configurations:
-    http_policy = configurations[NN_HTTP_POLICY_KEY]
-
-  if NN_CHECKPOINT_TX_KEY in configurations:
-    checkpoint_tx = configurations[NN_CHECKPOINT_TX_KEY]
-
-  if NN_CHECKPOINT_PERIOD_KEY in configurations:
-    checkpoint_period = configurations[NN_CHECKPOINT_PERIOD_KEY]
-    
-  if SMOKEUSER_KEY in configurations:
-    smokeuser = configurations[SMOKEUSER_KEY]
-
-  executable_paths = None
-  if EXECUTABLE_SEARCH_PATHS in configurations:
-    executable_paths = configurations[EXECUTABLE_SEARCH_PATHS]
-
-  security_enabled = False
-  if SECURITY_ENABLED_KEY in configurations:
-    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
-
-  kerberos_keytab = None
-  if KERBEROS_KEYTAB in configurations:
-    kerberos_keytab = configurations[KERBEROS_KEYTAB]
-
-  kerberos_principal = None
-  if KERBEROS_PRINCIPAL in configurations:
-    kerberos_principal = configurations[KERBEROS_PRINCIPAL]
-    kerberos_principal = kerberos_principal.replace('_HOST', host_name)
-
-  # parse script arguments
-  connection_timeout = CONNECTION_TIMEOUT_DEFAULT
-  if CONNECTION_TIMEOUT_KEY in parameters:
-    connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
-
-  percent_warning = PERCENT_WARNING_DEFAULT
-  if PERCENT_WARNING_KEY in parameters:
-    percent_warning = float(parameters[PERCENT_WARNING_KEY])
-
-  percent_critical = PERCENT_CRITICAL_DEFAULT
-  if PERCENT_CRITICAL_KEY in parameters:
-    percent_critical = float(parameters[PERCENT_CRITICAL_KEY])
-
-  checkpoint_txn_multiplier_warning = CHECKPOINT_TX_MULTIPLIER_WARNING_DEFAULT
-  if CHECKPOINT_TX_MULTIPLIER_WARNING_KEY in parameters:
-    checkpoint_txn_multiplier_warning = float(parameters[CHECKPOINT_TX_MULTIPLIER_WARNING_KEY])
-
-  checkpoint_txn_multiplier_critical = CHECKPOINT_TX_MULTIPLIER_CRITICAL_DEFAULT
-  if CHECKPOINT_TX_MULTIPLIER_CRITICAL_KEY in parameters:
-    checkpoint_txn_multiplier_critical = float(parameters[CHECKPOINT_TX_MULTIPLIER_CRITICAL_KEY])
-
-  kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
-
-  # determine the right URI and whether to use SSL
-  hdfs_site = configurations[HDFS_SITE_KEY]
-
-  scheme = "https" if http_policy == "HTTPS_ONLY" else "http"
-
-  nn_addresses = get_all_namenode_addresses(hdfs_site)
-  for nn_address in nn_addresses:
-    if nn_address.startswith(host_name + ":"):
-      uri = nn_address
-      break
-  if not uri:
-    return (RESULT_STATE_SKIPPED, ['NameNode on host {0} not found (namenode addresses = {1})'.format(host_name, ', '.join(nn_addresses))])
-
-  current_time = int(round(time.time() * 1000))
-
-  last_checkpoint_time_qry = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem".format(scheme,uri)
-  journal_transaction_info_qry = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo".format(scheme,uri)
-
-  # start out assuming an OK status
-  label = None
-  result_code = "OK"
-
-  try:
-    if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
-      env = Environment.get_instance()
-
-      # curl requires an integer timeout
-      curl_connection_timeout = int(connection_timeout)
-
-      last_checkpoint_time_response, error_msg, time_millis = curl_krb_request(env.tmp_dir, kerberos_keytab,
-        kerberos_principal, last_checkpoint_time_qry,"checkpoint_time_alert", executable_paths, False,
-        "NameNode Last Checkpoint", smokeuser, connection_timeout=curl_connection_timeout,
-        kinit_timer_ms = kinit_timer_ms)
-
-      last_checkpoint_time_response_json = json.loads(last_checkpoint_time_response)
-      last_checkpoint_time = int(last_checkpoint_time_response_json["beans"][0]["LastCheckpointTime"])
-
-      journal_transaction_info_response, error_msg, time_millis = curl_krb_request(env.tmp_dir, kerberos_keytab,
-        kerberos_principal, journal_transaction_info_qry,"checkpoint_time_alert", executable_paths,
-        False, "NameNode Last Checkpoint", smokeuser, connection_timeout=curl_connection_timeout,
-        kinit_timer_ms = kinit_timer_ms)
-
-      journal_transaction_info_response_json = json.loads(journal_transaction_info_response)
-      journal_transaction_info = journal_transaction_info_response_json["beans"][0]["JournalTransactionInfo"]
-    else:
-      last_checkpoint_time = int(get_value_from_jmx(last_checkpoint_time_qry,
-      "LastCheckpointTime", connection_timeout))
-
-      journal_transaction_info = get_value_from_jmx(journal_transaction_info_qry,
-      "JournalTransactionInfo", connection_timeout)
-
-    journal_transaction_info_dict = json.loads(journal_transaction_info)
-  
-    last_tx = int(journal_transaction_info_dict['LastAppliedOrWrittenTxId'])
-    most_recent_tx = int(journal_transaction_info_dict['MostRecentCheckpointTxId'])
-    transaction_difference = last_tx - most_recent_tx
-    
-    delta = (current_time - last_checkpoint_time)/1000
-
-    label = LABEL.format(h=get_time(delta)['h'], m=get_time(delta)['m'], tx=transaction_difference)
-
-    is_checkpoint_txn_warning = transaction_difference > checkpoint_txn_multiplier_warning * int(checkpoint_tx)
-    is_checkpoint_txn_critical = transaction_difference > checkpoint_txn_multiplier_critical * int(checkpoint_tx)
-
-    # Raise an alert when there are too many uncommitted transactions or when
-    # checkpointing has been missed for too long, as determined by the thresholds
-    if is_checkpoint_txn_critical or (float(delta) / int(checkpoint_period)*100 >= int(percent_critical)):
-      logger.debug('Raising critical alert: transaction_difference = {0}, checkpoint_tx = {1}'.format(transaction_difference, checkpoint_tx))
-      result_code = 'CRITICAL'
-    elif is_checkpoint_txn_warning or (float(delta) / int(checkpoint_period)*100 >= int(percent_warning)):
-      logger.debug('Raising warning alert: transaction_difference = {0}, checkpoint_tx = {1}'.format(transaction_difference, checkpoint_tx))
-      result_code = 'WARNING'
-
-  except:
-    label = traceback.format_exc()
-    result_code = 'UNKNOWN'
-        
-  return ((result_code, [label]))
-
-def get_time(delta):
-  h = int(delta/3600)
-  m = int((delta % 3600)/60)
-  return {'h':h, 'm':m}
-
-
-def get_value_from_jmx(query, jmx_property, connection_timeout):
-  response = None
-  
-  try:
-    response = urllib2.urlopen(query, timeout=connection_timeout)
-    data = response.read()
-    data_dict = json.loads(data)
-    return data_dict["beans"][0][jmx_property]
-  finally:
-    if response is not None:
-      try:
-        response.close()
-      except:
-        pass
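
The heart of the deleted alert_checkpoint_time.py above is a two-part staleness test: the alert degrades when the number of transactions since the last checkpoint exceeds a multiple of dfs.namenode.checkpoint.txns, or when the elapsed time exceeds a percentage of dfs.namenode.checkpoint.period. The condensed sketch below reproduces only that decision; the defaults mirror the constants above, while the function name and example values are ours, not Ambari's.

  # Condensed sketch of the threshold logic from the removed script (not the
  # Ambari implementation itself); defaults mirror the constants defined above.
  def checkpoint_status(seconds_since_checkpoint, txns_since_checkpoint,
                        checkpoint_period=21600, checkpoint_txns=1000000,
                        pct_warning=200, pct_critical=200,
                        txn_mult_warning=2, txn_mult_critical=4):
      period_pct = float(seconds_since_checkpoint) / checkpoint_period * 100
      if txns_since_checkpoint > txn_mult_critical * checkpoint_txns or period_pct >= pct_critical:
          return 'CRITICAL'
      if txns_since_checkpoint > txn_mult_warning * checkpoint_txns or period_pct >= pct_warning:
          return 'WARNING'
      return 'OK'

  # Example: one full checkpoint period elapsed (100%) with 2.5M uncheckpointed
  # transactions crosses the 2x transaction multiplier, so this prints 'WARNING'.
  print(checkpoint_status(21600, 2500000))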

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/alerts/alert_datanode_unmounted_data_dir.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/alerts/alert_datanode_unmounted_data_dir.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/alerts/alert_datanode_unmounted_data_dir.py
deleted file mode 100644
index 765831d..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/alerts/alert_datanode_unmounted_data_dir.py
+++ /dev/null
@@ -1,177 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import os
-import logging
-import urlparse
-
-from resource_management.libraries.functions import file_system
-from resource_management.libraries.functions import mounted_dirs_helper
-
-RESULT_STATE_OK = 'OK'
-RESULT_STATE_WARNING = 'WARNING'
-RESULT_STATE_CRITICAL = 'CRITICAL'
-RESULT_STATE_UNKNOWN = 'UNKNOWN'
-
-DFS_DATA_DIR = '{{hdfs-site/dfs.datanode.data.dir}}'
-DATA_STORAGE_TAGS = ['[DISK]','[SSD]','[RAM_DISK]','[ARCHIVE]']
-DATA_DIR_MOUNT_FILE = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
-
-logger = logging.getLogger()
-
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (DFS_DATA_DIR, DATA_DIR_MOUNT_FILE)
-
-
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations (dictionary): a mapping of configuration key to value
-  parameters (dictionary): a mapping of script parameter key to value
-  host_name (string): the name of this host where the alert is running
-
-  DataNode directories can be of the following formats and each needs to be supported:
-    /grid/dn/archive0
-    [SSD]/grid/dn/archive0
-    [ARCHIVE]file:///grid/dn/archive0
-  """
-  warnings = []
-  errors = []
-
-  if configurations is None:
-    return (RESULT_STATE_UNKNOWN, ['There were no configurations supplied to the script.'])
-
-  # Check required properties
-  if DFS_DATA_DIR not in configurations:
-    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(DFS_DATA_DIR)])
-
-  dfs_data_dir = configurations[DFS_DATA_DIR]
-
-  if dfs_data_dir is None:
-    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script and the value is null'.format(DFS_DATA_DIR)])
-
-  # os.path.exists follows symlinks and returns False for a broken link (even if the break is in the middle of a chain of links)
-  data_dir_mount_file_exists = True
-  if not os.path.exists(DATA_DIR_MOUNT_FILE):
-    data_dir_mount_file_exists = False
-    warnings.append("{0} was not found.".format(DATA_DIR_MOUNT_FILE))
-
-  normalized_data_dirs = set()            # data dirs that have been normalized
-  data_dirs_not_exist = set()        # data dirs that do not exist
-  data_dirs_unknown = set()          # data dirs for which could not determine mount
-  data_dirs_on_root = set()          # set of data dirs that are on root mount
-  data_dirs_on_mount = set()         # set of data dirs that are mounted on a device
-  data_dirs_unmounted = []           # list of data dirs that are known to have become unmounted
-
-  # transform each data directory into something that we can use
-  for data_dir in dfs_data_dir.split(","):
-    if data_dir is None or data_dir.strip() == "":
-      continue
-
-    data_dir = data_dir.strip()
-
-    # filter out data storage tags
-    for tag in DATA_STORAGE_TAGS:
-      if data_dir.startswith(tag):
-        data_dir = data_dir.replace(tag, "")
-        continue
-
-    # parse the path in case it contains a URI scheme
-    data_dir = urlparse.urlparse(data_dir).path
-
-    normalized_data_dirs.add(data_dir)
-
-  # Sort the data dirs, which is needed for deterministic behavior when running the unit tests.
-  normalized_data_dirs = sorted(normalized_data_dirs)
-  for data_dir in normalized_data_dirs:
-    # os.path.isdir follows symlinks and returns False for a broken link (even if the break is in the middle of a chain of links)
-    if os.path.isdir(data_dir):
-      curr_mount_point = file_system.get_mount_point_for_dir(data_dir)
-      curr_mount_point = curr_mount_point.strip() if curr_mount_point else curr_mount_point
-
-      if curr_mount_point is not None and curr_mount_point != "":
-        if curr_mount_point == "/":
-          data_dirs_on_root.add(data_dir)
-        else:
-          data_dirs_on_mount.add(data_dir)
-      else:
-        data_dirs_unknown.add(data_dir)
-    else:
-      data_dirs_not_exist.add(data_dir)
-
-  # To keep the messages consistent for all hosts, sort the sets into lists
-  normalized_data_dirs = sorted(normalized_data_dirs)
-  data_dirs_not_exist = sorted(data_dirs_not_exist)
-  data_dirs_unknown = sorted(data_dirs_unknown)
-  data_dirs_on_root = sorted(data_dirs_on_root)
-
-  if data_dirs_not_exist:
-    errors.append("The following data dir(s) were not found: {0}\n".format("\n".join(data_dirs_not_exist)))
-
-  if data_dirs_unknown:
-    errors.append("Cannot find the mount point for the following data dir(s):\n{0}".format("\n".join(data_dirs_unknown)))
-
-  if data_dir_mount_file_exists:
-    # This dictionary contains the expected values of <data_dir, mount_point>
-    # Hence, we only need to analyze the data dirs that are currently on the root partition
-    # and report an error if they were expected to be on a mount.
-    #
-    # If one of the data dirs is not present in the file, it means that DataNode has not been restarted after
-    # the configuration was changed on the server, so we cannot make any assertions about it.
-    expected_data_dir_to_mount = mounted_dirs_helper.get_dir_to_mount_from_file(DATA_DIR_MOUNT_FILE)
-    for data_dir in data_dirs_on_root:
-      if data_dir in expected_data_dir_to_mount and expected_data_dir_to_mount[data_dir] != "/":
-        data_dirs_unmounted.append(data_dir)
-
-    if len(data_dirs_unmounted) > 0:
-      errors.append("Detected data dir(s) that became unmounted and are now writing to the root partition:\n{0}".format("\n".join(data_dirs_unmounted)))
-  else:
-    # Couldn't make guarantees about the expected value of mount points, so rely on this strategy that is likely to work.
-    # It will report false positives (aka false alarms) if the user actually intended to have
-    # 1+ data dirs on a mount and 1+ data dirs on the root partition.
-    if len(data_dirs_on_mount) >= 1 and len(data_dirs_on_root) >= 1:
-      errors.append("Detected at least one data dir on a mount point, but these are writing to the root partition:\n{0}".format("\n".join(data_dirs_on_root)))
-
-  # Determine the status based on warnings and errors.
-  if len(errors) == 0:
-    status = RESULT_STATE_OK
-    messages = []
-
-    # Check for warnings
-    if len(warnings) > 0:
-      status = RESULT_STATE_WARNING
-      messages += warnings
-
-    if len(normalized_data_dirs) > 0:
-      messages.append("The following data dir(s) are valid:\n{0}".format("\n".join(normalized_data_dirs)))
-    else:
-      messages.append("There are no data directories to analyze.")
-
-    return (status, ["\n".join(messages)])
-  else:
-    # Report errors
-    return (RESULT_STATE_CRITICAL, ["\n".join(errors)])
\ No newline at end of file
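
Before comparing mount points, the deleted alert_datanode_unmounted_data_dir.py above normalizes every entry of dfs.datanode.data.dir: storage tags such as [SSD] or [ARCHIVE] are stripped and any file:// scheme is removed via urlparse. The sketch below isolates just that normalization step, reusing the tag list from the script; the function name and the sample value are ours, and it is written in Python 2 style to match the original (on Python 3 the same parser lives in urllib.parse).

  # Sketch of the data-dir normalization step from the removed alert script.
  import urlparse

  DATA_STORAGE_TAGS = ['[DISK]', '[SSD]', '[RAM_DISK]', '[ARCHIVE]']

  def normalize_data_dirs(dfs_data_dir):
      """Return the sorted, de-duplicated plain paths from a dfs.datanode.data.dir value."""
      normalized = set()
      for data_dir in dfs_data_dir.split(","):
          data_dir = data_dir.strip()
          if not data_dir:
              continue
          for tag in DATA_STORAGE_TAGS:              # drop a leading storage tag like [SSD]
              if data_dir.startswith(tag):
                  data_dir = data_dir[len(tag):]
                  break
          normalized.add(urlparse.urlparse(data_dir).path)  # drop a file:// scheme if present
      return sorted(normalized)

  # Example: the three formats listed in the script's docstring collapse to one path.
  print(normalize_data_dirs("/grid/dn/archive0,[SSD]/grid/dn/archive0,[ARCHIVE]file:///grid/dn/archive0"))
  # -> ['/grid/dn/archive0']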

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/alerts/alert_ha_namenode_health.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/alerts/alert_ha_namenode_health.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/alerts/alert_ha_namenode_health.py
deleted file mode 100644
index 28b3f22..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/alerts/alert_ha_namenode_health.py
+++ /dev/null
@@ -1,243 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import urllib2
-import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
-import logging
-
-from resource_management.libraries.functions.curl_krb_request import curl_krb_request
-from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
-from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
-from resource_management.core.environment import Environment
-
-RESULT_STATE_OK = 'OK'
-RESULT_STATE_CRITICAL = 'CRITICAL'
-RESULT_STATE_UNKNOWN = 'UNKNOWN'
-RESULT_STATE_SKIPPED = 'SKIPPED'
-
-HDFS_NN_STATE_ACTIVE = 'active'
-HDFS_NN_STATE_STANDBY = 'standby'
-
-HDFS_SITE_KEY = '{{hdfs-site}}'
-NAMESERVICE_KEY = '{{hdfs-site/dfs.internal.nameservices}}'
-NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
-NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
-DFS_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
-
-KERBEROS_KEYTAB = '{{hdfs-site/dfs.web.authentication.kerberos.keytab}}'
-KERBEROS_PRINCIPAL = '{{hdfs-site/dfs.web.authentication.kerberos.principal}}'
-SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
-SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
-EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
-INADDR_ANY = '0.0.0.0'
-NAMENODE_HTTP_FRAGMENT = 'dfs.namenode.http-address.{0}.{1}'
-NAMENODE_HTTPS_FRAGMENT = 'dfs.namenode.https-address.{0}.{1}'
-NAMENODE_RPC_FRAGMENT = 'dfs.namenode.rpc-address.{0}.{1}'
-
-CONNECTION_TIMEOUT_KEY = 'connection.timeout'
-CONNECTION_TIMEOUT_DEFAULT = 5.0
-
-LOGGER_EXCEPTION_MESSAGE = "[Alert] NameNode High Availability Health on {0} fails:"
-logger = logging.getLogger('ambari_alerts')
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (HDFS_SITE_KEY, NAMESERVICE_KEY, NN_HTTP_ADDRESS_KEY, EXECUTABLE_SEARCH_PATHS,
-  NN_HTTPS_ADDRESS_KEY, DFS_POLICY_KEY, SMOKEUSER_KEY, KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY)
-  
-
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations (dictionary): a mapping of configuration key to value
-  parameters (dictionary): a mapping of script parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-  if configurations is None:
-    return (RESULT_STATE_UNKNOWN, ['There were no configurations supplied to the script.'])
-
-  # if not in HA mode, then SKIP
-  if not NAMESERVICE_KEY in configurations:
-    return (RESULT_STATE_SKIPPED, ['NameNode HA is not enabled'])
-
-  # hdfs-site is required
-  if not HDFS_SITE_KEY in configurations:
-    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)])
-  
-  if SMOKEUSER_KEY in configurations:
-    smokeuser = configurations[SMOKEUSER_KEY]
-
-  executable_paths = None
-  if EXECUTABLE_SEARCH_PATHS in configurations:
-    executable_paths = configurations[EXECUTABLE_SEARCH_PATHS]
-
-  # parse script arguments
-  connection_timeout = CONNECTION_TIMEOUT_DEFAULT
-  if CONNECTION_TIMEOUT_KEY in parameters:
-    connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
-
-  security_enabled = False
-  if SECURITY_ENABLED_KEY in configurations:
-    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
-
-  kerberos_keytab = None
-  if KERBEROS_KEYTAB in configurations:
-    kerberos_keytab = configurations[KERBEROS_KEYTAB]
-
-  kerberos_principal = None
-  if KERBEROS_PRINCIPAL in configurations:
-    kerberos_principal = configurations[KERBEROS_PRINCIPAL]
-    kerberos_principal = kerberos_principal.replace('_HOST', host_name)
-
-  kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
-
-  # determine whether or not SSL is enabled
-  is_ssl_enabled = False
-  if DFS_POLICY_KEY in configurations:
-    dfs_policy = configurations[DFS_POLICY_KEY]
-    if dfs_policy == "HTTPS_ONLY":
-      is_ssl_enabled = True
-
-  name_service = configurations[NAMESERVICE_KEY]
-  hdfs_site = configurations[HDFS_SITE_KEY]
-
-  # look for dfs.ha.namenodes.foo
-  nn_unique_ids_key = 'dfs.ha.namenodes.' + name_service
-  if not nn_unique_ids_key in hdfs_site:
-    return (RESULT_STATE_UNKNOWN, ['Unable to find unique namenode alias key {0}'.format(nn_unique_ids_key)])
-
-  namenode_http_fragment = NAMENODE_HTTP_FRAGMENT
-  jmx_uri_fragment = "http://{0}/jmx?qry=Hadoop:service=NameNode,name=*"
-
-  if is_ssl_enabled:
-    namenode_http_fragment = NAMENODE_HTTPS_FRAGMENT
-    jmx_uri_fragment = "https://{0}/jmx?qry=Hadoop:service=NameNode,name=*"
-
-
-  active_namenodes = []
-  standby_namenodes = []
-  unknown_namenodes = []
-
-  # now we have something like 'nn1,nn2,nn3,nn4'
-  # turn it into dfs.namenode.[property].[dfs.nameservices].[nn_unique_id]
-  # ie dfs.namenode.http-address.hacluster.nn1
-  nn_unique_ids = hdfs_site[nn_unique_ids_key].split(',')
-  for nn_unique_id in nn_unique_ids:
-    key = namenode_http_fragment.format(name_service,nn_unique_id)
-    rpc_key = NAMENODE_RPC_FRAGMENT.format(name_service,nn_unique_id)
-
-    if key in hdfs_site:
-      # use str() to ensure that unicode strings do not have the u' in them
-      value = str(hdfs_site[key])
-      if INADDR_ANY in value and rpc_key in hdfs_site:
-        rpc_value = str(hdfs_site[rpc_key])
-        if INADDR_ANY not in rpc_value:
-          rpc_host = rpc_value.split(":")[0]
-          value = value.replace(INADDR_ANY, rpc_host)
-
-      try:
-        jmx_uri = jmx_uri_fragment.format(value)
-        if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
-          env = Environment.get_instance()
-
-          # curl requires an integer timeout
-          curl_connection_timeout = int(connection_timeout)
-
-          state_response, error_msg, time_millis  = curl_krb_request(env.tmp_dir,
-            kerberos_keytab, kerberos_principal, jmx_uri,"ha_nn_health", executable_paths, False,
-            "NameNode High Availability Health", smokeuser, connection_timeout=curl_connection_timeout,
-            kinit_timer_ms = kinit_timer_ms)
-
-          state = _get_ha_state_from_json(state_response)
-        else:
-          state_response = get_jmx(jmx_uri, connection_timeout)
-          state = _get_ha_state_from_json(state_response)
-
-        if state == HDFS_NN_STATE_ACTIVE:
-          active_namenodes.append(value)
-        elif state == HDFS_NN_STATE_STANDBY:
-          standby_namenodes.append(value)
-        else:
-          unknown_namenodes.append(value)
-      except:
-        logger.exception(LOGGER_EXCEPTION_MESSAGE.format(host_name))
-        unknown_namenodes.append(value)
-
-  # the topology is healthy only in one scenario: exactly 1 active and 1 standby
-  is_topology_healthy = len(active_namenodes) == 1 and len(standby_namenodes) == 1
-
-  result_label = 'Active{0}, Standby{1}, Unknown{2}'.format(str(active_namenodes),
-    str(standby_namenodes), str(unknown_namenodes))
-
-  if is_topology_healthy:
-    # if there is exactly 1 active and 1 standby NN
-    return (RESULT_STATE_OK, [result_label])
-  else:
-    # other scenario
-    return (RESULT_STATE_CRITICAL, [result_label])
-
-
-def get_jmx(query, connection_timeout):
-  response = None
-  
-  try:
-    response = urllib2.urlopen(query, timeout=connection_timeout)
-    json_data = response.read()
-    return json_data
-  finally:
-    if response is not None:
-      try:
-        response.close()
-      except:
-        pass
-
-
-def _get_ha_state_from_json(string_json):
-  """
-  Searches through the specified JSON string looking for HA state
-  enumerations.
-  :param string_json: the string JSON
-  :return:  the value of the HA state (active, standby, etc)
-  """
-  json_data = json.loads(string_json)
-  jmx_beans = json_data["beans"]
-
-  # look for NameNodeStatus-State first
-  for jmx_bean in jmx_beans:
-    if "name" not in jmx_bean:
-      continue
-
-    jmx_bean_name = jmx_bean["name"]
-    if jmx_bean_name == "Hadoop:service=NameNode,name=NameNodeStatus" and "State" in jmx_bean:
-      return jmx_bean["State"]
-
-  # look for FSNamesystem-tag.HAState last
-  for jmx_bean in jmx_beans:
-    if "name" not in jmx_bean:
-      continue
-
-    jmx_bean_name = jmx_bean["name"]
-    if jmx_bean_name == "Hadoop:service=NameNode,name=FSNamesystem":
-      return jmx_bean["tag.HAState"]
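The HA health alert above derives each NameNode's state purely from its JMX output: it prefers the State attribute of the NameNodeStatus bean and falls back to tag.HAState on the FSNamesystem bean. A rough sketch of that extraction against a made-up JMX payload (not captured from a real cluster):

    import json

    SAMPLE_JMX = '''{"beans": [
      {"name": "Hadoop:service=NameNode,name=NameNodeStatus", "State": "active"},
      {"name": "Hadoop:service=NameNode,name=FSNamesystem", "tag.HAState": "active"}]}'''

    def ha_state(payload):
        beans = json.loads(payload)["beans"]
        # prefer the NameNodeStatus bean, then fall back to the FSNamesystem tag
        for bean in beans:
            if bean.get("name") == "Hadoop:service=NameNode,name=NameNodeStatus" and "State" in bean:
                return bean["State"]
        for bean in beans:
            if bean.get("name") == "Hadoop:service=NameNode,name=FSNamesystem":
                return bean.get("tag.HAState")
        return None

    print(ha_state(SAMPLE_JMX))   # active

The topology is then considered healthy only when exactly one host reports active and exactly one reports standby.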

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/alerts/alert_metrics_deviation.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/alerts/alert_metrics_deviation.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/alerts/alert_metrics_deviation.py
deleted file mode 100644
index 8a06f56..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/alerts/alert_metrics_deviation.py
+++ /dev/null
@@ -1,470 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-import httplib
-import locale
-import json
-import logging
-import urllib
-import time
-import urllib2
-
-from resource_management import Environment
-from ambari_commons.aggregate_functions import sample_standard_deviation, mean
-
-from resource_management.libraries.functions.curl_krb_request import curl_krb_request
-from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
-from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
-from ambari_commons.ambari_metrics_helper import select_metric_collector_for_sink
-
-
-RESULT_STATE_OK = 'OK'
-RESULT_STATE_CRITICAL = 'CRITICAL'
-RESULT_STATE_WARNING = 'WARNING'
-RESULT_STATE_UNKNOWN = 'UNKNOWN'
-RESULT_STATE_SKIPPED = 'SKIPPED'
-
-HDFS_NN_STATE_ACTIVE = 'active'
-HDFS_NN_STATE_STANDBY = 'standby'
-
-HDFS_SITE_KEY = '{{hdfs-site}}'
-NAMESERVICE_KEY = '{{hdfs-site/dfs.internal.nameservices}}'
-NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
-NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
-DFS_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
-
-KERBEROS_KEYTAB = '{{hdfs-site/dfs.web.authentication.kerberos.keytab}}'
-KERBEROS_PRINCIPAL = '{{hdfs-site/dfs.web.authentication.kerberos.principal}}'
-SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
-SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
-EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
-
-METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY = '{{ams-site/timeline.metrics.service.webapp.address}}'
-METRICS_COLLECTOR_VIP_HOST_KEY = '{{cluster-env/metrics_collector_vip_host}}'
-METRICS_COLLECTOR_VIP_PORT_KEY = '{{cluster-env/metrics_collector_vip_port}}'
-
-CONNECTION_TIMEOUT_KEY = 'connection.timeout'
-CONNECTION_TIMEOUT_DEFAULT = 5.0
-
-MERGE_HA_METRICS_PARAM_KEY = 'mergeHaMetrics'
-MERGE_HA_METRICS_PARAM_DEFAULT = False
-METRIC_NAME_PARAM_KEY = 'metricName'
-METRIC_NAME_PARAM_DEFAULT = ''
-METRIC_UNITS_PARAM_KEY = 'metric.units'
-METRIC_UNITS_DEFAULT = ''
-APP_ID_PARAM_KEY = 'appId'
-APP_ID_PARAM_DEFAULT = 'NAMENODE'
-
-# the interval to check the metric (should be cast to int but could be a float)
-INTERVAL_PARAM_KEY = 'interval'
-INTERVAL_PARAM_DEFAULT = 60
-
-# the default threshold to trigger a CRITICAL (should be cast to int but could be a float)
-DEVIATION_CRITICAL_THRESHOLD_KEY = 'metric.deviation.critical.threshold'
-DEVIATION_CRITICAL_THRESHOLD_DEFAULT = 10
-
-# the default threshold to trigger a WARNING (should be cast to int but could be a float)
-DEVIATION_WARNING_THRESHOLD_KEY = 'metric.deviation.warning.threshold'
-DEVIATION_WARNING_THRESHOLD_DEFAULT = 5
-NAMENODE_SERVICE_RPC_PORT_KEY = ''
-
-MINIMUM_VALUE_THRESHOLD_KEY = 'minimumValue'
-
-AMS_METRICS_GET_URL = "/ws/v1/timeline/metrics?%s"
-
-# The variance for this alert is 27MB which is 27% of the 100MB average (20MB is the limit)
-DEVIATION_THRESHOLD_MESSAGE = "The variance for this alert is {0}{1} which is {2:.0f}% of the {3}{4} average ({5}{6} is the limit)"
-
-# The variance for this alert is 15MB which is within 20% of the 100MB average (20MB is the limit)
-DEVIATION_OK_MESSAGE = "The variance for this alert is {0}{1} which is within {2:.0f}% of the {3}{4} average ({5}{6} is the limit)"
-
-logger = logging.getLogger()
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (HDFS_SITE_KEY, NAMESERVICE_KEY, NN_HTTP_ADDRESS_KEY, DFS_POLICY_KEY,
-          EXECUTABLE_SEARCH_PATHS, NN_HTTPS_ADDRESS_KEY, SMOKEUSER_KEY,
-          KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY,
-          METRICS_COLLECTOR_VIP_HOST_KEY, METRICS_COLLECTOR_VIP_PORT_KEY,
-          METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY)
-
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations : a mapping of configuration key to value
-  parameters : a mapping of script parameter key to value
-  host_name : the name of this host where the alert is running
-
-  :type configurations dict
-  :type parameters dict
-  :type host_name str
-  """
-  hostnames = host_name
-  current_time = int(time.time()) * 1000
-
-  # parse script arguments
-  connection_timeout = CONNECTION_TIMEOUT_DEFAULT
-  if CONNECTION_TIMEOUT_KEY in parameters:
-    connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
-
-  merge_ha_metrics = MERGE_HA_METRICS_PARAM_DEFAULT
-  if MERGE_HA_METRICS_PARAM_KEY in parameters:
-    merge_ha_metrics = parameters[MERGE_HA_METRICS_PARAM_KEY].lower() == 'true'
-
-  metric_name = METRIC_NAME_PARAM_DEFAULT
-  if METRIC_NAME_PARAM_KEY in parameters:
-    metric_name = parameters[METRIC_NAME_PARAM_KEY]
-
-  metric_units = METRIC_UNITS_DEFAULT
-  if METRIC_UNITS_PARAM_KEY in parameters:
-    metric_units = parameters[METRIC_UNITS_PARAM_KEY]
-
-  app_id = APP_ID_PARAM_DEFAULT
-  if APP_ID_PARAM_KEY in parameters:
-    app_id = parameters[APP_ID_PARAM_KEY]
-
-  interval = INTERVAL_PARAM_DEFAULT
-  if INTERVAL_PARAM_KEY in parameters:
-    interval = _coerce_to_integer(parameters[INTERVAL_PARAM_KEY])
-
-  warning_threshold = DEVIATION_WARNING_THRESHOLD_DEFAULT
-  if DEVIATION_WARNING_THRESHOLD_KEY in parameters:
-    warning_threshold = _coerce_to_integer(parameters[DEVIATION_WARNING_THRESHOLD_KEY])
-
-  critical_threshold = DEVIATION_CRITICAL_THRESHOLD_DEFAULT
-  if DEVIATION_CRITICAL_THRESHOLD_KEY in parameters:
-    critical_threshold = _coerce_to_integer(parameters[DEVIATION_CRITICAL_THRESHOLD_KEY])
-
-  minimum_value_threshold = None
-  if MINIMUM_VALUE_THRESHOLD_KEY in parameters:
-    minimum_value_threshold = _coerce_to_integer(parameters[MINIMUM_VALUE_THRESHOLD_KEY])
-
-  #parse configuration
-  if configurations is None:
-    return (RESULT_STATE_UNKNOWN, ['There were no configurations supplied to the script.'])
-
-  # hdfs-site is required
-  if not HDFS_SITE_KEY in configurations:
-    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)])
-
-  if METRICS_COLLECTOR_VIP_HOST_KEY in configurations and METRICS_COLLECTOR_VIP_PORT_KEY in configurations:
-    collector_host = configurations[METRICS_COLLECTOR_VIP_HOST_KEY]
-    collector_port = int(configurations[METRICS_COLLECTOR_VIP_PORT_KEY])
-  else:
-    # ams-site/timeline.metrics.service.webapp.address is required
-    if not METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY in configurations:
-      return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY)])
-    else:
-      collector_webapp_address = configurations[METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY].split(":")
-      if valid_collector_webapp_address(collector_webapp_address):
-        collector_host = select_metric_collector_for_sink(app_id.lower())
-        collector_port = int(collector_webapp_address[1])
-      else:
-        return (RESULT_STATE_UNKNOWN, ['{0} value should be set as "fqdn_hostname:port", but set to {1}'.format(
-          METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY, configurations[METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY])])
-
-  namenode_service_rpc_address = None
-  # hdfs-site is required
-  if not HDFS_SITE_KEY in configurations:
-    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)])
-
-  hdfs_site = configurations[HDFS_SITE_KEY]
-
-  if 'dfs.namenode.servicerpc-address' in hdfs_site:
-    namenode_service_rpc_address = hdfs_site['dfs.namenode.servicerpc-address']
-
-  # if namenode alert and HA mode
-  if NAMESERVICE_KEY in configurations and app_id.lower() == 'namenode':
-    # hdfs-site is required
-    if not HDFS_SITE_KEY in configurations:
-      return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)])
-
-    if SMOKEUSER_KEY in configurations:
-      smokeuser = configurations[SMOKEUSER_KEY]
-
-    executable_paths = None
-    if EXECUTABLE_SEARCH_PATHS in configurations:
-      executable_paths = configurations[EXECUTABLE_SEARCH_PATHS]
-
-    # parse script arguments
-    security_enabled = False
-    if SECURITY_ENABLED_KEY in configurations:
-      security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
-
-    kerberos_keytab = None
-    if KERBEROS_KEYTAB in configurations:
-      kerberos_keytab = configurations[KERBEROS_KEYTAB]
-
-    kerberos_principal = None
-    if KERBEROS_PRINCIPAL in configurations:
-      kerberos_principal = configurations[KERBEROS_PRINCIPAL]
-      kerberos_principal = kerberos_principal.replace('_HOST', host_name)
-
-    # determine whether or not SSL is enabled
-    is_ssl_enabled = False
-    if DFS_POLICY_KEY in configurations:
-      dfs_policy = configurations[DFS_POLICY_KEY]
-      if dfs_policy == "HTTPS_ONLY":
-        is_ssl_enabled = True
-
-    kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
-
-    name_service = configurations[NAMESERVICE_KEY]
-
-    # look for dfs.ha.namenodes.foo
-    nn_unique_ids_key = 'dfs.ha.namenodes.' + name_service
-    if not nn_unique_ids_key in hdfs_site:
-      return (RESULT_STATE_UNKNOWN, ['Unable to find unique NameNode alias key {0}'.format(nn_unique_ids_key)])
-
-    namenode_http_fragment = 'dfs.namenode.http-address.{0}.{1}'
-    jmx_uri_fragment = "http://{0}/jmx?qry=Hadoop:service=NameNode,name=*"
-
-    if is_ssl_enabled:
-      namenode_http_fragment = 'dfs.namenode.https-address.{0}.{1}'
-      jmx_uri_fragment = "https://{0}/jmx?qry=Hadoop:service=NameNode,name=*"
-
-    # now we have something like 'nn1,nn2,nn3,nn4'
-    # turn it into dfs.namenode.[property].[dfs.nameservices].[nn_unique_id]
-    # ie dfs.namenode.http-address.hacluster.nn1
-    namenodes = []
-    active_namenodes = []
-    nn_unique_ids = hdfs_site[nn_unique_ids_key].split(',')
-    for nn_unique_id in nn_unique_ids:
-      key = namenode_http_fragment.format(name_service, nn_unique_id)
-
-      if key in hdfs_site:
-        # use str() to ensure that unicode strings do not have the u' in them
-        value = str(hdfs_site[key])
-        namenode = str(hdfs_site[key]).split(":")[0]
-
-        namenodes.append(namenode)
-        try:
-          jmx_uri = jmx_uri_fragment.format(value)
-          if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
-            env = Environment.get_instance()
-
-            # curl requires an integer timeout
-            curl_connection_timeout = int(connection_timeout)
-            state_response, error_msg, time_millis = curl_krb_request(env.tmp_dir,
-              kerberos_keytab, kerberos_principal, jmx_uri,"ha_nn_health", executable_paths, False,
-              "NameNode High Availability Health", smokeuser, connection_timeout=curl_connection_timeout,
-              kinit_timer_ms = kinit_timer_ms)
-
-            state = _get_ha_state_from_json(state_response)
-          else:
-            state_response = get_jmx(jmx_uri, connection_timeout)
-            state = _get_ha_state_from_json(state_response)
-
-          if state == HDFS_NN_STATE_ACTIVE:
-            active_namenodes.append(namenode)
-
-            # Only check active NN
-            nn_service_rpc_address_key = 'dfs.namenode.servicerpc-address.{0}.{1}'.format(name_service, nn_unique_id)
-            if nn_service_rpc_address_key in hdfs_site:
-              namenode_service_rpc_address = hdfs_site[nn_service_rpc_address_key]
-          pass
-        except:
-          logger.exception("Unable to determine the active NameNode")
-    pass
-
-    if merge_ha_metrics:
-      hostnames = ",".join(namenodes)
-      # run only on active NN, no need to run the same requests from the standby
-      if host_name not in active_namenodes:
-        return (RESULT_STATE_SKIPPED, ['This alert will be reported by another host.'])
-    pass
-
-  # Skip service rpc alert if port is not enabled
-  if not namenode_service_rpc_address and 'rpc.rpc.datanode' in metric_name:
-    return (RESULT_STATE_SKIPPED, ['Service RPC port is not enabled.'])
-
-  get_metrics_parameters = {
-    "metricNames": metric_name,
-    "appId": app_id,
-    "hostname": hostnames,
-    "startTime": current_time - interval * 60 * 1000,
-    "endTime": current_time,
-    "grouped": "true",
-    }
-
-  encoded_get_metrics_parameters = urllib.urlencode(get_metrics_parameters)
-
-  try:
-    conn = httplib.HTTPConnection(collector_host, int(collector_port),
-                                  timeout=connection_timeout)
-    conn.request("GET", AMS_METRICS_GET_URL % encoded_get_metrics_parameters)
-    response = conn.getresponse()
-    data = response.read()
-    conn.close()
-  except Exception:
-    return (RESULT_STATE_UNKNOWN, ["Unable to retrieve metrics from the Ambari Metrics service."])
-
-  if response.status != 200:
-    return (RESULT_STATE_UNKNOWN, ["Unable to retrieve metrics from the Ambari Metrics service."])
-
-  data_json = json.loads(data)
-  metrics = []
-  # will get large standard deviation for multiple hosts,
-  # if host1 reports small local values, but host2 reports large local values
-  for metrics_data in data_json["metrics"]:
-    metrics += metrics_data["metrics"].values()
-  pass
-
-  if not metrics or len(metrics) < 2:
-    number_of_data_points = len(metrics) if metrics else 0
-    return (RESULT_STATE_SKIPPED, ["There are not enough data points to calculate the standard deviation ({0} sampled)".format(
-      number_of_data_points)])
-
-  minimum_value_multiplier = 1
-  if 'dfs.FSNamesystem.CapacityUsed' in metric_name:
-    minimum_value_multiplier = 1024 * 1024  # MB to bytes
-  elif 'rpc.rpc.datanode' in metric_name or 'rpc.rpc.client' in metric_name:
-    minimum_value_multiplier = 1000  # seconds to millis
-
-  if minimum_value_threshold:
-    # Filter out points below min threshold
-    metrics = [metric for metric in metrics if metric > (minimum_value_threshold * minimum_value_multiplier)]
-    if len(metrics) < 2:
-      return (RESULT_STATE_OK, ['There were no data points above the minimum threshold of {0} seconds'.format(minimum_value_threshold)])
-
-  mean_value = mean(metrics)
-  stddev = sample_standard_deviation(metrics)
-
-  try:
-    deviation_percent = stddev / float(mean_value) * 100
-  except ZeroDivisionError:
-    # should not be a case for this alert
-    return (RESULT_STATE_SKIPPED, ["Unable to calculate the standard deviation because the mean value is 0"])
-
-  # log the AMS request
-  if logger.isEnabledFor(logging.DEBUG):
-    logger.debug("""
-    AMS request parameters - {0}
-    AMS response - {1}
-    Mean - {2}
-    Standard deviation - {3}
-    Percentage standard deviation - {4}
-    """.format(encoded_get_metrics_parameters, data_json, mean_value, stddev, deviation_percent))
-
-  mean_value_localized = locale.format("%.0f", mean_value, grouping=True)
-
-  variance_value = (deviation_percent / 100.0) * mean_value
-  variance_value_localized = locale.format("%.0f", variance_value, grouping=True)
-
-  # check for CRITICAL status
-  if deviation_percent > critical_threshold:
-    threshold_value = ((critical_threshold / 100.0) * mean_value)
-    threshold_value_localized = locale.format("%.0f", threshold_value, grouping=True)
-
-    message = DEVIATION_THRESHOLD_MESSAGE.format(variance_value_localized, metric_units, deviation_percent,
-      mean_value_localized, metric_units, threshold_value_localized, metric_units)
-
-    return (RESULT_STATE_CRITICAL,[message])
-
-  # check for WARNING status
-  if deviation_percent > warning_threshold:
-    threshold_value = ((warning_threshold / 100.0) * mean_value)
-    threshold_value_localized = locale.format("%.0f", threshold_value, grouping = True)
-
-    message = DEVIATION_THRESHOLD_MESSAGE.format(variance_value_localized, metric_units, deviation_percent,
-      mean_value_localized, metric_units, threshold_value_localized, metric_units)
-
-    return (RESULT_STATE_WARNING, [message])
-
-  # return OK status; use the warning threshold as the value to compare against
-  threshold_value = ((warning_threshold / 100.0) * mean_value)
-  threshold_value_localized = locale.format("%.0f", threshold_value, grouping = True)
-
-  message = DEVIATION_OK_MESSAGE.format(variance_value_localized, metric_units, warning_threshold,
-    mean_value_localized, metric_units, threshold_value_localized, metric_units)
-
-  return (RESULT_STATE_OK,[message])
-
-
-def valid_collector_webapp_address(webapp_address):
-  if len(webapp_address) == 2 \
-    and webapp_address[0] != '127.0.0.1' \
-    and webapp_address[1].isdigit():
-    return True
-
-  return False
-
-
-def get_jmx(query, connection_timeout):
-  response = None
-
-  try:
-    response = urllib2.urlopen(query, timeout=connection_timeout)
-    json_data = response.read()
-    return json_data
-  except Exception:
-    return {"beans": {}}
-  finally:
-    if response is not None:
-      try:
-        response.close()
-      except:
-        pass
-
-def _get_ha_state_from_json(string_json):
-  """
-  Searches through the specified JSON string looking for HA state
-  enumerations.
-  :param string_json: the string JSON
-  :return:  the value of the HA state (active, standby, etc)
-  """
-  json_data = json.loads(string_json)
-  jmx_beans = json_data["beans"]
-
-  # look for NameNodeStatus-State first
-  for jmx_bean in jmx_beans:
-    if "name" not in jmx_bean:
-      continue
-
-    jmx_bean_name = jmx_bean["name"]
-    if jmx_bean_name == "Hadoop:service=NameNode,name=NameNodeStatus" and "State" in jmx_bean:
-      return jmx_bean["State"]
-
-  # look for FSNamesystem-tag.HAState last
-  for jmx_bean in jmx_beans:
-    if "name" not in jmx_bean:
-      continue
-
-    jmx_bean_name = jmx_bean["name"]
-    if jmx_bean_name == "Hadoop:service=NameNode,name=FSNamesystem":
-      return jmx_bean["tag.HAState"]
-
-
-def _coerce_to_integer(value):
-  """
-  Attempts to correctly coerce a value to an integer. For the case of an integer or a float,
-  this will essentially either no-op or return a truncated value. If the parameter is a string,
-  then it will first be coerced as an integer and, failing that, as a float.
-  :param value: the value to coerce
-  :return: the coerced value as an integer
-  """
-  try:
-    return int(value)
-  except ValueError:
-    return int(float(value))
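At its core the deviation alert is a small statistical check: it pulls the sampled values for one metric from AMS, computes the sample standard deviation as a percentage of the mean, and compares that percentage with the warning and critical thresholds. A stripped-down sketch of the arithmetic, with made-up thresholds and samples:

    import math

    def deviation_status(values, warning_pct=5, critical_pct=10):
        if len(values) < 2:
            return ("SKIPPED", "not enough data points")
        mean_value = sum(values) / float(len(values))
        if mean_value == 0:
            return ("SKIPPED", "mean is 0")
        # sample standard deviation (n - 1 in the denominator)
        variance = sum((v - mean_value) ** 2 for v in values) / float(len(values) - 1)
        deviation_pct = math.sqrt(variance) / mean_value * 100
        if deviation_pct > critical_pct:
            return ("CRITICAL", deviation_pct)
        if deviation_pct > warning_pct:
            return ("WARNING", deviation_pct)
        return ("OK", deviation_pct)

    print(deviation_status([100.0, 101.0, 99.0, 100.0]))   # OK, deviation under 1%
    print(deviation_status([100.0, 150.0, 60.0, 100.0]))   # CRITICAL, deviation around 36%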

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/alerts/alert_upgrade_finalized.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/alerts/alert_upgrade_finalized.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/alerts/alert_upgrade_finalized.py
deleted file mode 100644
index 427f1d1..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/alerts/alert_upgrade_finalized.py
+++ /dev/null
@@ -1,179 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import urllib2
-import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
-import logging
-import traceback
-
-from resource_management.libraries.functions.curl_krb_request import curl_krb_request
-from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
-from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
-from resource_management.libraries.functions.curl_krb_request import CONNECTION_TIMEOUT_DEFAULT
-from resource_management.core.environment import Environment
-from resource_management.libraries.functions.namenode_ha_utils import get_all_namenode_addresses
-
-NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
-NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
-NN_HTTP_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
-
-HDFS_SITE_KEY = '{{hdfs-site}}'
-KERBEROS_KEYTAB = '{{hdfs-site/dfs.web.authentication.kerberos.keytab}}'
-KERBEROS_PRINCIPAL = '{{hdfs-site/dfs.web.authentication.kerberos.principal}}'
-SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
-SMOKEUSER_KEY = "{{cluster-env/smokeuser}}"
-EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
-logger = logging.getLogger('ambari_alerts')
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-
-  :rtype tuple
-  """
-  return (HDFS_SITE_KEY, NN_HTTP_ADDRESS_KEY, NN_HTTPS_ADDRESS_KEY, NN_HTTP_POLICY_KEY, EXECUTABLE_SEARCH_PATHS,
-          KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY, SMOKEUSER_KEY)
-
-
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations : a mapping of configuration key to value
-  parameters : a mapping of script parameter key to value
-  host_name : the name of this host where the alert is running
-
-  :type configurations dict
-  :type parameters dict
-  :type host_name str
-  """
-
-  if configurations is None:
-    return (('UNKNOWN', ['There were no configurations supplied to the script.']))
-
-  uri = None
-  http_policy = 'HTTP_ONLY'
-
-  # hdfs-site is required
-  if not HDFS_SITE_KEY in configurations:
-    return 'SKIPPED', ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)]
-
-  if NN_HTTP_POLICY_KEY in configurations:
-    http_policy = configurations[NN_HTTP_POLICY_KEY]
-
-  if SMOKEUSER_KEY in configurations:
-    smokeuser = configurations[SMOKEUSER_KEY]
-
-  executable_paths = None
-  if EXECUTABLE_SEARCH_PATHS in configurations:
-    executable_paths = configurations[EXECUTABLE_SEARCH_PATHS]
-
-  security_enabled = False
-  if SECURITY_ENABLED_KEY in configurations:
-    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
-
-  kerberos_keytab = None
-  if KERBEROS_KEYTAB in configurations:
-    kerberos_keytab = configurations[KERBEROS_KEYTAB]
-
-  kerberos_principal = None
-  if KERBEROS_PRINCIPAL in configurations:
-    kerberos_principal = configurations[KERBEROS_PRINCIPAL]
-    kerberos_principal = kerberos_principal.replace('_HOST', host_name)
-
-  kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
-
-  # determine the right URI and whether to use SSL
-  hdfs_site = configurations[HDFS_SITE_KEY]
-
-  scheme = "https" if http_policy == "HTTPS_ONLY" else "http"
-
-  nn_addresses = get_all_namenode_addresses(hdfs_site)
-  for nn_address in nn_addresses:
-    if nn_address.startswith(host_name + ":") or nn_address == host_name:
-      uri = nn_address
-      break
-  if not uri:
-    return 'SKIPPED', [
-      'NameNode on host {0} not found (namenode addresses = {1})'.format(host_name, ', '.join(nn_addresses))]
-
-  upgrade_finalized_qry = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo".format(scheme, uri)
-
-  # start out assuming an OK status
-  label = None
-  result_code = "OK"
-
-  try:
-    if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
-      env = Environment.get_instance()
-
-      last_checkpoint_time_response, error_msg, time_millis = curl_krb_request(
-        env.tmp_dir, kerberos_keytab,
-        kerberos_principal, upgrade_finalized_qry, "upgrade_finalized_state", executable_paths, False,
-        "HDFS Upgrade Finalized State", smokeuser, kinit_timer_ms = kinit_timer_ms
-       )
-
-      upgrade_finalized_response_json = json.loads(last_checkpoint_time_response)
-      upgrade_finalized = bool(upgrade_finalized_response_json["beans"][0]["UpgradeFinalized"])
-
-    else:
-      upgrade_finalized = bool(get_value_from_jmx(upgrade_finalized_qry,
-                                                    "UpgradeFinalized"))
-
-    if upgrade_finalized:
-      label = "HDFS cluster is not in the upgrade state"
-      result_code = 'OK'
-    else:
-      label = "HDFS cluster is not finalized"
-      result_code = 'CRITICAL'
-
-  except:
-    label = traceback.format_exc()
-    result_code = 'UNKNOWN'
-
-  return ((result_code, [label]))
-
-def get_value_from_jmx(query, jmx_property):
-  """
-   Read a property from the JMX endpoint
-
-  :param query: jmx uri path
-  :param jmx_property: property name to read
-  :return: jmx property value
-  
-  :type query str
-  :type jmx_property str
-  """
-  response = None
-
-  try:
-    response = urllib2.urlopen(query, timeout=int(CONNECTION_TIMEOUT_DEFAULT))
-    data = response.read()
-
-    data_dict = json.loads(data)
-    return data_dict["beans"][0][jmx_property]
-  finally:
-    if response is not None:
-      try:
-        response.close()
-      except:
-        pass
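The upgrade-finalized alert above hinges on a single boolean, UpgradeFinalized, in the NameNodeInfo JMX bean: true means no HDFS upgrade is pending, false means the cluster is still waiting to be finalized (hdfs dfsadmin -finalizeUpgrade). A small sketch of that mapping, run against an illustrative payload rather than a live NameNode:

    import json

    SAMPLE_NAMENODE_INFO = '{"beans": [{"name": "Hadoop:service=NameNode,name=NameNodeInfo", "UpgradeFinalized": false}]}'

    def upgrade_alert_state(payload):
        upgrade_finalized = bool(json.loads(payload)["beans"][0]["UpgradeFinalized"])
        if upgrade_finalized:
            return ("OK", "HDFS cluster is not in the upgrade state")
        return ("CRITICAL", "HDFS cluster is not finalized")

    print(upgrade_alert_state(SAMPLE_NAMENODE_INFO))   # ('CRITICAL', 'HDFS cluster is not finalized')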

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/files/checkWebUI.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/files/checkWebUI.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/files/checkWebUI.py
deleted file mode 100644
index ddeb116..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/files/checkWebUI.py
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import optparse
-import httplib
-import socket
-import ssl
-
-class TLS1HTTPSConnection(httplib.HTTPSConnection):
-  """
-  Some Python implementations do not work correctly with SSLv3 yet still try to use it, so we need to switch the
-  protocol to TLSv1.
-  """
-  def __init__(self, host, port, **kwargs):
-    httplib.HTTPSConnection.__init__(self, host, port, **kwargs)
-
-  def connect(self):
-    sock = socket.create_connection((self.host, self.port), self.timeout)
-    if getattr(self, '_tunnel_host', None):
-      self.sock = sock
-      self._tunnel()
-    self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1)
-
-def make_connection(host, port, https):
-  conn = None
-  try:
-    conn = httplib.HTTPConnection(host, port) if not https else httplib.HTTPSConnection(host, port)
-    conn.request("GET", "/")
-    return conn.getresponse().status
-  except ssl.SSLError:
-    # got an SSL error; retry with the protocol pinned to TLSv1, which may still work
-    tls1_conn = None
-    try:
-      tls1_conn = TLS1HTTPSConnection(host, port)
-      tls1_conn.request("GET", "/")
-      return tls1_conn.getresponse().status
-    except Exception as e:
-      print e
-    finally:
-      if tls1_conn is not None:
-        tls1_conn.close()
-  except Exception as e:
-    print e
-  finally:
-    if conn is not None:
-      conn.close()
-#
-# Main.
-#
-def main():
-  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
-  parser.add_option("-m", "--hosts", dest="hosts", help="Comma-separated list of hosts whose web UI availability should be checked")
-  parser.add_option("-p", "--port", dest="port", help="Port of the web UI to check for availability")
-  parser.add_option("-s", "--https", dest="https", help="\"True\" if value of dfs.http.policy is \"HTTPS_ONLY\"")
-
-  (options, args) = parser.parse_args()
-  
-  hosts = options.hosts.split(',')
-  port = options.port
-  https = options.https
-
-  for host in hosts:
-    httpCode = make_connection(host, port, https.lower() == "true")
-
-    if httpCode != 200:
-      print "Cannot access WEB UI on: http://" + host + ":" + port if not https.lower() == "true" else "Cannot access WEB UI on: https://" + host + ":" + port
-      exit(1)
-
-if __name__ == "__main__":
-  main()
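The part of checkWebUI.py worth calling out is the SSL fallback: a plain HTTPSConnection is tried first, and if the handshake fails with an SSLError the request is retried over a connection pinned to TLSv1. A condensed sketch of that retry pattern in the same Python 2 style as the script (host and port are placeholders):

    import httplib
    import socket
    import ssl

    class TLSv1HTTPSConnection(httplib.HTTPSConnection):
        # same idea as TLS1HTTPSConnection above: force the TLSv1 protocol during connect
        def connect(self):
            sock = socket.create_connection((self.host, self.port), self.timeout)
            self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
                                        ssl_version=ssl.PROTOCOL_TLSv1)

    def https_status_with_tls_fallback(host, port):
        conn = httplib.HTTPSConnection(host, port)
        try:
            conn.request("GET", "/")
            return conn.getresponse().status
        except ssl.SSLError:
            # default handshake failed (e.g. an SSLv3-only negotiation); retry pinned to TLSv1
            conn.close()
            conn = TLSv1HTTPSConnection(host, port)
            conn.request("GET", "/")
            return conn.getresponse().status
        finally:
            conn.close()

A real invocation then loops over the hosts passed via --hosts and treats any status other than HTTP 200 as a failure, as main() above does.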

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/__init__.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/__init__.py
deleted file mode 100644
index 35de4bb..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""


[18/19] ambari git commit: AMBARI-19229. Remove HDP-3.0.0 stack definition from Ambari-2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/core-site.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/core-site.xml
deleted file mode 100644
index 20b1930..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/core-site.xml
+++ /dev/null
@@ -1,224 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
- 
-        http://www.apache.org/licenses/LICENSE-2.0
- 
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
- -->
-<!-- Put site-specific property overrides in this file. -->
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
-  <property>
-    <name>ha.failover-controller.active-standby-elector.zk.op.retries</name>
-    <value>120</value>
-    <description>ZooKeeper Failover Controller retries setting for your environment</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- i/o properties -->
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-    <description> A list of comma-delimited serialization classes that can be used for obtaining serializers and deserializers.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>io.compression.codecs</name>
-    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
-    <description>A list of the compression codec classes that can be used
-                 for compression/decompression.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- file system properties -->
-  <property>
-    <name>fs.defaultFS</name>
-    <!-- cluster variant -->
-    <value>hdfs://localhost:8020</value>
-    <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for HDFS.</description>
-    <final>true</final>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>fs.trash.interval</name>
-    <value>360</value>
-    <description>Number of minutes after which the checkpoint gets deleted.
-        If zero, the trash feature is disabled.
-        This option may be configured both on the server and the client.
-        If trash is disabled server side then the client side configuration is checked.
-        If trash is enabled on the server side then the value configured on the server is used and the client configuration value is ignored.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- ipc properties: copied from kryptonite configuration -->
-  <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-    <description>Defines the threshold number of connections after which
-               connections will be inspected for idleness.
-  </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-    <description>The maximum time after which a client will bring down the
-               connection to the server.
-  </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-    <description>Defines the maximum number of retries for IPC connections.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>ipc.server.tcpnodelay</name>
-    <value>true</value>
-    <description>Turn on/off Nagle's algorithm for the TCP socket
-      connection on
-      the server. Setting to true disables the algorithm and may
-      decrease latency
-      with a cost of more/smaller packets.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- Web Interface Configuration -->
-  <property>
-    <name>mapreduce.jobtracker.webinterface.trusted</name>
-    <value>false</value>
-    <description> If set to true, the web interfaces of JT and NN may contain
-                actions, such as kill job, delete file, etc., that should
-                not be exposed to the public. Enable this option if the interfaces
-                are only reachable by those who have the right authorization.
-  </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>hadoop.security.authentication</name>
-    <value>simple</value>
-    <description>
-   Set the authentication for the cluster. Valid values are: simple or
-   kerberos.
-   </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>hadoop.security.authorization</name>
-    <value>false</value>
-    <description>
-     Enable authorization for different protocols.
-  </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value>DEFAULT</value>
-    <description>The mapping from Kerberos principal names to local OS user names.
-  The default rule is just "DEFAULT", which maps all principals in your default domain to their first component:
-  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" both become "omalley" if your default domain is APACHE.ORG.
-The translations rules have 3 sections:
-      base     filter    substitution
-The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
-
-[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-The filter is a regex in parens that must match the generated string for the rule to apply.
-
-"(.*%admin)" will take any string that ends in "%admin"
-"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-Finally, the substitution is a sed rule to translate a regex into a fixed string.
-
-"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-"s/X/Y/g" replaces all of the "X" in the name with "Y"
-
-So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
-
-RULE:[1:$1@$0](.@ACME.ORG)s/@.//
-DEFAULT
-
-To also translate the names with a second component, you'd make the rules:
-
-RULE:[1:$1@$0](.@ACME.ORG)s/@.//
-RULE:[2:$1@$0](.@ACME.ORG)s/@.//
-DEFAULT
-
-If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-RULE[2:$1%$2@$0](.%admin@APACHE.ORG)s/./admin/
-DEFAULT
-    </description>
-    <value-attributes>
-      <type>multiLine</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>net.topology.script.file.name</name>
-    <value>/etc/hadoop/conf/topology_script.py</value>
-    <description>
-      Location of topology script used by Hadoop to determine the rack location of nodes.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- These configs were inherited from HDP 2.2 -->
-  <property>
-    <name>hadoop.http.authentication.simple.anonymous.allowed</name>
-    <value>true</value>
-    <description>
-      Indicates if anonymous requests are allowed when using &apos;simple&apos; authentication.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>hadoop.security.key.provider.path</name>
-    <value/>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hadoop-env</type>
-        <name>keyserver_host</name>
-      </property>
-      <property>
-        <type>hadoop-env</type>
-        <name>keyserver_port</name>
-      </property>
-      <property>
-        <type>kms-env</type>
-        <name>kms_port</name>
-      </property>
-      <property>
-        <type>ranger-kms-site</type>
-        <name>ranger.service.https.attrib.ssl.enabled</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
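One entry above that is easy to gloss over is net.topology.script.file.name: Hadoop invokes that script with one or more host names or IP addresses as arguments and expects one rack path per argument on stdout, which is how rack awareness reaches HDFS block placement. A minimal sketch of such a script, assuming a flat layout where unknown hosts fall into /default-rack (a real topology_script.py would consult a site-specific host-to-rack table):

    #!/usr/bin/env python
    # print one rack path per host argument, defaulting to /default-rack
    import sys

    RACK_BY_HOST = {}              # site-specific host -> rack mapping would go here
    DEFAULT_RACK = "/default-rack"

    def main():
        for host in sys.argv[1:]:
            print(RACK_BY_HOST.get(host, DEFAULT_RACK))

    if __name__ == "__main__":
        main()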

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-env.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-env.xml
deleted file mode 100644
index 24032fa..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-env.xml
+++ /dev/null
@@ -1,421 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_adding_forbidden="true">
-  <property>
-    <name>hdfs_log_dir_prefix</name>
-    <value>/var/log/hadoop</value>
-    <description>Hadoop Log Dir Prefix</description>
-    <display-name>Hadoop Log Dir Prefix</display-name>
-    <value-attributes>
-      <type>directory</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>hadoop_pid_dir_prefix</name>
-    <value>/var/run/hadoop</value>
-    <display-name>Hadoop PID Dir Prefix</display-name>
-    <description>Hadoop PID Dir Prefix</description>
-    <value-attributes>
-      <type>directory</type>
-      <overridable>false</overridable>
-      <editable-only-at-install>true</editable-only-at-install>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>hadoop_root_logger</name>
-    <value>INFO,RFA</value>
-    <display-name>Hadoop Root Logger</display-name>
-    <description>Hadoop Root Logger</description>
-    <value-attributes>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-    <display-name>Hadoop maximum Java heap size</display-name>
-    <value-attributes>
-      <type>int</type>
-      <unit>MB</unit>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>namenode_heapsize</name>
-    <value>1024</value>
-    <description>NameNode Java heap size</description>
-    <display-name>NameNode Java heap size</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>268435456</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hdfs-site</type>
-        <name>dfs.datanode.data.dir</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>namenode_opt_newsize</name>
-    <value>200</value>
-    <description>Default size of the Java new generation for the NameNode (Java option -XX:NewSize). This should be 1/8 of the maximum heap size (-Xmx); ensure that namenode_opt_newsize is set to 1/8 of namenode_heapsize.</description>
-    <display-name>NameNode new generation size</display-name>
-    <depends-on>
-      <property>
-        <type>hadoop-env</type>
-        <name>namenode_heapsize</name>
-      </property>
-    </depends-on>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>16384</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>namenode_opt_maxnewsize</name>
-    <value>200</value>
-    <description>NameNode maximum new generation size</description>
-    <display-name>NameNode maximum new generation size</display-name>
-    <depends-on>
-      <property>
-        <type>hadoop-env</type>
-        <name>namenode_heapsize</name>
-      </property>
-    </depends-on>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>16384</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>namenode_opt_permsize</name>
-    <value>128</value>
-    <description>NameNode permanent generation size</description>
-    <display-name>NameNode permanent generation size</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>2096</maximum>
-      <unit>MB</unit>
-      <increment-step>128</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>namenode_opt_maxpermsize</name>
-    <value>256</value>
-    <description>NameNode maximum permanent generation size</description>
-    <display-name>NameNode maximum permanent generation size</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>2096</maximum>
-      <unit>MB</unit>
-      <increment-step>128</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dtnode_heapsize</name>
-    <value>1024</value>
-    <description>DataNode maximum Java heap size</description>
-    <display-name>DataNode maximum Java heap size</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>268435456</maximum>
-      <unit>MB</unit>
-      <increment-step>128</increment-step>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>proxyuser_group</name>
-    <display-name>Proxy User Group</display-name>
-    <value>users</value>
-    <property-type>GROUP</property-type>
-    <description>Proxy user group.</description>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>hdfs_user</name>
-    <display-name>HDFS User</display-name>
-    <value>hdfs</value>
-    <property-type>USER</property-type>
-    <description>User to run HDFS as</description>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>hdfs_tmp_dir</name>
-    <value>/tmp</value>
-    <description>HDFS tmp Dir</description>
-    <display-name>HDFS tmp Dir</display-name>
-    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
-    <value-attributes>
-      <read-only>true</read-only>
-      <overridable>false</overridable>
-      <visible>false</visible>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>hdfs_user_nofile_limit</name>
-    <value>128000</value>
-    <description>Max open files limit setting for HDFS user.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>hdfs_user_nproc_limit</name>
-    <value>65536</value>
-    <description>Max number of processes limit setting for HDFS user.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>namenode_backup_dir</name>
-    <description>Local directory for storing backup copy of NameNode images during upgrade</description>
-    <value>/tmp/upgrades</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>hdfs_user_keytab</name>
-    <description>HDFS keytab path</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>hdfs_principal_name</name>
-    <description>HDFS principal name</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- These configs were inherited from HDP 2.2 -->
-  <property>
-    <name>keyserver_host</name>
-    <value> </value>
-    <display-name>Key Server Host</display-name>
-    <description>Hostnames where Key Management Server is installed</description>
-    <value-attributes>
-      <type>string</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>keyserver_port</name>
-    <value/>
-    <display-name>Key Server Port</display-name>
-    <description>Port number where Key Management Server is available</description>
-    <value-attributes>
-      <type>int</type>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- These configs were inherited from HDP 2.3 -->
-  <!-- hadoop-env.sh -->
-  <property>
-    <name>content</name>
-    <display-name>hadoop-env template</display-name>
-    <description>This is the jinja template for hadoop-env.sh file</description>
-    <value>
-      # Set Hadoop-specific environment variables here.
-
-      # The only required environment variable is JAVA_HOME.  All others are
-      # optional.  When running a distributed configuration it is best to
-      # set JAVA_HOME in this file, so that it is correctly defined on
-      # remote nodes.
-
-      # The java implementation to use.  Required.
-      export JAVA_HOME={{java_home}}
-      export HADOOP_HOME_WARN_SUPPRESS=1
-
-      # Hadoop home directory
-      export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
-
-      # Hadoop Configuration Directory
-      #TODO: if env var set that can cause problems
-      export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
-
-
-      # Path to jsvc required by secure datanode
-      export JSVC_HOME={{jsvc_path}}
-
-
-      # The maximum amount of heap to use, in MB. Default is 1000.
-      if [[ ("$SERVICE" = "hiveserver2") || ("$SERVICE" = "metastore") || ( "$SERVICE" = "cli") ]]; then
-      if [ "$HADOOP_HEAPSIZE" = "" ]; then
-      export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
-      fi
-      else
-      export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
-      fi
-
-
-      export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
-
-      # Extra Java runtime options.  Empty by default.
-      export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
-
-      # Command specific options appended to HADOOP_OPTS when specified
-
-      {% if java_version &lt; 8 %}
-      export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1  ${HADOOP_NAMENODE_OPTS}"
-      export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
-
-      # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-      export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
-      {% else %}
-      export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
-      export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
-
-      # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-      export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-      {% endif %}
-      HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
-
-      HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
-      HADOOP_DATANODE_OPTS="-XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:ConcGCThreads=4 -XX:+UseConcMarkSweepGC -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
-      HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
-
-      # On secure datanodes, user to run the datanode as after dropping privileges
-      export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
-
-      # Extra ssh options.  Empty by default.
-      export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
-
-      # Where log files are stored.  $HADOOP_HOME/logs by default.
-      export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
-
-      # History server logs
-      export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
-
-      # Where log files are stored in the secure data environment.
-      export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
-
-      # File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
-      # export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
-
-      # host:path where hadoop code should be rsync'd from.  Unset by default.
-      # export HADOOP_MASTER=master:/home/$USER/src/hadoop
-
-      # Seconds to sleep between slave commands.  Unset by default.  This
-      # can be useful in large clusters, where, e.g., slave rsyncs can
-      # otherwise arrive faster than the master can service them.
-      # export HADOOP_SLAVE_SLEEP=0.1
-
-      # The directory where pid files are stored. /tmp by default.
-      export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
-      export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
-
-      # History server pid
-      export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
-
-      YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY -Drm.audit.logger=INFO,RMAUDIT"
-
-      # A string representing this instance of hadoop. $USER by default.
-      export HADOOP_IDENT_STRING=$USER
-
-      # The scheduling priority for daemon processes.  See 'man nice'.
-
-      # export HADOOP_NICENESS=10
-
-      # Add database libraries
-      JAVA_JDBC_LIBS=""
-      if [ -d "/usr/share/java" ]; then
-      for jarFile in `ls /usr/share/java | grep -E "(mysql|ojdbc|postgresql|sqljdbc)" 2&gt;/dev/null`
-      do
-      JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-      done
-      fi
-
-      # Add libraries required by nodemanager
-      MAPREDUCE_LIBS={{mapreduce_libs_path}}
-
-      # Add libraries to the hadoop classpath - some may not need a colon as they already include it
-      export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
-
-      if [ -d "/usr/lib/tez" ]; then
-      export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
-      fi
-
-      # Setting path to hdfs command line
-      export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
-
-      #Mostly required for hadoop 2.0
-      export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
-
-      {% if is_datanode_max_locked_memory_set %}
-      # Temporary workaround: the ulimit set in the conf files is not picked up without a full relogin.
-      # This only makes sense when running the DataNode as root.
-      if [ "$command" == "datanode" ] &amp;&amp; [ "$EUID" -eq 0 ] &amp;&amp; [ -n "$HADOOP_SECURE_DN_USER" ]; then
-      ulimit -l {{datanode_max_locked_memory}}
-      fi
-      {% endif %}
-    </value>
-    <value-attributes>
-      <type>content</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>nfsgateway_heapsize</name>
-    <display-name>NFSGateway maximum Java heap size</display-name>
-    <value>1024</value>
-    <description>Maximum Java heap size for NFSGateway (Java option -Xmx)</description>
-    <value-attributes>
-      <type>int</type>
-      <unit>MB</unit>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>

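The content property deleted above is a Jinja template that the Ambari agent renders into hadoop-env.sh, substituting values such as {{java_home}} and {{hadoop_heapsize}} from the other hadoop-env properties. Below is a minimal sketch of that substitution step in Python, assuming the jinja2 package is installed and using made-up values; it is not Ambari's own rendering code.

    # Render a few lines of the hadoop-env template; the values are hypothetical.
    from jinja2 import Template

    snippet = (
        'export JAVA_HOME={{java_home}}\n'
        'export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"\n'
        'export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"\n'
    )
    print(Template(snippet).render(
        java_home="/usr/jdk64/jdk1.8.0_77",  # hypothetical JDK path
        hadoop_heapsize="1024",
        namenode_heapsize="1024m",
    ))
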
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml
deleted file mode 100644
index 6b45e84..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml
+++ /dev/null
@@ -1,125 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <!-- hadoop-metrics2.properties -->
-  <property>
-    <name>content</name>
-    <display-name>hadoop-metrics2.properties template</display-name>
-    <description>This is the jinja template for hadoop-metrics2.properties file</description>
-    <value>
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-{% if has_ganglia_server %}
-*.period=60
-
-*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-*.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Hook up to the server
-namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
-datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
-jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
-tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
-maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
-reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
-resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
-nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
-historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
-journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
-nimbus.sink.ganglia.servers={{ganglia_server_host}}:8649
-supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650
-
-resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
-
-{% endif %}
-
-{% if has_metric_collector %}
-
-*.period={{metrics_collection_period}}
-*.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
-*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-*.sink.timeline.period={{metrics_collection_period}}
-*.sink.timeline.sendInterval={{metrics_report_interval}}000
-*.sink.timeline.slave.host.name={{hostname}}
-*.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}
-*.sink.timeline.protocol={{metric_collector_protocol}}
-*.sink.timeline.port={{metric_collector_port}}
-
-# HTTPS properties
-*.sink.timeline.truststore.path = {{metric_truststore_path}}
-*.sink.timeline.truststore.type = {{metric_truststore_type}}
-*.sink.timeline.truststore.password = {{metric_truststore_password}}
-
-datanode.sink.timeline.collector.hosts={{ams_collector_hosts}}
-namenode.sink.timeline.collector.hosts={{ams_collector_hosts}}
-resourcemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
-nodemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
-jobhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
-journalnode.sink.timeline.collector.hosts={{ams_collector_hosts}}
-maptask.sink.timeline.collector.hosts={{ams_collector_hosts}}
-reducetask.sink.timeline.collector.hosts={{ams_collector_hosts}}
-applicationhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
-
-resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue
-
-{% if is_nn_client_port_configured %}
-# Namenode rpc ports customization
-namenode.sink.timeline.metric.rpc.client.port={{nn_rpc_client_port}}
-{% endif %}
-{% if is_nn_dn_port_configured %}
-namenode.sink.timeline.metric.rpc.datanode.port={{nn_rpc_dn_port}}
-{% endif %}
-{% if is_nn_healthcheck_port_configured %}
-namenode.sink.timeline.metric.rpc.healthcheck.port={{nn_rpc_healthcheck_port}}
-{% endif %}
-
-{% endif %}
-    </value>
-    <value-attributes>
-      <type>content</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>

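The hadoop-metrics2.properties template deleted above relies on Jinja conditionals ({% if has_ganglia_server %}, {% if has_metric_collector %}) so that whole sink blocks are emitted only when the corresponding service is present. A minimal sketch of that behaviour, assuming jinja2 is available and using illustrative values only:

    # A guarded block is written to the rendered file only when the flag is true.
    from jinja2 import Template

    tmpl = Template(
        "{% if has_metric_collector %}"
        "*.sink.timeline.collector.hosts={{ams_collector_hosts}}\n"
        "{% endif %}"
    )
    print(tmpl.render(has_metric_collector=True,
                      ams_collector_hosts="metrics.example.com"))   # line emitted
    print(repr(tmpl.render(has_metric_collector=False)))            # '' -> block dropped
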
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-policy.xml
deleted file mode 100644
index 8e9486d..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,130 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<!-- Put site-specific property overrides in this file. -->
-<configuration supports_final="true">
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>security.inter.tracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
-    communicate with the jobtracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>security.job.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for JobSubmissionProtocol, used by job clients to
-    communicate with the jobtracker for job submission, querying job status etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>security.job.task.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    user-to-group mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank. For
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>

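Every ACL value in the hadoop-policy.xml deleted above uses the same format described in the property texts: a comma-separated user list, a blank, then a comma-separated group list, with "*" meaning all users. A minimal parsing sketch in Python, meant only to illustrate the format (it is not Hadoop's AccessControlList implementation):

    def parse_acl(acl):
        """Parse "alice,bob users,wheel" into user and group lists; "*" allows everyone."""
        acl = acl.strip()
        if acl == "*":
            return {"all_users": True, "users": [], "groups": []}
        users_part, _, groups_part = acl.partition(" ")
        return {
            "all_users": False,
            "users": [u for u in users_part.split(",") if u],
            "groups": [g for g in groups_part.split(",") if g],
        }

    print(parse_acl("alice,bob users,wheel"))
    print(parse_acl("hadoop"))  # single-token value, as in the admin/refresh ACLs above
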
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml
deleted file mode 100644
index 37b339e..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml
+++ /dev/null
@@ -1,226 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <!-- These configs were inherited from HDP 2.2 -->
-  <property>
-    <name>content</name>
-    <display-name>hdfs-log4j template</display-name>
-    <description>Custom log4j.properties</description>
-    <value>
-      #
-      # Licensed to the Apache Software Foundation (ASF) under one
-      # or more contributor license agreements.  See the NOTICE file
-      # distributed with this work for additional information
-      # regarding copyright ownership.  The ASF licenses this file
-      # to you under the Apache License, Version 2.0 (the
-      # "License"); you may not use this file except in compliance
-      # with the License.  You may obtain a copy of the License at
-      #
-      #  http://www.apache.org/licenses/LICENSE-2.0
-      #
-      # Unless required by applicable law or agreed to in writing,
-      # software distributed under the License is distributed on an
-      # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-      # KIND, either express or implied.  See the License for the
-      # specific language governing permissions and limitations
-      # under the License.
-      #
-
-
-      # Define some default values that can be overridden by system properties
-      # To change daemon root logger use hadoop_root_logger in hadoop-env
-      hadoop.root.logger=INFO,console
-      hadoop.log.dir=.
-      hadoop.log.file=hadoop.log
-
-
-      # Define the root logger to the system property "hadoop.root.logger".
-      log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-      # Logging Threshold
-      log4j.threshhold=ALL
-
-      #
-      # Daily Rolling File Appender
-      #
-
-      log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-      log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-      # Rollover at midnight
-      log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-      # 30-day backup
-      #log4j.appender.DRFA.MaxBackupIndex=30
-      log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-      # Pattern format: Date LogLevel LoggerName LogMessage
-      log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-      # Debugging Pattern format
-      #log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-      #
-      # console
-      # Add "console" to rootlogger above if you want to use this
-      #
-
-      log4j.appender.console=org.apache.log4j.ConsoleAppender
-      log4j.appender.console.target=System.err
-      log4j.appender.console.layout=org.apache.log4j.PatternLayout
-      log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-      #
-      # TaskLog Appender
-      #
-
-      #Default values
-      hadoop.tasklog.taskid=null
-      hadoop.tasklog.iscleanup=false
-      hadoop.tasklog.noKeepSplits=4
-      hadoop.tasklog.totalLogFileSize=100
-      hadoop.tasklog.purgeLogSplits=true
-      hadoop.tasklog.logsRetainHours=12
-
-      log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-      log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-      log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-      log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-      log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-      log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-      #
-      #Security audit appender
-      #
-      hadoop.security.logger=INFO,console
-      hadoop.security.log.maxfilesize=256MB
-      hadoop.security.log.maxbackupindex=20
-      log4j.category.SecurityLogger=${hadoop.security.logger}
-      hadoop.security.log.file=SecurityAuth.audit
-      log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
-      log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-      log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
-      log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-      log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
-
-      log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
-      log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-      log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
-      log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-      log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
-      log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
-
-      #
-      # hdfs audit logging
-      #
-      hdfs.audit.logger=INFO,console
-      log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
-      log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-      log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
-      log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
-      log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
-      log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-      log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
-
-      #
-      # NameNode metrics logging.
-      # The default is to retain two namenode-metrics.log files up to 64MB each.
-      #
-      namenode.metrics.logger=INFO,NullAppender
-      log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
-      log4j.additivity.NameNodeMetricsLog=false
-      log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender
-      log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log
-      log4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout
-      log4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
-      log4j.appender.NNMETRICSRFA.MaxBackupIndex=1
-      log4j.appender.NNMETRICSRFA.MaxFileSize=64MB
-
-      #
-      # mapred audit logging
-      #
-      mapred.audit.logger=INFO,console
-      log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
-      log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
-      log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
-      log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
-      log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
-      log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-      log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
-
-      #
-      # Rolling File Appender
-      #
-
-      log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-      log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-      # Logfile size and number of backups
-      log4j.appender.RFA.MaxFileSize=256MB
-      log4j.appender.RFA.MaxBackupIndex=10
-
-      log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-      log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-      log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-      # Custom Logging levels
-
-      hadoop.metrics.log.level=INFO
-      #log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-      #log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-      #log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-      log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-      # Jets3t library
-      log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-      #
-      # Null Appender
-      # Trap security logger on the hadoop client side
-      #
-      log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-      #
-      # Event Counter Appender
-      # Sends counts of logging messages at different severity levels to Hadoop Metrics.
-      #
-      log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-
-      # Removes "deprecated" messages
-      log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
-
-      #
-      # HDFS block state change log from block manager
-      #
-      # Uncomment the following to suppress normal block state change
-      # messages from BlockManager in NameNode.
-      #log4j.logger.BlockStateChange=WARN
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-logsearch-conf.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-logsearch-conf.xml
deleted file mode 100644
index d85a028..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-logsearch-conf.xml
+++ /dev/null
@@ -1,248 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>HDFS</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>NAMENODE:hdfs_namenode;DATANODE:hdfs_datanode;SECONDARY_NAMENODE:hdfs_secondarynamenode;JOURNALNODE:hdfs_journalnode;ZKFC:hdfs_zkfc;NFS_GATEWAY:hdfs_nfs3</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder Config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-      "type":"hdfs_datanode",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-datanode-*.log"
-    },
-    {
-      "type":"hdfs_namenode",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-namenode-*.log"
-    },
-    {
-      "type":"hdfs_journalnode",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-journalnode-*.log"
-    },
-    {
-      "type":"hdfs_secondarynamenode",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-secondarynamenode-*.log"
-    },
-    {
-      "type":"hdfs_zkfc",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-zkfc-*.log"
-    },
-    {
-      "type":"hdfs_nfs3",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-nfs3-*.log"
-    },
-    {
-      "type":"hdfs_audit",
-      "rowtype":"audit",
-      "is_enabled":"true",
-      "add_fields":{
-        "logType":"HDFSAudit",
-        "enforcer":"hadoop-acl",
-        "repoType":"1",
-        "repo":"hdfs"
-      },
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hdfs-audit.log"
-    }
-   ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "hdfs_datanode",
-            "hdfs_journalnode",
-            "hdfs_secondarynamenode",
-            "hdfs_namenode",
-            "hdfs_zkfc",
-            "hdfs_nfs3"
-          ]
-         }
-       },
-      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-        }
-       }
-     },
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "hdfs_audit"
-          ]
-         }
-       },
-      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:evtTime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:evtTime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "evtTime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     },
-    {
-      "filter":"keyvalue",
-      "sort_order":1,
-      "conditions":{
-        "fields":{
-          "type":[
-            "hdfs_audit"
-          ]
-         }
-       },
-      "source_field":"log_message",
-      "value_split":"=",
-      "field_split":"\t",
-      "post_map_values":{
-        "src":{
-          "map_fieldname":{
-            "new_fieldname":"resource"
-          }
-         },
-        "ip":{
-          "map_fieldname":{
-            "new_fieldname":"cliIP"
-          }
-         },
-        "allowed":[
-          {
-            "map_fieldvalue":{
-              "pre_value":"true",
-              "post_value":"1"
-            }
-           },
-          {
-            "map_fieldvalue":{
-              "pre_value":"false",
-              "post_value":"0"
-            }
-           },
-          {
-            "map_fieldname":{
-              "new_fieldname":"result"
-            }
-           }
-         ],
-        "cmd":{
-          "map_fieldname":{
-            "new_fieldname":"action"
-          }
-         },
-        "proto":{
-          "map_fieldname":{
-            "new_fieldname":"cliType"
-          }
-         },
-        "callerContext":{
-          "map_fieldname":{
-            "new_fieldname":"req_caller_id"
-          }
-         }
-       }
-     },
-    {
-      "filter":"grok",
-      "sort_order":2,
-      "source_field":"ugi",
-      "remove_source_field":"false",
-      "conditions":{
-        "fields":{
-          "type":[
-            "hdfs_audit"
-          ]
-         }
-       },
-      "message_pattern":"%{USERNAME:p_user}.+auth:%{USERNAME:p_authType}.+via %{USERNAME:k_user}.+auth:%{USERNAME:k_authType}|%{USERNAME:user}.+auth:%{USERNAME:authType}|%{USERNAME:x_user}",
-      "post_map_values":{
-        "user":{
-          "map_fieldname":{
-            "new_fieldname":"reqUser"
-          }
-         },
-        "x_user":{
-          "map_fieldname":{
-            "new_fieldname":"reqUser"
-          }
-         },
-        "p_user":{
-          "map_fieldname":{
-            "new_fieldname":"reqUser"
-          }
-         },
-        "k_user":{
-          "map_fieldname":{
-            "new_fieldname":"proxyUsers"
-          }
-         },
-        "p_authType":{
-          "map_fieldname":{
-            "new_fieldname":"authType"
-          }
-         },
-        "k_authType":{
-          "map_fieldname":{
-            "new_fieldname":"proxyAuthType"
-          }
-         }
-       }
-     }
-   ]
- }
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>

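The service-log grok patterns in the Logfeeder template deleted above target the stated log4j format "%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n". A rough Python regex equivalent of that message_pattern, for illustration only (Logsearch evaluates the grok expressions, not this regex):

    import re

    # Approximates: TIMESTAMP_ISO8601  LOGLEVEL  JAVACLASS (JAVAFILE:JAVAMETHOD(INT)) - message
    LINE = re.compile(
        r"^(?P<logtime>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})\s+"
        r"(?P<level>[A-Z]+)\s+(?P<logger_name>\S+)\s+"
        r"\((?P<file>[^:]+):(?P<method>[^(]+)\((?P<line_number>\d+)\)\)\s+-\s+"
        r"(?P<log_message>.*)$"
    )

    sample = ("2016-12-19 17:48:23,123 INFO  datanode.DataNode "
              "(DataNode.java:startDataNode(1234)) - sample message")
    m = LINE.match(sample)
    print(m.groupdict() if m else "no match")
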
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-site.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-site.xml
deleted file mode 100644
index 689b6d08..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,632 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<!-- Put site-specific property overrides in this file. -->
-<configuration supports_final="true">
-  <!-- file system properties -->
-  <property>
-    <name>dfs.namenode.name.dir</name>
-    <!-- cluster variant -->
-    <value>/hadoop/hdfs/namenode</value>
-    <display-name>NameNode directories</display-name>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-    <value-attributes>
-      <type>directories</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>to enable dfs append</description>
-    <final>true</final>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>true</value>
-    <display-name>WebHDFS enabled</display-name>
-    <description>Whether to enable WebHDFS feature</description>
-    <final>true</final>
-    <value-attributes>
-      <type>boolean</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description> Number of failed disks a DataNode would tolerate before it stops offering service</description>
-    <final>true</final>
-    <display-name>DataNode failed disk tolerance</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>2</maximum>
-      <increment-step>1</increment-step>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hdfs-site</type>
-        <name>dfs.datanode.data.dir</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.datanode.data.dir</name>
-    <value>/hadoop/hdfs/data</value>
-    <display-name>DataNode directories</display-name>
-    <description>Determines where on the local filesystem a DFS data node
-      should store its blocks.  If this is a comma-delimited
-      list of directories, then data will be stored in all named
-      directories, typically on different devices.
-      Directories that do not exist are ignored.
-    </description>
-    <final>true</final>
-    <value-attributes>
-      <type>directories</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value>/etc/hadoop/conf/dfs.exclude</value>
-    <description>Names a file that contains a list of hosts that are
-      not permitted to connect to the namenode.  The full pathname of the
-      file must be specified.  If the value is empty, no hosts are
-      excluded.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!--
-    <property>
-      <name>dfs.hosts</name>
-      <value>/etc/hadoop/conf/dfs.include</value>
-      <description>Names a file that contains a list of hosts that are
-      permitted to connect to the namenode. The full pathname of the file
-      must be specified.  If the value is empty, all hosts are
-      permitted.</description>
-    </property>
-  -->
-  <property>
-    <name>dfs.namenode.checkpoint.dir</name>
-    <value>/hadoop/hdfs/namesecondary</value>
-    <display-name>SecondaryNameNode Checkpoint directories</display-name>
-    <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary images to merge.
-      If this is a comma-delimited list of directories then the image is
-      replicated in all of the directories for redundancy.
-    </description>
-    <value-attributes>
-      <type>directories</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.namenode.checkpoint.edits.dir</name>
-    <value>${dfs.namenode.checkpoint.dir}</value>
-    <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary edits to merge.
-      If this is a comma-delimited list of directories then the edits are
-      replicated in all of the directories for redundancy.
-      Default value is same as dfs.namenode.checkpoint.dir
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.namenode.checkpoint.period</name>
-    <value>21600</value>
-    <display-name>HDFS Maximum Checkpoint Delay</display-name>
-    <description>The number of seconds between two periodic checkpoints.</description>
-    <value-attributes>
-      <type>int</type>
-      <unit>seconds</unit>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.namenode.checkpoint.txns</name>
-    <value>1000000</value>
-    <description>The Secondary NameNode or CheckpointNode will create a checkpoint
-      of the namespace every 'dfs.namenode.checkpoint.txns' transactions,
-      regardless of whether 'dfs.namenode.checkpoint.period' has expired.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-    <display-name>Block replication</display-name>
-    <description>Default block replication.
-    </description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.namenode.safemode.threshold-pct</name>
-    <value>0.999</value>
-    <description>
-      Specifies the percentage of blocks that should satisfy
-      the minimal replication requirement defined by dfs.namenode.replication.min.
-      Values less than or equal to 0 mean not to start in safe mode.
-      Values greater than 1 will make safe mode permanent.
-    </description>
-    <display-name>Minimum replicated blocks %</display-name>
-    <value-attributes>
-      <type>float</type>
-      <minimum>0.990</minimum>
-      <maximum>1.000</maximum>
-      <increment-step>0.001</increment-step>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.datanode.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-      Specifies the maximum amount of bandwidth that each datanode
-      can utilize for balancing purposes, in terms of
-      the number of bytes per second.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
-    <description>
-      This property is used by HftpFileSystem.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:50010</value>
-    <description>
-      The datanode server address and port for data transfer.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:50075</value>
-    <description>
-      The datanode http server address and port.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.datanode.https.address</name>
-    <value>0.0.0.0:50475</value>
-    <description>
-      The datanode https server address and port.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.blocksize</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.namenode.http-address</name>
-    <value>localhost:50070</value>
-    <description>The address and the base port on which the NameNode web UI listens.</description>
-    <final>true</final>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.namenode.rpc-address</name>
-    <value>localhost:8020</value>
-    <description>RPC address that handles all clients requests.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.datanode.du.reserved</name>
-    <!-- cluster variant -->
-    <value>1073741824</value>
-    <display-name>Reserved space for HDFS</display-name>
-    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-    </description>
-    <value-attributes>
-      <type>int</type>
-      <unit>bytes</unit>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hdfs-site</type>
-        <name>dfs.datanode.data.dir</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.datanode.ipc.address</name>
-    <value>0.0.0.0:8010</value>
-    <description>
-      The datanode ipc server address and port.
-      If the port is 0 then the server will start on a free port.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.blockreport.initialDelay</name>
-    <value>120</value>
-    <description>Delay for first block report in seconds.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.datanode.max.transfer.threads</name>
-    <value>1024</value>
-    <description>Specifies the maximum number of threads to use for transferring data in and out of the datanode.</description>
-    <display-name>DataNode max data transfer threads</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>48000</maximum>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- Permissions configuration -->
-  <property>
-    <name>fs.permissions.umask-mode</name>
-    <value>022</value>
-    <description>
-      The octal umask used when creating files and directories.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.permissions.enabled</name>
-    <value>true</value>
-    <description>
-      If "true", enable permission checking in HDFS.
-      If "false", permission checking is turned off,
-      but all other behavior is unchanged.
-      Switching from one parameter value to the other does not change the mode,
-      owner or group of files or directories.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.permissions.superusergroup</name>
-    <value>hdfs</value>
-    <description>The name of the group of super-users.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>100</value>
-    <description>The number of NameNode server threads. Increase this to grow the RPC queue size so that more client connections are allowed.</description>
-    <display-name>NameNode Server threads</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>1</minimum>
-      <maximum>200</maximum>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-    <description>
-      If "true", access tokens are used as capabilities for accessing datanodes.
-      If "false", no access tokens are checked on accessing datanodes.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.namenode.secondary.http-address</name>
-    <value>localhost:50090</value>
-    <description>Address of secondary namenode web server</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.namenode.https-address</name>
-    <value>localhost:50470</value>
-    <description>The HTTPS address on which the NameNode binds.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>750</value>
-    <display-name>DataNode directories permission</display-name>
-    <description>The permissions that should be there on dfs.datanode.data.dir
-      directories. The datanode will not come up if the permissions are
-      different on existing dfs.datanode.data.dir directories. If the directories
-      don't exist, they will be created with this permission.</description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.namenode.accesstime.precision</name>
-    <value>0</value>
-    <display-name>Access time precision</display-name>
-    <description>The access time for an HDFS file is precise up to this value.
-      The Hadoop default is 1 hour. Setting a value of 0 disables
-      access times for HDFS.
-    </description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.cluster.administrators</name>
-    <value> hdfs</value>
-    <description>ACL specifying which users and groups can view the default servlets in HDFS.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.namenode.avoid.read.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid reading from stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.namenode.avoid.write.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid writing to stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.namenode.write.stale.datanode.ratio</name>
-    <value>1.0f</value>
-    <description>When the ratio of the number of stale datanodes to the total number of datanodes is greater
-      than this ratio, stop avoiding writes to stale nodes so as to prevent causing hotspots.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.namenode.stale.datanode.interval</name>
-    <value>30000</value>
-    <description>A DataNode is considered stale if no heartbeat has been received from it within this interval, in milliseconds.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.journalnode.http-address</name>
-    <value>0.0.0.0:8480</value>
-    <description>The address and port the JournalNode web UI listens on.
-      If the port is 0 then the server will start on a free port. </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.journalnode.https-address</name>
-    <value>0.0.0.0:8481</value>
-    <description>The address and port the JournalNode HTTPS server listens on.
-      If the port is 0 then the server will start on a free port. </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.journalnode.edits.dir</name>
-    <value>/hadoop/hdfs/journalnode</value>
-    <description>The path where the JournalNode daemon will store its local state. </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- HDFS Short-Circuit Local Reads -->
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value>true</value>
-    <display-name>HDFS Short-circuit read</display-name>
-    <description>
-      This configuration parameter turns on short-circuit local reads.
-    </description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.domain.socket.path</name>
-    <value>/var/lib/hadoop-hdfs/dn_socket</value>
-    <description>
-      This is a path to a UNIX domain socket that will be used for communication between the DataNode and local HDFS clients.
-      If the string "_PORT" is present in this path, it will be replaced by the TCP port of the DataNode.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.client.read.shortcircuit.streams.cache.size</name>
-    <value>4096</value>
-    <description>
-      The DFSClient maintains a cache of recently opened file descriptors. This
-      parameter controls the size of that cache. Setting this higher will use
-      more file descriptors, but potentially provide better performance on
-      workloads involving lots of seeks.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.namenode.name.dir.restore</name>
-    <value>true</value>
-    <description>Set to true to enable NameNode to attempt recovering a previously failed dfs.namenode.name.dir.
-      When enabled, a recovery of any failed directory is attempted during checkpoint.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.http.policy</name>
-    <value>HTTP_ONLY</value>
-    <description>
-      Decides whether HTTPS (SSL) is supported on HDFS. This configures the HTTP endpoint for HDFS daemons.
-      The following values are supported: HTTP_ONLY (service is provided only on http), HTTPS_ONLY
-      (service is provided only on https), and HTTP_AND_HTTPS (service is provided on both http and https).
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- These configs were inherited from HDP 2.1 -->
-  <property>
-    <name>dfs.namenode.audit.log.async</name>
-    <value>true</value>
-    <description>Whether to enable async auditlog</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.namenode.fslock.fair</name>
-    <value>false</value>
-    <description>Whether fsLock is fair</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- These configs were inherited from HDP 2.2 -->
-  <property>
-    <name>dfs.namenode.startup.delay.block.deletion.sec</name>
-    <value>3600</value>
-    <description>
-      The delay in seconds for which block deletion is paused
-      after NameNode startup. The Hadoop default of 0 disables the delay.
-      When a directory containing a large number of directories and files is
-      deleted, a delay of one hour is suggested to give the administrator enough
-      time to notice the large number of pending-deletion blocks and take
-      corrective action.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.client.retry.policy.enabled</name>
-    <value>false</value>
-    <description>Enables HDFS client retry in the event of a NameNode failure.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.content-summary.limit</name>
-    <value>5000</value>
-    <description>Dfs content summary limit.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.encryption.key.provider.uri</name>
-    <description>
-      The KeyProvider to use when interacting with encryption keys used
-      when reading and writing to an encryption zone.
-    </description>
-    <value/>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hadoop-env</type>
-        <name>keyserver_host</name>
-      </property>
-      <property>
-        <type>hadoop-env</type>
-        <name>keyserver_port</name>
-      </property>
-      <property>
-        <type>kms-env</type>
-        <name>kms_port</name>
-      </property>
-      <property>
-        <type>ranger-kms-site</type>
-        <name>ranger.service.https.attrib.ssl.enabled</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- These configs were inherited from HDP 2.3 -->
-  <property>
-    <name>nfs.file.dump.dir</name>
-    <value>/tmp/.hdfs-nfs</value>
-    <display-name>NFSGateway dump directory</display-name>
-    <description>
-      This directory is used to temporarily save out-of-order writes before
-      writing to HDFS. For each file, the out-of-order writes are dumped after
-      they accumulate in memory beyond a certain threshold (e.g., 1 MB).
-      Make sure the directory has enough space.
-    </description>
-    <value-attributes>
-      <type>directory</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>nfs.exports.allowed.hosts</name>
-    <value>* rw</value>
-    <description>
-      By default, the export can be mounted by any client. To better control the access,
-      users can update this property. The value string contains a machine name and an access privilege,
-      separated by whitespace characters. The machine name format can be a single host, a wildcard, or an IPv4
-      network. The access privilege uses rw or ro to specify read-write or read-only access of the machines
-      to the exports. If the access privilege is not provided, the default is read-only. Entries are separated
-      by &quot;;&quot;. For example: &quot;192.168.0.0/22 rw ; host*.example.com ; host1.test.org ro;&quot;.
-    </description>
-    <display-name>Allowed hosts</display-name>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.encrypt.data.transfer.cipher.suites</name>
-    <value>AES/CTR/NoPadding</value>
-    <description>
-      This value may be either undefined or AES/CTR/NoPadding. If defined, then
-      dfs.encrypt.data.transfer uses the specified cipher suite for data encryption.
-      If not defined, then only the algorithm specified in dfs.encrypt.data.transfer.algorithm
-      is used. By default, the property is not defined.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>dfs.namenode.inode.attributes.provider.class</name>
-    <description>Enables the Ranger HDFS plugin.</description>
-    <depends-on>
-      <property>
-        <type>ranger-hdfs-plugin-properties</type>
-        <name>ranger-hdfs-plugin-enabled</name>
-      </property>
-    </depends-on>
-    <value-attributes>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
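
For reference, a minimal sketch (not part of this commit) of how a Hadoop/Ambari-style configuration file such as the hdfs-site.xml above can be read with the Python standard library. The local path "hdfs-site.xml" is a hypothetical stand-in for wherever a copy of the file lives:

import xml.etree.ElementTree as ET

def read_properties(path):
    """Return a dict mapping each <property> name to its <value> text."""
    props = {}
    for prop in ET.parse(path).getroot().iter("property"):
        name = prop.findtext("name")
        # Empty <value/> elements (e.g. dfs.encryption.key.provider.uri) come back as "".
        value = prop.findtext("value", default="")
        if name:
            props[name] = value
    return props

if __name__ == "__main__":
    # Hypothetical local copy of the stack's hdfs-site.xml shown above.
    for name, value in sorted(read_properties("hdfs-site.xml").items()):
        print("{0} = {1}".format(name, value))
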


[19/19] ambari git commit: AMBARI-19229. Remove HDP-3.0.0 stack definition from Ambari-2.5 (alejandro)

Posted by al...@apache.org.
AMBARI-19229. Remove HDP-3.0.0 stack definition from Ambari-2.5 (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c358ae0c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c358ae0c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c358ae0c

Branch: refs/heads/branch-2.5
Commit: c358ae0c251748a373542a7b0e6ac9ea9e1b0b78
Parents: 4c04a91
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Mon Dec 19 09:49:24 2016 -0800
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Mon Dec 19 09:49:27 2016 -0800

----------------------------------------------------------------------
 ambari-server/pom.xml                           |    2 -
 .../common-services/HDFS/3.0.0.3.0/alerts.json  | 1786 ----
 .../HDFS/3.0.0.3.0/configuration/core-site.xml  |  224 -
 .../HDFS/3.0.0.3.0/configuration/hadoop-env.xml |  421 -
 .../hadoop-metrics2.properties.xml              |  125 -
 .../3.0.0.3.0/configuration/hadoop-policy.xml   |  130 -
 .../HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml |  226 -
 .../configuration/hdfs-logsearch-conf.xml       |  248 -
 .../HDFS/3.0.0.3.0/configuration/hdfs-site.xml  |  632 --
 .../configuration/ranger-hdfs-audit.xml         |  217 -
 .../ranger-hdfs-plugin-properties.xml           |   98 -
 .../configuration/ranger-hdfs-policymgr-ssl.xml |   67 -
 .../configuration/ranger-hdfs-security.xml      |   65 -
 .../HDFS/3.0.0.3.0/configuration/ssl-client.xml |   70 -
 .../HDFS/3.0.0.3.0/configuration/ssl-server.xml |   80 -
 .../HDFS/3.0.0.3.0/kerberos.json                |  246 -
 .../common-services/HDFS/3.0.0.3.0/metainfo.xml |  405 -
 .../common-services/HDFS/3.0.0.3.0/metrics.json | 7905 ------------------
 .../package/alerts/alert_checkpoint_time.py     |  255 -
 .../alerts/alert_datanode_unmounted_data_dir.py |  177 -
 .../package/alerts/alert_ha_namenode_health.py  |  243 -
 .../package/alerts/alert_metrics_deviation.py   |  470 --
 .../package/alerts/alert_upgrade_finalized.py   |  179 -
 .../HDFS/3.0.0.3.0/package/files/checkWebUI.py  |   83 -
 .../HDFS/3.0.0.3.0/package/scripts/__init__.py  |   20 -
 .../scripts/balancer-emulator/balancer-err.log  | 1032 ---
 .../scripts/balancer-emulator/balancer.log      |   29 -
 .../scripts/balancer-emulator/hdfs-command.py   |   45 -
 .../HDFS/3.0.0.3.0/package/scripts/datanode.py  |  178 -
 .../package/scripts/datanode_upgrade.py         |  156 -
 .../HDFS/3.0.0.3.0/package/scripts/hdfs.py      |  178 -
 .../3.0.0.3.0/package/scripts/hdfs_client.py    |  122 -
 .../3.0.0.3.0/package/scripts/hdfs_datanode.py  |   85 -
 .../3.0.0.3.0/package/scripts/hdfs_namenode.py  |  562 --
 .../package/scripts/hdfs_nfsgateway.py          |   75 -
 .../3.0.0.3.0/package/scripts/hdfs_rebalance.py |  130 -
 .../3.0.0.3.0/package/scripts/hdfs_snamenode.py |   66 -
 .../3.0.0.3.0/package/scripts/install_params.py |   39 -
 .../3.0.0.3.0/package/scripts/journalnode.py    |  203 -
 .../package/scripts/journalnode_upgrade.py      |  152 -
 .../HDFS/3.0.0.3.0/package/scripts/namenode.py  |  424 -
 .../package/scripts/namenode_ha_state.py        |  219 -
 .../package/scripts/namenode_upgrade.py         |  322 -
 .../3.0.0.3.0/package/scripts/nfsgateway.py     |  151 -
 .../HDFS/3.0.0.3.0/package/scripts/params.py    |   28 -
 .../3.0.0.3.0/package/scripts/params_linux.py   |  527 --
 .../3.0.0.3.0/package/scripts/params_windows.py |   79 -
 .../3.0.0.3.0/package/scripts/service_check.py  |  152 -
 .../package/scripts/setup_ranger_hdfs.py        |  121 -
 .../HDFS/3.0.0.3.0/package/scripts/snamenode.py |  155 -
 .../3.0.0.3.0/package/scripts/status_params.py  |   58 -
 .../HDFS/3.0.0.3.0/package/scripts/utils.py     |  384 -
 .../3.0.0.3.0/package/scripts/zkfc_slave.py     |  225 -
 .../package/templates/exclude_hosts_list.j2     |   21 -
 .../3.0.0.3.0/package/templates/hdfs.conf.j2    |   35 -
 .../HDFS/3.0.0.3.0/package/templates/slaves.j2  |   21 -
 .../HDFS/3.0.0.3.0/quicklinks/quicklinks.json   |   80 -
 .../HDFS/3.0.0.3.0/themes/theme.json            |  179 -
 .../common-services/HDFS/3.0.0.3.0/widgets.json |  649 --
 .../YARN/3.0.0.3.0/MAPREDUCE2_metrics.json      | 2596 ------
 .../YARN/3.0.0.3.0/YARN_metrics.json            | 3486 --------
 .../YARN/3.0.0.3.0/YARN_widgets.json            |  670 --
 .../common-services/YARN/3.0.0.3.0/alerts.json  |  392 -
 .../configuration-mapred/mapred-env.xml         |  104 -
 .../mapred-logsearch-conf.xml                   |   80 -
 .../configuration-mapred/mapred-site.xml        |  540 --
 .../configuration/capacity-scheduler.xml        |  183 -
 .../configuration/ranger-yarn-audit.xml         |  177 -
 .../ranger-yarn-plugin-properties.xml           |   82 -
 .../configuration/ranger-yarn-policymgr-ssl.xml |   66 -
 .../configuration/ranger-yarn-security.xml      |   58 -
 .../YARN/3.0.0.3.0/configuration/yarn-env.xml   |  306 -
 .../YARN/3.0.0.3.0/configuration/yarn-log4j.xml |  103 -
 .../configuration/yarn-logsearch-conf.xml       |  104 -
 .../YARN/3.0.0.3.0/configuration/yarn-site.xml  | 1151 ---
 .../YARN/3.0.0.3.0/kerberos.json                |  278 -
 .../common-services/YARN/3.0.0.3.0/metainfo.xml |  383 -
 .../package/alerts/alert_nodemanager_health.py  |  209 -
 .../alerts/alert_nodemanagers_summary.py        |  219 -
 .../files/validateYarnComponentStatusWindows.py |  161 -
 .../YARN/3.0.0.3.0/package/scripts/__init__.py  |   20 -
 .../scripts/application_timeline_server.py      |  162 -
 .../3.0.0.3.0/package/scripts/historyserver.py  |  192 -
 .../3.0.0.3.0/package/scripts/install_jars.py   |   99 -
 .../package/scripts/mapred_service_check.py     |  172 -
 .../package/scripts/mapreduce2_client.py        |   98 -
 .../3.0.0.3.0/package/scripts/nodemanager.py    |  166 -
 .../package/scripts/nodemanager_upgrade.py      |   74 -
 .../YARN/3.0.0.3.0/package/scripts/params.py    |   32 -
 .../3.0.0.3.0/package/scripts/params_linux.py   |  479 --
 .../3.0.0.3.0/package/scripts/params_windows.py |   62 -
 .../package/scripts/resourcemanager.py          |  293 -
 .../YARN/3.0.0.3.0/package/scripts/service.py   |  106 -
 .../3.0.0.3.0/package/scripts/service_check.py  |  185 -
 .../package/scripts/setup_ranger_yarn.py        |   71 -
 .../3.0.0.3.0/package/scripts/status_params.py  |   61 -
 .../YARN/3.0.0.3.0/package/scripts/yarn.py      |  498 --
 .../3.0.0.3.0/package/scripts/yarn_client.py    |   67 -
 .../package/templates/container-executor.cfg.j2 |   40 -
 .../package/templates/exclude_hosts_list.j2     |   21 -
 .../package/templates/mapreduce.conf.j2         |   35 -
 .../package/templates/taskcontroller.cfg.j2     |   38 -
 .../3.0.0.3.0/package/templates/yarn.conf.j2    |   35 -
 .../3.0.0.3.0/quicklinks-mapred/quicklinks.json |   80 -
 .../YARN/3.0.0.3.0/quicklinks/quicklinks.json   |   80 -
 .../YARN/3.0.0.3.0/themes-mapred/theme.json     |  132 -
 .../YARN/3.0.0.3.0/themes/theme.json            |  250 -
 .../ZOOKEEPER/3.4.9/metainfo.xml                |   51 -
 .../HDP/3.0/configuration/cluster-env.xml       |  293 -
 .../HDP/3.0/hooks/after-INSTALL/scripts/hook.py |   37 -
 .../3.0/hooks/after-INSTALL/scripts/params.py   |   97 -
 .../scripts/shared_initialization.py            |  111 -
 .../hooks/before-ANY/files/changeToSecureUid.sh |   53 -
 .../HDP/3.0/hooks/before-ANY/scripts/hook.py    |   36 -
 .../HDP/3.0/hooks/before-ANY/scripts/params.py  |  231 -
 .../before-ANY/scripts/shared_initialization.py |  226 -
 .../3.0/hooks/before-INSTALL/scripts/hook.py    |   37 -
 .../3.0/hooks/before-INSTALL/scripts/params.py  |  113 -
 .../scripts/repo_initialization.py              |   68 -
 .../scripts/shared_initialization.py            |   37 -
 .../3.0/hooks/before-RESTART/scripts/hook.py    |   29 -
 .../hooks/before-START/files/checkForFormat.sh  |   65 -
 .../before-START/files/task-log4j.properties    |  134 -
 .../hooks/before-START/files/topology_script.py |   66 -
 .../HDP/3.0/hooks/before-START/scripts/hook.py  |   39 -
 .../3.0/hooks/before-START/scripts/params.py    |  326 -
 .../before-START/scripts/rack_awareness.py      |   47 -
 .../scripts/shared_initialization.py            |  191 -
 .../templates/commons-logging.properties.j2     |   43 -
 .../templates/exclude_hosts_list.j2             |   21 -
 .../templates/hadoop-metrics2.properties.j2     |  105 -
 .../before-START/templates/health_check.j2      |   81 -
 .../templates/include_hosts_list.j2             |   21 -
 .../templates/topology_mappings.data.j2         |   24 -
 .../main/resources/stacks/HDP/3.0/kerberos.json |   78 -
 .../main/resources/stacks/HDP/3.0/metainfo.xml  |   24 -
 .../HDP/3.0/properties/stack_features.json      |  323 -
 .../stacks/HDP/3.0/properties/stack_tools.json  |    4 -
 .../resources/stacks/HDP/3.0/repos/repoinfo.xml |  132 -
 .../services/HDFS/configuration/hadoop-env.xml  |  166 -
 .../stacks/HDP/3.0/services/HDFS/metainfo.xml   |  145 -
 .../YARN/configuration-mapred/mapred-env.xml    |   49 -
 .../YARN/configuration-mapred/mapred-site.xml   |   78 -
 .../services/YARN/configuration/yarn-site.xml   |   35 -
 .../stacks/HDP/3.0/services/YARN/metainfo.xml   |   81 -
 .../HDP/3.0/services/ZOOKEEPER/metainfo.xml     |   52 -
 .../main/resources/stacks/HDP/3.0/widgets.json  |   95 -
 147 files changed, 39996 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index 1208c38..221b4ed 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -286,8 +286,6 @@
             <exclude>src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/service-metrics/*.txt</exclude>
             <exclude>src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/balancer-emulator/balancer-err.log</exclude>
             <exclude>src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/balancer-emulator/balancer.log</exclude>
-            <exclude>src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/balancer-emulator/balancer-err.log</exclude>
-            <exclude>src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/balancer-emulator/balancer.log</exclude>
             <exclude>src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/balancer-emulator/balancer.log</exclude>
             <exclude>src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/balancer-emulator/balancer-err.log</exclude>
             <exclude>src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/balancer.log</exclude>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/alerts.json b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/alerts.json
deleted file mode 100644
index 8ccfa47..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/alerts.json
+++ /dev/null
@@ -1,1786 +0,0 @@
-{
-  "HDFS":{
-    "service": [
-      {
-        "name": "datanode_process_percent",
-        "label": "Percent DataNodes Available",
-        "description": "This alert is triggered if the number of down DataNodes in the cluster is greater than the configured critical threshold. It aggregates the results of DataNode process checks.",
-        "interval": 1,
-        "scope": "SERVICE",
-        "enabled": true,
-        "source": {
-          "type": "AGGREGATE",
-          "alert_name": "datanode_process",
-          "reporting": {
-            "ok": {
-              "text": "affected: [{1}], total: [{0}]"
-            },
-            "warning": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 10
-            },
-            "critical": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 30
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          }
-        }
-      },
-      {
-        "name": "datanode_storage_percent",
-        "label": "Percent DataNodes With Available Space",
-        "description": "This service-level alert is triggered if the storage on a certain percentage of DataNodes exceeds either the warning or critical threshold values.",
-        "interval": 1,
-        "scope": "SERVICE",
-        "enabled": true,
-        "source": {
-          "type": "AGGREGATE",
-          "alert_name": "datanode_storage",
-          "reporting": {
-            "ok": {
-              "text": "affected: [{1}], total: [{0}]"
-            },
-            "warning": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 10
-            },
-            "critical": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 30
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          }
-        }
-      },
-      {
-        "name": "journalnode_process_percent",
-        "label": "Percent JournalNodes Available",
-        "description": "This alert is triggered if the number of down JournalNodes in the cluster is greater than the configured critical threshold. It aggregates the results of JournalNode process checks.",
-        "interval": 1,
-        "scope": "SERVICE",
-        "enabled": true,
-        "source": {
-          "type": "AGGREGATE",
-          "alert_name": "journalnode_process",
-          "reporting": {
-            "ok": {
-              "text": "affected: [{1}], total: [{0}]"
-            },
-            "warning": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 33
-            },
-            "critical": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 50
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          }
-        }
-      }
-    ],
-    "NAMENODE": [
-      {
-        "name": "namenode_webui",
-        "label": "NameNode Web UI",
-        "description": "This host-level alert is triggered if the NameNode Web UI is unreachable.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "WEB",
-          "uri": {
-            "http": "{{hdfs-site/dfs.namenode.http-address}}",
-            "https": "{{hdfs-site/dfs.namenode.https-address}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
-              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
-              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
-              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "HTTP {0} response in {2:.3f}s"
-            },
-            "warning":{
-              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
-            },
-            "critical": {
-              "text": "Connection failed to {1} ({3})"
-            }
-          }
-        }
-      },
-      {
-        "name": "upgrade_finalized_state",
-        "label": "HDFS Upgrade Finalized State",
-        "description": "This service-level alert is triggered if HDFS is not in the finalized state",
-        "interval": 1,
-        "scope": "SERVICE",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_upgrade_finalized.py",
-          "parameters": []
-        }
-      },
-      {
-        "name": "namenode_cpu",
-        "label": "NameNode Host CPU Utilization",
-        "description": "This host-level alert is triggered if CPU utilization of the NameNode exceeds certain warning and critical thresholds. It checks the NameNode JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
-        "interval": 5,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hdfs-site/dfs.namenode.http-address}}",
-            "https": "{{hdfs-site/dfs.namenode.https-address}}",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
-              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
-              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
-              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "{1} CPU, load {0:.1%}"
-            },
-            "warning": {
-              "text": "{1} CPU, load {0:.1%}",
-              "value": 200
-            },
-            "critical": {
-              "text": "{1} CPU, load {0:.1%}",
-              "value": 250
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          },
-          "jmx": {
-            "property_list": [
-              "java.lang:type=OperatingSystem/SystemCpuLoad",
-              "java.lang:type=OperatingSystem/AvailableProcessors"
-            ],
-            "value": "{0} * 100"
-          }
-        }
-      },
-      {
-        "name": "namenode_hdfs_blocks_health",
-        "label": "NameNode Blocks Health",
-        "description": "This service-level alert is triggered if the number of corrupt or missing blocks exceeds the configured critical threshold. The threshold values are in blocks.",
-        "interval": 2,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hdfs-site/dfs.namenode.http-address}}",
-            "https": "{{hdfs-site/dfs.namenode.https-address}}",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
-              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
-              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
-              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "Total Blocks:[{1}], Missing Blocks:[{0}]"
-            },
-            "warning": {
-              "text": "Total Blocks:[{1}], Missing Blocks:[{0}]",
-              "value": 1
-            },          
-            "critical": {
-              "text": "Total Blocks:[{1}], Missing Blocks:[{0}]",
-              "value": 1
-            },
-            "units" : "Blocks"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=NameNode,name=FSNamesystem/MissingBlocks",
-              "Hadoop:service=NameNode,name=FSNamesystem/BlocksTotal"
-            ],
-            "value": "{0}"
-          }
-        }
-      },
-      {
-        "name": "namenode_hdfs_pending_deletion_blocks",
-        "label": "HDFS Pending Deletion Blocks",
-        "description": "This service-level alert is triggered if the number of blocks pending deletion in HDFS exceeds the configured warning and critical thresholds. It checks the NameNode JMX Servlet for the PendingDeletionBlock property.",
-        "interval": 2,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hdfs-site/dfs.namenode.http-address}}",
-            "https": "{{hdfs-site/dfs.namenode.https-address}}",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
-              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
-              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
-              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "Pending Deletion Blocks:[{0}]"
-            },
-            "warning": {
-              "text": "Pending Deletion Blocks:[{0}]",
-              "value": 100000
-            },
-            "critical": {
-              "text": "Pending Deletion Blocks:[{0}]",
-              "value": 100000
-            },
-            "units" : "Blocks"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=NameNode,name=FSNamesystem/PendingDeletionBlocks"
-            ],
-            "value": "{0}"
-          }
-        }
-      },
-      {
-        "name": "namenode_hdfs_capacity_utilization",
-        "label": "HDFS Capacity Utilization",
-        "description": "This service-level alert is triggered if the HDFS capacity utilization exceeds the configured warning and critical thresholds. It checks the NameNode JMX Servlet for the CapacityUsed and CapacityRemaining properties. The threshold values are in percent.",
-        "interval": 2,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hdfs-site/dfs.namenode.http-address}}",
-            "https": "{{hdfs-site/dfs.namenode.https-address}}",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
-              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
-              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
-              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "Capacity Used:[{2:.0f}%, {0}], Capacity Remaining:[{1}]"
-            },
-            "warning": {
-              "text": "Capacity Used:[{2:.0f}%, {0}], Capacity Remaining:[{1}]",
-              "value": 75
-            },          
-            "critical": {
-              "text": "Capacity Used:[{2:.0f}%, {0}], Capacity Remaining:[{1}]",
-              "value": 80
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=NameNode,name=FSNamesystemState/CapacityUsed",
-              "Hadoop:service=NameNode,name=FSNamesystemState/CapacityRemaining"
-            ],
-            "value": "{0}/({0} + {1}) * 100.0"
-          }
-        }
-      },
-      {
-        "name": "namenode_rpc_latency",
-        "label": "NameNode RPC Latency",
-        "description": "This host-level alert is triggered if the NameNode RPC latency exceeds the configured critical threshold. Typically an increase in the RPC processing time increases the RPC queue length, causing the average queue wait time to increase for NameNode operations. The threshold values are in milliseconds.",
-        "interval": 2,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hdfs-site/dfs.namenode.http-address}}",
-            "https": "{{hdfs-site/dfs.namenode.https-address}}",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
-              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
-              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
-              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]"
-            },
-            "warning": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
-              "value": 3000
-            },          
-            "critical": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
-              "value": 5000
-            },
-            "units" : "ms"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=NameNode,name=RpcActivityForPort*/RpcQueueTimeAvgTime",
-              "Hadoop:service=NameNode,name=RpcActivityForPort*/RpcProcessingTimeAvgTime"
-            ],
-            "value": "{0}"
-          }
-        }
-      },
-      {
-        "name": "namenode_directory_status",
-        "label": "NameNode Directory Status",
-        "description": "This host-level alert is triggered if the NameNode NameDirStatuses metric (name=NameNodeInfo/NameDirStatuses) reports a failed directory. The threshold values are in the number of directories that are not healthy.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hdfs-site/dfs.namenode.http-address}}",
-            "https": "{{hdfs-site/dfs.namenode.https-address}}",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
-              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
-              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
-              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "Directories are healthy"
-            },
-            "warning": {
-              "text": "Failed directory count: {1}",
-              "value": 1
-            },          
-            "critical": {
-              "text": "Failed directory count: {1}",
-              "value": 1
-            },
-            "units" : "Dirs"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=NameNode,name=NameNodeInfo/NameDirStatuses"
-            ],
-            "value": "calculate(args)\ndef calculate(args):\n  import json\n  json_statuses = json.loads({0})\n  return len(json_statuses['failed']) if 'failed' in json_statuses else 0"
-          }
-        }
-      },
-      {
-        "name": "datanode_health_summary",
-        "label": "DataNode Health Summary",
-        "description": "This service-level alert is triggered if there are unhealthy DataNodes",
-        "interval": 1,
-        "scope": "SERVICE",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hdfs-site/dfs.namenode.http-address}}",
-            "https": "{{hdfs-site/dfs.namenode.https-address}}",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
-              "alias_key": "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
-              "http_pattern": "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
-              "https_pattern": "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "All {2} DataNode(s) are healthy"
-            },
-            "warning": {
-              "text": "DataNode Health: [Live={2}, Stale={1}, Dead={0}]",
-              "value": 1
-            },
-            "critical": {
-              "text": "DataNode Health: [Live={2}, Stale={1}, Dead={0}]",
-              "value": 1
-            },
-            "units": "DNs"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=NameNode,name=FSNamesystemState/NumDeadDataNodes",
-              "Hadoop:service=NameNode,name=FSNamesystemState/NumStaleDataNodes",
-              "Hadoop:service=NameNode,name=FSNamesystemState/NumLiveDataNodes"
-            ],
-            "value": "{0} + {1}"
-          }
-        }
-      },
-      {
-        "name": "namenode_last_checkpoint",
-        "label": "NameNode Last Checkpoint",
-        "description": "This service-level alert will trigger if the last time that the NameNode performed a checkpoint was too long ago. It will also trigger if the number of uncommitted transactions is beyond a certain threshold.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_checkpoint_time.py",
-          "parameters": [
-            {
-              "name": "connection.timeout",
-              "display_name": "Connection Timeout",
-              "value": 5.0,
-              "type": "NUMERIC",
-              "description": "The maximum time before this alert is considered to be CRITICAL",
-              "units": "seconds",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "checkpoint.time.warning.threshold",
-              "display_name": "Checkpoint Warning",
-              "value": 200,
-              "type": "PERCENT",
-              "description": "The percentage of the last checkpoint time greater than the interval in order to trigger a warning alert.",
-              "units": "%",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "checkpoint.time.critical.threshold",
-              "display_name": "Checkpoint Critical",
-              "value": 200,
-              "type": "PERCENT",
-              "description": "The percentage of the last checkpoint time greater than the interval in order to trigger a critical alert.",
-              "units": "%",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "checkpoint.txns.multiplier.warning.threshold",
-              "display_name": "Uncommitted transactions Warning",
-              "value": 2.0,
-              "type": "NUMERIC",
-              "description": "The multiplier to use against dfs.namenode.checkpoint.period compared to the difference between last transaction id and most recent transaction id beyond which to trigger a warning alert.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "checkpoint.txns.multiplier.critical.threshold",
-              "display_name": "Uncommitted transactions Critical",
-              "value": 4.0,
-              "type": "NUMERIC",
-              "description": "The multiplier to use against dfs.namenode.checkpoint.period compared to the difference between last transaction id and most recent transaction id beyond which to trigger a critical alert.",
-              "threshold": "CRITICAL"
-            }
-          ]
-        }
-      },
-      {
-        "name": "namenode_ha_health",
-        "label": "NameNode High Availability Health",
-        "description": "This service-level alert is triggered if either the Active NameNode or Standby NameNode are not running.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "ignore_host": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py",
-          "parameters": [
-            {
-              "name": "connection.timeout",
-              "display_name": "Connection Timeout",
-              "value": 5.0,
-              "type": "NUMERIC",
-              "description": "The maximum time before this alert is considered to be CRITICAL",
-              "units": "seconds",
-              "threshold": "CRITICAL"
-            }
-          ]
-        }
-      },
-      {
-        "name": "namenode_service_rpc_queue_latency_hourly",
-        "label": "NameNode Service RPC Queue Latency (Hourly)",
-        "description": "This service-level alert is triggered if the deviation of RPC queue latency on datanode port has grown beyond the specified threshold within an hour period.",
-        "interval": 5,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
-          "parameters": [
-            {
-              "name": "mergeHaMetrics",
-              "display_name": "Whether active and stanby NameNodes metrics should be merged",
-              "value": "false",
-              "type": "STRING",
-              "description": "Whether active and stanby NameNodes metrics should be merged.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "interval",
-              "display_name": "Time interval in minutes",
-              "value": 60,
-              "type": "NUMERIC",
-              "description": "Time interval in minutes.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "appId",
-              "display_name": "AMS application id",
-              "value": "NAMENODE",
-              "type": "STRING",
-              "description": "The application id used to retrieve the metric.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metricName",
-              "display_name": "Metric Name",
-              "value": "rpc.rpc.datanode.RpcQueueTimeAvgTime",
-              "type": "STRING",
-              "description": "The metric to monitor.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metric.deviation.warning.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 100,
-              "description": "The percentage of RPC queue latency growth.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "metric.deviation.critical.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 200,
-              "description": "The percentage of RPC queue latency growth.",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "minimumValue",
-              "display_name": "Minimum Latency",
-              "value": 30,
-              "type": "NUMERIC",
-              "units": "seconds",
-              "description": "The minimum latency to measure growth."
-            },
-            {
-              "name": "metric.units",
-              "display_name": "Metric Units",
-              "type": "STRING",
-              "value": "ms",
-              "description": "The units that the metric data points are reported in.",
-              "visibility": "HIDDEN"
-            }
-          ]
-        }
-      },
-      {
-        "name": "namenode_client_rpc_queue_latency_hourly",
-        "label": "NameNode Client RPC Queue Latency (Hourly)",
-        "description": "This service-level alert is triggered if the deviation of RPC queue latency on client port has grown beyond the specified threshold within an hour period.",
-        "interval": 5,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
-          "parameters": [
-            {
-              "name": "mergeHaMetrics",
-              "display_name": "Whether active and stanby NameNodes metrics should be merged",
-              "value": "false",
-              "type": "STRING",
-              "description": "Whether active and stanby NameNodes metrics should be merged.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "interval",
-              "display_name": "Time interval in minutes",
-              "value": 60,
-              "type": "NUMERIC",
-              "description": "Time interval in minutes.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "appId",
-              "display_name": "AMS application id",
-              "value": "NAMENODE",
-              "type": "STRING",
-              "description": "The application id used to retrieve the metric.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metricName",
-              "display_name": "Metric Name",
-              "value": "rpc.rpc.client.RpcQueueTimeAvgTime",
-              "type": "STRING",
-              "description": "The metric to monitor.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metric.deviation.warning.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 100,
-              "description": "The percentage of RPC queue latency growth.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "metric.deviation.critical.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 200,
-              "description": "The percentage of RPC queue latency growth.",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "minimumValue",
-              "display_name": "Minimum Latency",
-              "value": 30,
-              "type": "NUMERIC",
-              "units": "seconds",
-              "description": "The minimum latency to measure growth."
-            },
-            {
-              "name": "metric.units",
-              "display_name": "Metric Units",
-              "type": "STRING",
-              "value": "ms",
-              "description": "The units that the metric data points are reported in.",
-              "visibility": "HIDDEN"
-            }
-          ]
-        }
-      },
-      {
-        "name": "namenode_service_rpc_processing_latency_hourly",
-        "label": "NameNode Service RPC Processing Latency (Hourly)",
-        "description": "This service-level alert is triggered if the deviation of RPC latency on datanode port has grown beyond the specified threshold within an hour period.",
-        "interval": 5,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
-          "parameters": [
-            {
-              "name": "mergeHaMetrics",
-              "display_name": "Whether active and stanby NameNodes metrics should be merged",
-              "value": "false",
-              "type": "STRING",
-              "description": "Whether active and stanby NameNodes metrics should be merged.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "interval",
-              "display_name": "Time interval in minutes",
-              "value": 60,
-              "type": "NUMERIC",
-              "description": "Time interval in minutes.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "appId",
-              "display_name": "AMS application id",
-              "value": "NAMENODE",
-              "type": "STRING",
-              "description": "The application id used to retrieve the metric.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metricName",
-              "display_name": "Metric Name",
-              "value": "rpc.rpc.datanode.RpcProcessingTimeAvgTime",
-              "type": "STRING",
-              "description": "The metric to monitor.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metric.deviation.warning.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 100,
-              "description": "The percentage of RPC processing latency growth.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "metric.deviation.critical.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 200,
-              "description": "The percentage of RPC processing latency growth.",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "minimumValue",
-              "display_name": "Minimum Latency",
-              "value": 30,
-              "type": "NUMERIC",
-              "units": "seconds",
-              "description": "The minimum latency to measure growth."
-            },
-            {
-              "name": "metric.units",
-              "display_name": "Metric Units",
-              "type": "STRING",
-              "value": "ms",
-              "description": "The units that the metric data points are reported in.",
-              "visibility": "HIDDEN"
-            }
-          ]
-        }
-      },
-      {
-        "name": "namenode_client_rpc_processing_latency_hourly",
-        "label": "NameNode Client RPC Processing Latency (Hourly)",
-        "description": "This service-level alert is triggered if the deviation of RPC latency on client port has grown beyond the specified threshold within an hour period.",
-        "interval": 5,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
-          "parameters": [
-            {
-              "name": "mergeHaMetrics",
-              "display_name": "Whether active and stanby NameNodes metrics should be merged",
-              "value": "false",
-              "type": "STRING",
-              "description": "Whether active and stanby NameNodes metrics should be merged.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "interval",
-              "display_name": "Time interval in minutes",
-              "value": 60,
-              "type": "NUMERIC",
-              "description": "Time interval in minutes.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "appId",
-              "display_name": "AMS application id",
-              "value": "NAMENODE",
-              "type": "STRING",
-              "description": "The application id used to retrieve the metric.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metricName",
-              "display_name": "Metric Name",
-              "value": "rpc.rpc.client.RpcProcessingTimeAvgTime",
-              "type": "STRING",
-              "description": "The metric to monitor.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metric.deviation.warning.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 100,
-              "description": "The percentage of RPC processing latency growth.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "metric.deviation.critical.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 200,
-              "description": "The percentage of RPC processing latency growth.",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "minimumValue",
-              "display_name": "Minimum Latency",
-              "value": 30,
-              "type": "NUMERIC",
-              "units": "seconds",
-              "description": "The minimum latency to measure growth."
-            },
-            {
-              "name": "metric.units",
-              "display_name": "Metric Units",
-              "type": "STRING",
-              "value": "ms",
-              "description": "The units that the metric data points are reported in.",
-              "visibility": "HIDDEN"
-            }
-          ]
-        }
-      },
-      {
-        "name": "increase_nn_heap_usage_daily",
-        "label": "NameNode Heap Usage (Daily)",
-        "description": "This service-level alert is triggered if the NameNode heap usage deviation has grown beyond the specified threshold within a day period.",
-        "interval": 480,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
-          "parameters": [
-            {
-              "name": "mergeHaMetrics",
-              "display_name": "Whether active and stanby NameNodes metrics should be merged",
-              "value": "false",
-              "type": "STRING",
-              "description": "Whether active and stanby NameNodes metrics should be merged.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "interval",
-              "display_name": "Time interval in minutes",
-              "value": 1440,
-              "type": "NUMERIC",
-              "description": "Time interval in minutes.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "appId",
-              "display_name": "AMS application id",
-              "value": "NAMENODE",
-              "type": "STRING",
-              "description": "The application id used to retrieve the metric.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metricName",
-              "display_name": "Metric Name",
-              "value": "jvm.JvmMetrics.MemHeapUsedM",
-              "type": "STRING",
-              "description": "The metric to monitor.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metric.deviation.warning.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 20,
-              "description": "The percentage of NameNode heap usage growth.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "metric.deviation.critical.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 50,
-              "description": "The percentage of NameNode heap usage growth.",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "metric.units",
-              "display_name": "Metric Units",
-              "type": "STRING",
-              "value": "MB",
-              "description": "The units that the metric data points are reported in.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "minimumValue",
-              "display_name": "Minimum Heap",
-              "value": 100,
-              "type": "NUMERIC",
-              "units": "MB",
-              "description": "The minimum heap increase in a day."
-            }
-          ]
-        }
-      },
-      {
-        "name": "namenode_service_rpc_processing_latency_daily",
-        "label": "NameNode Service RPC Processing Latency (Daily)",
-        "description": "This service-level alert is triggered if the deviation of RPC latency on datanode port has grown beyond the specified threshold within a day period.",
-        "interval": 480,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
-          "parameters": [
-            {
-              "name": "mergeHaMetrics",
-              "display_name": "Whether active and stanby NameNodes metrics should be merged",
-              "value": "false",
-              "type": "STRING",
-              "description": "Whether active and stanby NameNodes metrics should be merged.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "interval",
-              "display_name": "Time interval in minutes",
-              "value": 1440,
-              "type": "NUMERIC",
-              "description": "Time interval in minutes.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "appId",
-              "display_name": "AMS application id",
-              "value": "NAMENODE",
-              "type": "STRING",
-              "description": "The application id used to retrieve the metric.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metricName",
-              "display_name": "Metric Name",
-              "value": "rpc.rpc.datanode.RpcProcessingTimeAvgTime",
-              "type": "STRING",
-              "description": "The metric to monitor.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metric.deviation.warning.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 100,
-              "description": "The percentage of RPC processing latency growth.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "metric.deviation.critical.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 200,
-              "description": "The percentage of RPC processing latency growth.",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "minimumValue",
-              "display_name": "Minimum Latency",
-              "value": 30,
-              "type": "NUMERIC",
-              "units": "seconds",
-              "description": "The minimum latency to measure growth."
-            },
-            {
-              "name": "metric.units",
-              "display_name": "Metric Units",
-              "type": "STRING",
-              "value": "ms",
-              "description": "The units that the metric data points are reported in.",
-              "visibility": "HIDDEN"
-            }
-          ]
-        }
-      },
-      {
-        "name": "namenode_client_rpc_processing_latency_daily",
-        "label": "NameNode Client RPC Processing Latency (Daily)",
-        "description": "This service-level alert is triggered if the deviation of RPC latency on client port has grown beyond the specified threshold within a day period.",
-        "interval": 480,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
-          "parameters": [
-            {
-              "name": "mergeHaMetrics",
-              "display_name": "Whether active and stanby NameNodes metrics should be merged",
-              "value": "false",
-              "type": "STRING",
-              "description": "Whether active and stanby NameNodes metrics should be merged.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "interval",
-              "display_name": "Time interval in minutes",
-              "value": 1440,
-              "type": "NUMERIC",
-              "description": "Time interval in minutes.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "appId",
-              "display_name": "AMS application id",
-              "value": "NAMENODE",
-              "type": "STRING",
-              "description": "The application id used to retrieve the metric.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metricName",
-              "display_name": "Metric Name",
-              "value": "rpc.rpc.client.RpcProcessingTimeAvgTime",
-              "type": "STRING",
-              "description": "The metric to monitor.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metric.deviation.warning.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 100,
-              "description": "The percentage of RPC processing latency growth.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "metric.deviation.critical.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 200,
-              "description": "The percentage of RPC processing latency growth.",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "minimumValue",
-              "display_name": "Minimum Latency",
-              "value": 30,
-              "type": "NUMERIC",
-              "units": "seconds",
-              "description": "The minimum latency to measure growth."
-            },
-            {
-              "name": "metric.units",
-              "display_name": "Metric Units",
-              "type": "STRING",
-              "value": "ms",
-              "description": "The units that the metric data points are reported in.",
-              "visibility": "HIDDEN"
-            }
-          ]
-        }
-      },
-      {
-        "name": "namenode_service_rpc_queue_latency_daily",
-        "label": "NameNode Service RPC Queue Latency (Daily)",
-        "description": "This service-level alert is triggered if the deviation of RPC latency on datanode port has grown beyond the specified threshold within a day period.",
-        "interval": 480,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
-          "parameters": [
-            {
-              "name": "mergeHaMetrics",
-              "display_name": "Whether active and stanby NameNodes metrics should be merged",
-              "value": "false",
-              "type": "STRING",
-              "description": "Whether active and stanby NameNodes metrics should be merged.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "interval",
-              "display_name": "Time interval in minutes",
-              "value": 1440,
-              "type": "NUMERIC",
-              "description": "Time interval in minutes.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "appId",
-              "display_name": "AMS application id",
-              "value": "NAMENODE",
-              "type": "STRING",
-              "description": "The application id used to retrieve the metric.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metricName",
-              "display_name": "Metric Name",
-              "value": "rpc.rpc.datanode.RpcQueueTimeAvgTime",
-              "type": "STRING",
-              "description": "The metric to monitor.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metric.deviation.warning.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 100,
-              "description": "The percentage of RPC queue latency growth.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "metric.deviation.critical.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 200,
-              "description": "The percentage of RPC queue latency growth.",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "minimumValue",
-              "display_name": "Minimum Latency",
-              "value": 30,
-              "type": "NUMERIC",
-              "units": "seconds",
-              "description": "The minimum latency to measure growth."
-            },
-            {
-              "name": "metric.units",
-              "display_name": "Metric Units",
-              "type": "STRING",
-              "value": "MB",
-              "description": "The units that the metric data points are reported in.",
-              "visibility": "HIDDEN"
-            }
-          ]
-        }
-      },
-      {
-        "name": "namenode_client_rpc_queue_latency_daily",
-        "label": "NameNode Client RPC Queue Latency (Daily)",
-        "description": "This service-level alert is triggered if the deviation of RPC latency on client port has grown beyond the specified threshold within a day period.",
-        "interval": 480,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
-          "parameters": [
-            {
-              "name": "mergeHaMetrics",
-              "display_name": "Whether active and stanby NameNodes metrics should be merged",
-              "value": "false",
-              "type": "STRING",
-              "description": "Whether active and stanby NameNodes metrics should be merged.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "interval",
-              "display_name": "Time interval in minutes",
-              "value": 1440,
-              "type": "NUMERIC",
-              "description": "Time interval in minutes.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "appId",
-              "display_name": "AMS application id",
-              "value": "NAMENODE",
-              "type": "STRING",
-              "description": "The application id used to retrieve the metric.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metricName",
-              "display_name": "Metric Name",
-              "value": "rpc.rpc.client.RpcQueueTimeAvgTime",
-              "type": "STRING",
-              "description": "The metric to monitor.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metric.deviation.warning.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 100,
-              "description": "The percentage of RPC queue latency growth.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "metric.deviation.critical.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 200,
-              "description": "The percentage of RPC queue latency growth.",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "minimumValue",
-              "display_name": "Minimum Latency",
-              "value": 30,
-              "type": "NUMERIC",
-              "units": "seconds",
-              "description": "The minimum latency to measure growth."
-            },
-            {
-              "name": "metric.units",
-              "display_name": "Metric Units",
-              "type": "STRING",
-              "value": "ms",
-              "description": "The units that the metric data points are reported in.",
-              "visibility": "HIDDEN"
-            }
-          ]
-        }
-      },
-      {
-        "name": "namenode_increase_in_storage_capacity_usage_daily",
-        "label": "HDFS Storage Capacity Usage (Daily)",
-        "description": "This service-level alert is triggered if the increase in storage capacity usage deviation has grown beyond the specified threshold within a day period.",
-        "interval": 480,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
-          "parameters": [
-            {
-              "name": "mergeHaMetrics",
-              "display_name": "Whether active and stanby NameNodes metrics should be merged",
-              "value": "false",
-              "type": "STRING",
-              "description": "Whether active and stanby NameNodes metrics should be merged.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "interval",
-              "display_name": "Time interval in minutes",
-              "value": 1440,
-              "type": "NUMERIC",
-              "description": "Time interval in minutes.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "appId",
-              "display_name": "AMS application id",
-              "value": "NAMENODE",
-              "type": "STRING",
-              "description": "The application id used to retrieve the metric.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metricName",
-              "display_name": "Metric Name",
-              "value": "dfs.FSNamesystem.CapacityUsed",
-              "type": "STRING",
-              "description": "The metric to monitor.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metric.deviation.warning.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 30,
-              "description": "The percentage of storage capacity usage growth.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "metric.deviation.critical.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 50,
-              "description": "The percentage of storage capacity usage growth.",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "metric.units",
-              "display_name": "Metric Units",
-              "type": "STRING",
-              "value": "B",
-              "description": "The units that the metric data points are reported in.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "minimumValue",
-              "display_name": "Minimum Capacity",
-              "value": 100,
-              "type": "NUMERIC",
-              "units": "MB",
-              "description": "The minimum capacity increase in a day."
-            }
-          ]
-        }
-      },
-      {
-        "name": "increase_nn_heap_usage_weekly",
-        "label": "NameNode Heap Usage (Weekly)",
-        "description": "This service-level alert is triggered if the NameNode heap usage deviation has grown beyond the specified threshold within a week period.",
-        "interval": 1440,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
-          "parameters": [
-            {
-              "name": "mergeHaMetrics",
-              "display_name": "Whether active and stanby NameNodes metrics should be merged",
-              "value": "false",
-              "type": "STRING",
-              "description": "Whether active and stanby NameNodes metrics should be merged.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "interval",
-              "display_name": "Time interval in minutes",
-              "value": 10080,
-              "type": "NUMERIC",
-              "description": "Time interval in minutes.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "appId",
-              "display_name": "AMS application id",
-              "value": "NAMENODE",
-              "type": "STRING",
-              "description": "The application id used to retrieve the metric.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metricName",
-              "display_name": "Metric Name",
-              "value": "jvm.JvmMetrics.MemHeapUsedM",
-              "type": "STRING",
-              "description": "The metric to monitor.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metric.deviation.warning.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 20,
-              "description": "The percentage of NameNode heap usage growth.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "metric.deviation.critical.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 50,
-              "description": "The percentage of NameNode heap usage growth.",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "metric.units",
-              "display_name": "Metric Units",
-              "type": "STRING",
-              "value": "MB",
-              "description": "The units that the metric data points are reported in.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "minimumValue",
-              "display_name": "Minimum Heap",
-              "value": 1000,
-              "type": "NUMERIC",
-              "units": "MB",
-              "description": "The minimum heap increase in a week."
-            }
-          ]
-        }
-      },
-      {
-        "name": "namenode_increase_in_storage_capacity_usage_weekly",
-        "label": "HDFS Storage Capacity Usage (Weekly)",
-        "description": "This service-level alert is triggered if the increase in storage capacity usage deviation has grown beyond the specified threshold within a week period.",
-        "interval": 1440,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py",
-          "parameters": [
-            {
-              "name": "mergeHaMetrics",
-              "display_name": "Whether active and stanby NameNodes metrics should be merged",
-              "value": "false",
-              "type": "STRING",
-              "description": "Whether active and stanby NameNodes metrics should be merged.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "interval",
-              "display_name": "Time interval in minutes",
-              "value": 10080,
-              "type": "NUMERIC",
-              "description": "Time interval in minutes.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "appId",
-              "display_name": "AMS application id",
-              "value": "NAMENODE",
-              "type": "STRING",
-              "description": "The application id used to retrieve the metric.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metricName",
-              "display_name": "Metric Name",
-              "value": "dfs.FSNamesystem.CapacityUsed",
-              "type": "STRING",
-              "description": "The metric to monitor.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "metric.deviation.warning.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 10,
-              "description": "The percentage of storage capacity usage growth.",
-              "threshold": "WARNING"
-            },
-            {
-              "name": "metric.deviation.critical.threshold",
-              "display_name": "Growth Rate",
-              "type": "PERCENT",
-              "units": "%",
-              "value": 20,
-              "description": "The percentage of storage capacity usage growth.",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "metric.units",
-              "display_name": "Metric Units",
-              "type": "STRING",
-              "value": "B",
-              "description": "The units that the metric data points are reported in.",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "minimumValue",
-              "display_name": "Minimum Capacity",
-              "value": 1000,
-              "type": "NUMERIC",
-              "units": "MB",
-              "description": "The minimum capacity increase in a week."
-            }
-          ]
-        }
-      }
-    ],
-    "SECONDARY_NAMENODE": [
-      {
-        "name": "secondary_namenode_process",
-        "label": "Secondary NameNode Process",
-        "description": "This host-level alert is triggered if the Secondary NameNode process cannot be confirmed to be up and listening on the network.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "WEB",
-          "uri": {
-            "http": "{{hdfs-site/dfs.namenode.secondary.http-address}}",
-            "https": "{{hdfs-site/dfs.namenode.secondary.https-address}}",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY"
-          },
-          "reporting": {
-            "ok": {
-              "text": "HTTP {0} response in {2:.3f}s"
-            },
-            "warning":{
-              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
-            },
-            "critical": {
-              "text": "Connection failed to {1} ({3})"
-            }
-          }
-        }
-      }
-    ],
-    "NFS_GATEWAY": [
-      {
-        "name": "nfsgateway_process",
-        "label": "NFS Gateway Process",
-        "description": "This host-level alert is triggered if the NFS Gateway process cannot be confirmed to be up and listening on the network.",
-        "interval": 1,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "PORT",
-          "uri": "{{hdfs-site/nfs.server.port}}",
-          "default_port": 2049,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      }
-    ],
-    "JOURNALNODE": [
-      {
-        "name": "journalnode_process",
-        "label": "JournalNode Web UI",
-        "description": "This host-level alert is triggered if the JournalNode Web UI is unreachable.",
-        "interval": 1,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "WEB",
-          "uri": {
-            "http": "{{hdfs-site/dfs.journalnode.http-address}}",
-            "https": "{{hdfs-site/dfs.journalnode.https-address}}",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0
-          },
-          "reporting": {
-            "ok": {
-              "text": "HTTP {0} response in {2:.3f}s"
-            },
-            "warning": {
-              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
-            },
-            "critical": {
-              "text": "Connection failed to {1} ({3})"
-            }
-          }
-        }
-      }
-    ],      
-    "DATANODE": [
-      {
-        "name": "datanode_process",
-        "label": "DataNode Process",
-        "description": "This host-level alert is triggered if the individual DataNode processes cannot be established to be up and listening on the network.",
-        "interval": 1,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "PORT",        
-          "uri": "{{hdfs-site/dfs.datanode.address}}",
-          "default_port": 50010,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      },
-      {
-        "name": "datanode_webui",
-        "label": "DataNode Web UI",
-        "description": "This host-level alert is triggered if the DataNode Web UI is unreachable.",
-        "interval": 1,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "WEB",
-          "uri": {
-            "http": "{{hdfs-site/dfs.datanode.http.address}}",
-            "https": "{{hdfs-site/dfs.datanode.https.address}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "connection_timeout": 5.0
-          },
-          "reporting": {
-            "ok": {
-              "text": "HTTP {0} response in {2:.3f}s"
-            },
-            "warning":{
-              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
-            },
-            "critical": {
-              "text": "Connection failed to {1} ({3})"
-            }
-          }
-        }
-      },    
-      {
-        "name": "datanode_storage",
-        "label": "DataNode Storage",
-        "description": "This host-level alert is triggered if storage capacity if full on the DataNode. It checks the DataNode JMX Servlet for the Capacity and Remaining properties. The threshold values are in percent.",
-        "interval": 2,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hdfs-site/dfs.datanode.http.address}}",
-            "https": "{{hdfs-site/dfs.datanode.https.address}}",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0
-          },
-          "reporting": {
-            "ok": {
-              "text": "Remaining Capacity:[{0}], Total Capacity:[{2:.0f}% Used, {1}]"
-            },
-            "warning": {
-              "text": "Remaining Capacity:[{0}], Total Capacity:[{2:.0f}% Used, {1}]",
-              "value": 75
-            },
-            "critical": {
-              "text": "Remaining Capacity:[{0}], Total Capacity:[{2:.0f}% Used, {1}]",
-              "value": 80
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=DataNode,name=FSDatasetState-*/Remaining",
-              "Hadoop:service=DataNode,name=FSDatasetState-*/Capacity"
-            ],
-            "value": "({1} - {0})/{1} * 100.0"
-          }
-        }
-      },
-      {
-        "name": "datanode_unmounted_data_dir",
-        "label": "DataNode Unmounted Data Dir",
-        "description": "This host-level alert is triggered if one of the data directories on a host was previously on a mount point and became unmounted. If the mount history file does not exist, then report an error if a host has one or more mounted data directories as well as one or more unmounted data directories on the root partition. This may indicate that a data directory is writing to the root partition, which is undesirable.",
-        "interval": 2,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDFS/2.1.0.2.0/package/alerts/alert_datanode_unmounted_data_dir.py"
-        }
-      },
-      {
-        "name": "datanode_heap_usage",
-        "label": "DataNode Heap Usage",
-        "description": "This host-level alert is triggered if heap usage goes past thresholds on the DataNode. It checks the DataNode JMXServlet for the MemHeapUsedM and MemHeapMaxM properties. The threshold values are in percent.",
-        "interval": 2,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hdfs-site/dfs.datanode.http.address}}",
-            "https": "{{hdfs-site/dfs.datanode.https.address}}",
-            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0
-          },
-          "reporting": {
-            "ok": {
-              "text": "Used Heap:[{2:.0f}%, {0} MB], Max Heap: {1} MB"
-            },
-            "warning": {
-              "text": "Used Heap:[{2:.0f}%, {0} MB], Max Heap: {1} MB",
-              "value": 80
-            },
-            "critical": {
-              "text": "Used Heap:[{2:.0f}%, {0} MB], Max Heap: {1} MB",
-              "value": 90
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=DataNode,name=JvmMetrics/MemHeapUsedM",
-              "Hadoop:service=DataNode,name=JvmMetrics/MemHeapMaxM"
-            ],
-            "value": "100.0 - (({1} - {0})/{1} * 100.0)"
-          }
-        }
-      }
-    ],
-    "ZKFC": [
-      {
-        "name": "hdfs_zookeeper_failover_controller_process",
-        "label": "ZooKeeper Failover Controller Process",
-        "description": "This host-level alert is triggered if the ZooKeeper Failover Controller process cannot be confirmed to be up and listening on the network.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "PORT",
-          "uri": "{{hdfs-site/dfs.ha.zkfc.port}}",
-          "default_port": 8019,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      }
-    ]
-  }
-}
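
Every deviation alert removed above shares the same shape: a SCRIPT source pointing at alert_metrics_deviation.py plus hidden parameters for the AMS application id, the metric name, the lookback interval, warning and critical growth percentages, and a minimumValue floor below which growth is ignored. The following sketch only illustrates how such parameters could fit together; it is not the actual alert_metrics_deviation.py logic, and classify_deviation is a hypothetical helper.

def classify_deviation(values, warning_pct, critical_pct, minimum_value):
    # Mean of the metric samples fetched from AMS for the configured interval.
    mean = sum(values) / float(len(values))
    # Ignore growth while the metric stays under the minimumValue floor.
    if mean < minimum_value:
        return "OK"
    # Express the swing as a percentage of the mean and compare it against the
    # metric.deviation.warning/critical.threshold parameters above.
    growth_pct = 100.0 * (max(values) - min(values)) / mean
    if growth_pct >= critical_pct:
        return "CRITICAL"
    if growth_pct >= warning_pct:
        return "WARNING"
    return "OK"

# Example: classify_deviation([40, 45, 130], 100, 200, 30) -> "WARNING"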


[13/19] ambari git commit: AMBARI-19229. Remove HDP-3.0.0 stack definition from Ambari-2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/balancer-emulator/balancer.log
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/balancer-emulator/balancer.log b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/balancer-emulator/balancer.log
deleted file mode 100644
index 2010c02..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/balancer-emulator/balancer.log
+++ /dev/null
@@ -1,29 +0,0 @@
-Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved
-Jul 28, 2014 5:01:49 PM           0                  0 B             5.74 GB            9.79 GB
-Jul 28, 2014 5:03:00 PM           1                  0 B             5.58 GB            9.79 GB
-Jul 28, 2014 5:04:07 PM           2                  0 B             5.40 GB            9.79 GB
-Jul 28, 2014 5:05:14 PM           3                  0 B             5.06 GB            9.79 GB
-Jul 28, 2014 5:05:50 PM           4                  0 B             5.06 GB            9.79 GB
-Jul 28, 2014 5:06:56 PM           5                  0 B             4.81 GB            9.79 GB
-Jul 28, 2014 5:07:33 PM           6                  0 B             4.80 GB            9.79 GB
-Jul 28, 2014 5:09:11 PM           7                  0 B             4.29 GB            9.79 GB
-Jul 28, 2014 5:09:47 PM           8                  0 B             4.29 GB            9.79 GB
-Jul 28, 2014 5:11:24 PM           9                  0 B             3.89 GB            9.79 GB
-Jul 28, 2014 5:12:00 PM          10                  0 B             3.86 GB            9.79 GB
-Jul 28, 2014 5:13:37 PM          11                  0 B             3.23 GB            9.79 GB
-Jul 28, 2014 5:15:13 PM          12                  0 B             2.53 GB            9.79 GB
-Jul 28, 2014 5:15:49 PM          13                  0 B             2.52 GB            9.79 GB
-Jul 28, 2014 5:16:25 PM          14                  0 B             2.51 GB            9.79 GB
-Jul 28, 2014 5:17:01 PM          15                  0 B             2.39 GB            9.79 GB
-Jul 28, 2014 5:17:37 PM          16                  0 B             2.38 GB            9.79 GB
-Jul 28, 2014 5:18:14 PM          17                  0 B             2.31 GB            9.79 GB
-Jul 28, 2014 5:18:50 PM          18                  0 B             2.30 GB            9.79 GB
-Jul 28, 2014 5:19:26 PM          19                  0 B             2.21 GB            9.79 GB
-Jul 28, 2014 5:20:02 PM          20                  0 B             2.10 GB            9.79 GB
-Jul 28, 2014 5:20:38 PM          21                  0 B             2.06 GB            9.79 GB
-Jul 28, 2014 5:22:14 PM          22                  0 B             1.68 GB            9.79 GB
-Jul 28, 2014 5:23:20 PM          23                  0 B             1.00 GB            9.79 GB
-Jul 28, 2014 5:23:56 PM          24                  0 B          1016.16 MB            9.79 GB
-Jul 28, 2014 5:25:33 PM          25                  0 B            30.55 MB            9.79 GB
-The cluster is balanced. Exiting...
-Balancing took 24.858033333333335 minutes

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/balancer-emulator/hdfs-command.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/balancer-emulator/hdfs-command.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/balancer-emulator/hdfs-command.py
deleted file mode 100644
index 88529b4..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/balancer-emulator/hdfs-command.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-import time
-import sys
-from threading import Thread
-
-
-def write_function(path, handle, interval):
-  with open(path) as f:
-      for line in f:
-          handle.write(line)
-          handle.flush()
-          time.sleep(interval)
-          
-thread = Thread(target=write_function, args=('balancer.out', sys.stdout, 1.5))
-thread.start()
-
-threaderr = Thread(target=write_function, args=('balancer.err', sys.stderr, 1.5 * 0.023))
-threaderr.start()
-
-thread.join()
-
-
-def rebalancer_out():
-  write_function('balancer.out', sys.stdout, 1.5)
-
-def rebalancer_err():
-  write_function('balancer.err', sys.stderr, 1.5 * 0.023)
\ No newline at end of file
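
hdfs-command.py above emulates a running balancer by replaying pre-recorded balancer.out and balancer.err files line by line, flushing and sleeping between lines (stderr is replayed much faster because its interval is 1.5 * 0.023 seconds). The following is a minimal, self-contained sketch of the same replay idea; the file name and delay are placeholders, not part of the removed script.

import sys
import time

def replay(path, handle, interval):
    # Stream an existing log file to the given handle, pausing between lines
    # so consumers see gradually appearing output, as a live balancer would produce.
    with open(path) as f:
        for line in f:
            handle.write(line)
            handle.flush()
            time.sleep(interval)

if __name__ == "__main__":
    # "sample-balancer.out" is a placeholder; the emulator above reads
    # balancer.out and balancer.err located next to the script.
    replay("sample-balancer.out", sys.stdout, 0.5)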

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py
deleted file mode 100644
index 130c021..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py
+++ /dev/null
@@ -1,178 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import datanode_upgrade
-from hdfs_datanode import datanode
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import conf_select, stack_select
-from resource_management.libraries.functions.constants import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.security_commons import build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, FILE_TYPE_XML
-from resource_management.core.logger import Logger
-from hdfs import hdfs
-from ambari_commons.os_family_impl import OsFamilyImpl
-from ambari_commons import OSConst
-from utils import get_hdfs_binary
-
-class DataNode(Script):
-
-  def get_component_name(self):
-    return "hadoop-hdfs-datanode"
-
-  def get_hdfs_binary(self):
-    """
-    Get the name or path to the hdfs binary depending on the component name.
-    """
-    component_name = self.get_component_name()
-    return get_hdfs_binary(component_name)
-
-
-  def install(self, env):
-    import params
-    env.set_params(params)
-    self.install_packages(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    hdfs("datanode")
-    datanode(action="configure")
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    self.configure(env)
-    datanode(action="start")
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    # For rolling upgrades, the pre-upgrade shutdown (via dfsadmin) stops the DataNode,
-    # so the normal stop below is only needed as a fallback.
-
-    hdfs_binary = self.get_hdfs_binary()
-    if upgrade_type == "rolling":
-      stopped = datanode_upgrade.pre_rolling_upgrade_shutdown(hdfs_binary)
-      if not stopped:
-        datanode(action="stop")
-    else:
-      datanode(action="stop")
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    datanode(action = "status")
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class DataNodeDefault(DataNode):
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing DataNode Stack Upgrade pre-restart")
-    import params
-    env.set_params(params)
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-hdfs-datanode", params.version)
-
-  def post_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing DataNode Stack Upgrade post-restart")
-    import params
-    env.set_params(params)
-    hdfs_binary = self.get_hdfs_binary()
-    # ensure the DataNode has started and rejoined the cluster
-    datanode_upgrade.post_upgrade_check(hdfs_binary)
-
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    props_value_check = None
-    props_empty_check = ['dfs.datanode.keytab.file',
-                         'dfs.datanode.kerberos.principal']
-    props_read_check = ['dfs.datanode.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-    hdfs_expectations.update(hdfs_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML,
-                                                  'hdfs-site.xml': FILE_TYPE_XML})
-
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('hdfs-site' not in security_params or
-                  'dfs.datanode.keytab.file' not in security_params['hdfs-site'] or
-                  'dfs.datanode.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['dfs.datanode.keytab.file'],
-                                security_params['hdfs-site']['dfs.datanode.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-      
-  def get_log_folder(self):
-    import params
-    return params.hdfs_log_dir
-  
-  def get_user(self):
-    import params
-    return params.hdfs_user
-
-  def get_pid_files(self):
-    import status_params
-    return [status_params.datanode_pid_file]
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class DataNodeWindows(DataNode):
-  def install(self, env):
-    import install_params
-    self.install_packages(env)
-
-if __name__ == "__main__":
-  DataNode().execute()
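
datanode.py above follows the standard Ambari component-script shape: a Script subclass exposing install, configure, start, stop and status, OS-family-specific subclasses selected through @OsFamilyImpl, and Script.execute() dispatching the requested command. The skeleton below is a hypothetical, stripped-down sketch of that shape, assuming the agent-side resource_management library is available; the component name and comments are illustrative, not part of the removed stack definition.

from resource_management.libraries.script.script import Script

class ExampleComponent(Script):
  def install(self, env):
    # Installs the packages declared for the component in its metainfo.xml.
    self.install_packages(env)

  def configure(self, env):
    # Real scripts call env.set_params(params) and render config files here.
    pass

  def start(self, env, upgrade_type=None):
    # Configure first, then start the daemon (datanode(action="start") above).
    self.configure(env)

  def stop(self, env, upgrade_type=None):
    pass

  def status(self, env):
    # Real scripts raise ComponentIsNotRunning when the pid check fails.
    pass

if __name__ == "__main__":
  ExampleComponent().execute()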

http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode_upgrade.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode_upgrade.py
deleted file mode 100644
index b55237d..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode_upgrade.py
+++ /dev/null
@@ -1,156 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import re
-
-from resource_management.core.logger import Logger
-from resource_management.core.exceptions import Fail
-from resource_management.core.resources.system import Execute
-from resource_management.core import shell
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.decorator import retry
-from resource_management.libraries.functions import check_process_status
-from resource_management.core import ComponentIsNotRunning
-from utils import get_dfsadmin_base_command
-
-
-def pre_rolling_upgrade_shutdown(hdfs_binary):
-  """
-  Runs the "shutdownDatanode {ipc_address} upgrade" command to shutdown the
-  DataNode in preparation for an upgrade. This will then periodically check
-  "getDatanodeInfo" to ensure the DataNode has shutdown correctly.
-  This function will obtain the Kerberos ticket if security is enabled.
-  :param hdfs_binary: name/path of the HDFS binary to use
-  :return: True if the shutdown ran OK (even with errors), False if the DataNode needs to be stopped forcefully.
-  """
-  import params
-
-  Logger.info('DataNode executing "shutdownDatanode" command in preparation for upgrade...')
-  if params.security_enabled:
-    Execute(params.dn_kinit_cmd, user = params.hdfs_user)
-
-  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
-  command = format('{dfsadmin_base_command} -shutdownDatanode {dfs_dn_ipc_address} upgrade')
-
-  code, output = shell.call(command, user=params.hdfs_user)
-  if code == 0:
-    # verify that the datanode is down
-    _check_datanode_shutdown(hdfs_binary)
-  else:
-    # Due to bug HDFS-7533, the DataNode may not always shut down during a stack upgrade, so it is necessary to kill it.
-    if output is not None and re.search("Shutdown already in progress", output):
-      Logger.error("Due to a known issue in DataNode, the command {0} did not work, so the DataNode will need to be shut down forcefully.".format(command))
-      return False
-  return True
-
-
-def post_upgrade_check(hdfs_binary):
-  """
-  Verifies that the DataNode has rejoined the cluster. This function will
-  obtain the Kerberos ticket if security is enabled.
-  :param hdfs_binary: name/path of the HDFS binary to use
-  :return:
-  """
-  import params
-
-  Logger.info("Checking that the DataNode has rejoined the cluster after upgrade...")
-  if params.security_enabled:
-    Execute(params.dn_kinit_cmd, user=params.hdfs_user)
-
-  # verify that the datanode has started and rejoined the HDFS cluster
-  _check_datanode_startup(hdfs_binary)
-
-
-def is_datanode_process_running():
-  import params
-  try:
-    check_process_status(params.datanode_pid_file)
-    return True
-  except ComponentIsNotRunning:
-    return False
-
-@retry(times=24, sleep_time=5, err_class=Fail)
-def _check_datanode_shutdown(hdfs_binary):
-  """
-  Checks that a DataNode is down by running "hdfs dfsadmin -getDatanodeInfo"
-  several times, pausing in between runs. Once the DataNode stops responding
-  this method will return, otherwise it will raise a Fail(...) and retry
-  automatically.
-  The stack defaults for retrying for HDFS are also way too slow for this
-  command; they are set to wait about 45 seconds between client retries. As
-  a result, a single execution of dfsadmin will take 45 seconds to retry and
-  the DataNode may be marked as dead, causing problems with HBase.
-  https://issues.apache.org/jira/browse/HDFS-8510 tracks reducing the
-  times for ipc.client.connect.retry.interval. In the meantime, override them
-  here, but only for RU.
-  :param hdfs_binary: name/path of the HDFS binary to use
-  :return:
-  """
-  import params
-
-  # override stock retry timeouts since after 30 seconds, the datanode is
-  # marked as dead and can affect HBase during RU
-  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
-  command = format('{dfsadmin_base_command} -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo {dfs_dn_ipc_address}')
-
-  try:
-    Execute(command, user=params.hdfs_user, tries=1)
-  except:
-    Logger.info("DataNode has successfully shutdown for upgrade.")
-    return
-
-  Logger.info("DataNode has not shutdown.")
-  raise Fail('DataNode has not shutdown.')
-
-
-@retry(times=30, sleep_time=30, err_class=Fail) # keep trying for 15 mins
-def _check_datanode_startup(hdfs_binary):
-  """
-  Checks that a DataNode process is running and DataNode is reported as being alive via the
-  "hdfs dfsadmin -fs {namenode_address} -report -live" command. Once the DataNode is found to be
-  alive this method will return, otherwise it will raise a Fail(...) and retry
-  automatically.
-  :param hdfs_binary: name/path of the HDFS binary to use
-  :return:
-  """
-
-  if not is_datanode_process_running():
-    Logger.info("DataNode process is not running")
-    raise Fail("DataNode process is not running")
-
-  import params
-  import socket
-
-  try:
-    dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
-    command = dfsadmin_base_command + ' -report -live'
-    return_code, hdfs_output = shell.call(command, user=params.hdfs_user)
-  except:
-    raise Fail('Unable to determine if the DataNode has started after upgrade.')
-
-  if return_code == 0:
-    hostname = params.hostname.lower()
-    hostname_ip =  socket.gethostbyname(params.hostname.lower())
-    if hostname in hdfs_output.lower() or hostname_ip in hdfs_output.lower():
-      Logger.info("DataNode {0} reports that it has rejoined the cluster.".format(params.hostname))
-      return
-    else:
-      raise Fail("DataNode {0} was not found in the list of live DataNodes".format(params.hostname))
-
-  # return_code is not 0, fail
-  raise Fail("Unable to determine if the DataNode has started after upgrade (result code {0})".format(str(return_code)))

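The shutdown check in the removed datanode_upgrade.py is a bounded poll: each attempt runs dfsadmin once with short ipc retry overrides, and the @retry decorator keeps raising Fail until the DataNode stops answering (24 tries, 5 seconds apart). A minimal plain-Python sketch of the same pattern; the helper name and the command argument are placeholders, not part of the removed script:

import subprocess
import time

def wait_for_shutdown(check_cmd, attempts=24, pause=5):
  # check_cmd is the dfsadmin -getDatanodeInfo invocation as a list of args;
  # a non-zero exit code means the DataNode has stopped answering.
  for _ in range(attempts):
    if subprocess.call(check_cmd) != 0:
      return True
    time.sleep(pause)
  return False
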
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
deleted file mode 100644
index d9b62e2..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
+++ /dev/null
@@ -1,178 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management.libraries.script.script import Script
-from resource_management.core.resources.system import Directory, File, Link
-from resource_management.core.resources import Package
-from resource_management.core.source import Template
-from resource_management.core.resources.service import ServiceConfig
-from resource_management.libraries.resources.xml_config import XmlConfig
-import os
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-from ambari_commons import OSConst
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def hdfs(name=None):
-  import params
-
-  if params.create_lib_snappy_symlinks:
-    install_snappy()
-  
-  # On some OSes this directory may not exist, so create it before placing files in it
-  Directory(params.limits_conf_dir,
-            create_parents = True,
-            owner='root',
-            group='root'
-  )
-
-  File(os.path.join(params.limits_conf_dir, 'hdfs.conf'),
-       owner='root',
-       group='root',
-       mode=0644,
-       content=Template("hdfs.conf.j2")
-  )
-
-  if params.security_enabled:
-    tc_mode = 0644
-    tc_owner = "root"
-  else:
-    tc_mode = None
-    tc_owner = params.hdfs_user
-
-  if "hadoop-policy" in params.config['configurations']:
-    XmlConfig("hadoop-policy.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['hadoop-policy'],
-              configuration_attributes=params.config['configuration_attributes']['hadoop-policy'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-  if "ssl-client" in params.config['configurations']:
-    XmlConfig("ssl-client.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['ssl-client'],
-              configuration_attributes=params.config['configuration_attributes']['ssl-client'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-    Directory(params.hadoop_conf_secure_dir,
-              create_parents = True,
-              owner='root',
-              group=params.user_group,
-              cd_access='a',
-              )
-
-    XmlConfig("ssl-client.xml",
-              conf_dir=params.hadoop_conf_secure_dir,
-              configurations=params.config['configurations']['ssl-client'],
-              configuration_attributes=params.config['configuration_attributes']['ssl-client'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-  if "ssl-server" in params.config['configurations']:
-    XmlConfig("ssl-server.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['ssl-server'],
-              configuration_attributes=params.config['configuration_attributes']['ssl-server'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-  XmlConfig("hdfs-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['hdfs-site'],
-            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
-            owner=params.hdfs_user,
-            group=params.user_group
-  )
-
-  XmlConfig("core-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['core-site'],
-            configuration_attributes=params.config['configuration_attributes']['core-site'],
-            owner=params.hdfs_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  File(os.path.join(params.hadoop_conf_dir, 'slaves'),
-       owner=tc_owner,
-       content=Template("slaves.j2")
-  )
-  
-  if params.lzo_enabled and len(params.lzo_packages) > 0:
-      Package(params.lzo_packages,
-              retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
-              retry_count=params.agent_stack_retry_count)
-      
-def install_snappy():
-  import params
-  Directory([params.so_target_dir_x86, params.so_target_dir_x64],
-            create_parents = True,
-  )    
-  Link(params.so_target_x86,
-       to=params.so_src_x86,
-  )
-  Link(params.so_target_x64,
-       to=params.so_src_x64,
-  )
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def hdfs(component=None):
-  import params
-  if component == "namenode":
-    directories = params.dfs_name_dir.split(",")
-    Directory(directories,
-              owner=params.hdfs_user,
-              mode="(OI)(CI)F",
-              create_parents = True
-    )
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=params.hdfs_user,
-         mode="f",
-         )
-  if component in params.service_map:
-    service_name = params.service_map[component]
-    ServiceConfig(service_name,
-                  action="change_user",
-                  username=params.hdfs_user,
-                  password=Script.get_password(params.hdfs_user))
-
-  if "hadoop-policy" in params.config['configurations']:
-    XmlConfig("hadoop-policy.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['hadoop-policy'],
-              owner=params.hdfs_user,
-              mode="f",
-              configuration_attributes=params.config['configuration_attributes']['hadoop-policy']
-    )
-
-  XmlConfig("hdfs-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['hdfs-site'],
-            owner=params.hdfs_user,
-            mode="f",
-            configuration_attributes=params.config['configuration_attributes']['hdfs-site']
-  )

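hdfs.py defines two functions that share the name hdfs(), one decorated for the default OS family and one for Windows; OsFamilyFuncImpl registers each implementation and routes calls according to the agent's detected OS family. A rough plain-Python sketch of that registration-and-dispatch idea; the registry, decorator, and dispatch() helper below are hypothetical stand-ins, not the ambari_commons internals:

_IMPLS = {}

def os_family_impl(family):
  # Register one implementation per OS family under the same public name.
  def register(fn):
    _IMPLS[family] = fn
    return fn
  return register

@os_family_impl("default")
def hdfs(name=None):
  return "default-family setup for %s" % name

@os_family_impl("winsrv")
def hdfs(component=None):  # same name, per-OS signature
  return "windows setup for %s" % component

def dispatch(current_family, *args, **kwargs):
  return _IMPLS[current_family](*args, **kwargs)

print(dispatch("default", name="datanode"))
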
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py
deleted file mode 100644
index 4dabdbc..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py
+++ /dev/null
@@ -1,122 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import conf_select, stack_select
-from resource_management.libraries.functions.constants import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.security_commons import build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
-  FILE_TYPE_XML
-from hdfs import hdfs
-from ambari_commons.os_family_impl import OsFamilyImpl
-from ambari_commons import OSConst
-from resource_management.core.exceptions import ClientComponentHasNoStatus
-
-class HdfsClient(Script):
-
-  def install(self, env):
-    import params
-    env.set_params(params)
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    hdfs()
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class HdfsClientDefault(HdfsClient):
-
-  def get_component_name(self):
-    return "hadoop-client"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-client", params.version)
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'core-site.xml': FILE_TYPE_XML})
-
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues: # If all validations passed successfully
-        if status_params.hdfs_user_principal or status_params.hdfs_user_keytab:
-          try:
-            cached_kinit_executor(status_params.kinit_path_local,
-                       status_params.hdfs_user,
-                       status_params.hdfs_user_keytab,
-                       status_params.hdfs_user_principal,
-                       status_params.hostname,
-                       status_params.tmp_dir)
-            self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-          except Exception as e:
-            self.put_structured_out({"securityState": "ERROR"})
-            self.put_structured_out({"securityStateErrorInfo": str(e)})
-        else:
-          self.put_structured_out({"securityIssuesFound": "hdfs principal and/or keytab file is not specified"})
-          self.put_structured_out({"securityState": "UNSECURED"})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class HdfsClientWindows(HdfsClient):
-  def install(self, env):
-    import install_params
-    self.install_packages(env)
-    self.configure(env)
-
-if __name__ == "__main__":
-  HdfsClient().execute()

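security_status() above reduces to: read core-site from disk, compare a handful of properties against expected values, and publish a structured security state, only attempting the cached kinit when validation passes. A small sketch of just the comparison step; the function name is a placeholder, and the real validate_security_config_properties also covers empty-value and read checks:

def validate_expectations(actual_props, expected_props):
  # Report every property whose on-disk value differs from the expectation.
  issues = {}
  for prop, expected in expected_props.items():
    found = actual_props.get(prop)
    if found is None or str(found).lower() != str(expected).lower():
      issues[prop] = "expected %s, found %s" % (expected, found)
  return issues

core_site = {"hadoop.security.authentication": "kerberos",
             "hadoop.security.authorization": "false"}
print(validate_expectations(core_site,
                            {"hadoop.security.authentication": "kerberos",
                             "hadoop.security.authorization": "true"}))
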
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_datanode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_datanode.py
deleted file mode 100644
index 2d3d4f5..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_datanode.py
+++ /dev/null
@@ -1,85 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-from resource_management.core.resources.system import Directory, Execute, File
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions.mounted_dirs_helper import handle_mounted_dirs
-from utils import service
-from resource_management.core.resources.service import Service
-from ambari_commons.os_family_impl import OsFamilyImpl, OsFamilyFuncImpl
-from ambari_commons import OSConst, OSCheck
-
-# check_windows_service_status is only available on Windows agents
-if OSCheck.is_windows_family():
-  from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
-
-
-def create_dirs(data_dir):
-  """
-  :param data_dir: The directory to create
-  :param params: parameters
-  """
-  import params
-  Directory(data_dir,
-            create_parents = True,
-            cd_access="a",
-            mode=0755,
-            owner=params.hdfs_user,
-            group=params.user_group,
-            ignore_failures=True
-  )
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def datanode(action=None):
-  if action == "configure":
-    import params
-    Directory(params.dfs_domain_socket_dir,
-              create_parents = True,
-              mode=0751,
-              owner=params.hdfs_user,
-              group=params.user_group)
-
-    # handle_mounted_dirs ensures that we don't create dfs data dirs which are temporarily unavailable (unmounted) and are intended to reside on a different mount.
-    data_dir_to_mount_file_content = handle_mounted_dirs(create_dirs, params.dfs_data_dirs, params.data_dir_mount_file, params)
-    # create a history file used by handle_mounted_dirs
-    File(params.data_dir_mount_file,
-         owner=params.hdfs_user,
-         group=params.user_group,
-         mode=0644,
-         content=data_dir_to_mount_file_content
-    )
-
-  elif action == "start" or action == "stop":
-    import params
-    service(
-      action=action, name="datanode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True
-    )
-  elif action == "status":
-    import status_params
-    check_process_status(status_params.datanode_pid_file)
-
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def datanode(action=None):
-  if action == "configure":
-    pass
-  elif(action == "start" or action == "stop"):
-    import params
-    Service(params.datanode_win_service_name, action=action)
-  elif action == "status":
-    import status_params
-    check_windows_service_status(status_params.datanode_win_service_name)
\ No newline at end of file

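The configure path above leans on handle_mounted_dirs to avoid recreating a DataNode data directory on the root filesystem when its disk is unmounted: each directory's current mount point is compared with the one recorded in params.data_dir_mount_file, and only directories whose mount has not silently changed are (re)created. A rough plain-Python sketch of that comparison; the helper names are illustrative, and the real helper also rewrites the history file and logs skipped directories:

import os

def get_mount_point(path):
  # Walk upward until we reach the mount point that currently backs the path.
  path = os.path.abspath(path)
  while not os.path.ismount(path):
    path = os.path.dirname(path)
  return path

def usable_data_dirs(data_dirs, last_known_mounts):
  usable = []
  for d in data_dirs:
    # A directory whose recorded mount differs from the current one is
    # probably sitting on an unmounted disk, so skip creating it.
    if d not in last_known_mounts or last_known_mounts[d] == get_mount_point(d):
      usable.append(d)
  return usable
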
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
deleted file mode 100644
index 23119f0..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
+++ /dev/null
@@ -1,562 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os.path
-import time
-
-from resource_management.core import shell
-from resource_management.core.source import Template
-from resource_management.core.resources.system import File, Execute, Directory
-from resource_management.core.resources.service import Service
-from resource_management.libraries.functions import namenode_ha_utils
-from resource_management.libraries.functions.decorator import retry
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.resources.execute_hadoop import ExecuteHadoop
-from resource_management.libraries.functions import Direction
-from ambari_commons import OSCheck, OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl, OsFamilyFuncImpl
-from utils import get_dfsadmin_base_command
-
-if OSCheck.is_windows_family():
-  from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
-
-from resource_management.core.exceptions import Fail
-from resource_management.core.logger import Logger
-
-from utils import service, safe_zkfc_op, is_previous_fs_image
-from setup_ranger_hdfs import setup_ranger_hdfs, create_ranger_audit_hdfs_directories
-
-import namenode_upgrade
-
-def wait_for_safemode_off(hdfs_binary, afterwait_sleep=0, execute_kinit=False):
-  """
-  During NonRolling (aka Express Upgrade), after starting NameNode, which is still in safemode, and then starting
-  all of the DataNodes, we need for NameNode to receive all of the block reports and leave safemode.
-  If HA is present, then this command will run individually on each NameNode, which checks for its own address.
-  """
-  import params
-
-  retries = 115
-  sleep_seconds = 10
-  sleep_minutes = int(sleep_seconds * retries / 60)
-
-  Logger.info("Waiting up to {0} minutes for the NameNode to leave Safemode...".format(sleep_minutes))
-
-  if params.security_enabled and execute_kinit:
-    kinit_command = format("{params.kinit_path_local} -kt {params.hdfs_user_keytab} {params.hdfs_principal_name}")
-    Execute(kinit_command, user=params.hdfs_user, logoutput=True)
-
-  try:
-    # Note, this fails if namenode_address isn't prefixed with "params."
-
-    dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary, use_specific_namenode=True)
-    is_namenode_safe_mode_off = dfsadmin_base_command + " -safemode get | grep 'Safe mode is OFF'"
-
-    # Wait up to sleep_minutes (retries * sleep_seconds) for Safemode to turn OFF
-    Execute(is_namenode_safe_mode_off, tries=retries, try_sleep=sleep_seconds,
-      user=params.hdfs_user, logoutput=True)
-
-    # Wait a bit more since YARN still depends on block reports coming in.
-    # Also saw intermittent errors with HBASE service check if it was done too soon.
-    time.sleep(afterwait_sleep)
-  except Fail:
-    Logger.error("The NameNode is still in Safemode. Please be careful with commands that need Safemode OFF.")
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None,
-    upgrade_suspended=False, env=None):
-
-  if action is None:
-    raise Fail('"action" parameter is required for function namenode().')
-
-  if action in ["start", "stop"] and hdfs_binary is None:
-    raise Fail('"hdfs_binary" parameter is required for function namenode().')
-
-  if action == "configure":
-    import params
-    #we need this directory to be present before any action(HA manual steps for
-    #additional namenode)
-    create_name_dirs(params.dfs_name_dir)
-  elif action == "start":
-    Logger.info("Called service {0} with upgrade_type: {1}".format(action, str(upgrade_type)))
-    setup_ranger_hdfs(upgrade_type=upgrade_type)
-    import params
-    if do_format and not params.hdfs_namenode_format_disabled:
-      format_namenode()
-      pass
-
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=params.hdfs_user,
-         group=params.user_group
-    )
-
-    if params.dfs_ha_enabled and \
-      params.dfs_ha_namenode_standby is not None and \
-      params.hostname == params.dfs_ha_namenode_standby:
-        # if the current host is the standby NameNode in an HA deployment
-        # run the bootstrap command, to start the NameNode in standby mode
-        # this requires that the active NameNode is already up and running,
-        # so this execute should be re-tried upon failure, up to a timeout
-        success = bootstrap_standby_namenode(params)
-        if not success:
-          raise Fail("Could not bootstrap standby namenode")
-
-    if upgrade_type == "rolling" and params.dfs_ha_enabled:
-      # Most likely, ZKFC is up since RU will initiate the failover command. However, if that failed, it would have tried
-      # to kill ZKFC manually, so we need to start it if not already running.
-      safe_zkfc_op(action, env)
-
-    options = ""
-    if upgrade_type == "rolling":
-      if params.upgrade_direction == Direction.UPGRADE:
-        options = "-rollingUpgrade started"
-      elif params.upgrade_direction == Direction.DOWNGRADE:
-        options = "-rollingUpgrade downgrade"
-    elif upgrade_type == "nonrolling":
-      is_previous_image_dir = is_previous_fs_image()
-      Logger.info("Previous file system image dir present is {0}".format(str(is_previous_image_dir)))
-
-      if params.upgrade_direction == Direction.UPGRADE:
-        options = "-rollingUpgrade started"
-      elif params.upgrade_direction == Direction.DOWNGRADE:
-        options = "-rollingUpgrade downgrade"
-    elif upgrade_type is None and upgrade_suspended is True:
-      # the rollingUpgrade flag must be passed in during a suspended upgrade when starting NN
-      if os.path.exists(namenode_upgrade.get_upgrade_in_progress_marker()):
-        options = "-rollingUpgrade started"
-      else:
-        Logger.info("The NameNode upgrade marker file {0} does not exist, yet an upgrade is currently suspended. "
-                    "Assuming that the upgrade of NameNode has not occurred yet.".format(namenode_upgrade.get_upgrade_in_progress_marker()))
-
-    Logger.info("Options for start command are: {0}".format(options))
-
-    service(
-      action="start",
-      name="namenode",
-      user=params.hdfs_user,
-      options=options,
-      create_pid_dir=True,
-      create_log_dir=True
-    )
-
-    if params.security_enabled:
-      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
-              user = params.hdfs_user)
-
-    # ___Scenario___________|_Expected safemode state__|_Wait for safemode OFF____|
-    # no-HA                 | ON -> OFF                | Yes                      |
-    # HA and active         | ON -> OFF                | Yes                      |
-    # HA and standby        | no change                | No                       |
-    # RU with HA on active  | ON -> OFF                | Yes                      |
-    # RU with HA on standby | ON -> OFF                | Yes                      |
-    # EU with HA on active  | ON -> OFF                | No                       |
-    # EU with HA on standby | ON -> OFF                | No                       |
-    # EU non-HA             | ON -> OFF                | No                       |
-
-    # because we do things like create directories after starting NN,
-    # the vast majority of the time this should be True - it should only
-    # be False if this is HA and we are the Standby NN
-    ensure_safemode_off = True
-
-    # True if this is the only NameNode (non-HA) or if its the Active one in HA
-    is_active_namenode = True
-
-    if params.dfs_ha_enabled:
-      Logger.info("Waiting for the NameNode to broadcast whether it is Active or Standby...")
-
-      if is_this_namenode_active() is False:
-        # we are the STANDBY NN
-        is_active_namenode = False
-
-        # we are the STANDBY NN and this restart is not part of an upgrade
-        if upgrade_type is None:
-          ensure_safemode_off = False
-
-
-    # During an Express Upgrade, NameNode will not leave SafeMode until the DataNodes are started,
-    # so always disable the Safemode check
-    if upgrade_type == "nonrolling":
-      ensure_safemode_off = False
-
-    # some informative logging separate from the above logic to keep things a little cleaner
-    if ensure_safemode_off:
-      Logger.info("Waiting for this NameNode to leave Safemode due to the following conditions: HA: {0}, isActive: {1}, upgradeType: {2}".format(
-        params.dfs_ha_enabled, is_active_namenode, upgrade_type))
-    else:
-      Logger.info("Skipping Safemode check due to the following conditions: HA: {0}, isActive: {1}, upgradeType: {2}".format(
-        params.dfs_ha_enabled, is_active_namenode, upgrade_type))
-
-
-    # wait for Safemode to end
-    if ensure_safemode_off:
-      wait_for_safemode_off(hdfs_binary)
-
-    # Always run this on the "Active" NN unless Safemode has been ignored
-    # in the case where safemode was ignored (like during an express upgrade), then
-    # NN will be in SafeMode and cannot have directories created
-    if is_active_namenode and ensure_safemode_off:
-      create_hdfs_directories()
-      create_ranger_audit_hdfs_directories()
-    else:
-      Logger.info("Skipping creation of HDFS directories since this is either not the Active NameNode or we did not wait for Safemode to finish.")
-
-  elif action == "stop":
-    import params
-    service(
-      action="stop", name="namenode", 
-      user=params.hdfs_user
-    )
-  elif action == "status":
-    import status_params
-    check_process_status(status_params.namenode_pid_file)
-  elif action == "decommission":
-    decommission()
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None,
-    upgrade_suspended=False, env=None):
-
-  if action is None:
-    raise Fail('"action" parameter is required for function namenode().')
-
-  if action in ["start", "stop"] and hdfs_binary is None:
-    raise Fail('"hdfs_binary" parameter is required for function namenode().')
-
-  if action == "configure":
-    pass
-  elif action == "start":
-    import params
-    #TODO: Replace with format_namenode()
-    namenode_format_marker = os.path.join(params.hadoop_conf_dir,"NN_FORMATTED")
-    if not os.path.exists(namenode_format_marker):
-      hadoop_cmd = "cmd /C %s" % (os.path.join(params.hadoop_home, "bin", "hadoop.cmd"))
-      Execute("%s namenode -format" % (hadoop_cmd))
-      open(namenode_format_marker, 'a').close()
-    Service(params.namenode_win_service_name, action=action)
-  elif action == "stop":
-    import params
-    Service(params.namenode_win_service_name, action=action)
-  elif action == "status":
-    import status_params
-    check_windows_service_status(status_params.namenode_win_service_name)
-  elif action == "decommission":
-    decommission()
-
-def create_name_dirs(directories):
-  import params
-
-  dirs = directories.split(",")
-  Directory(dirs,
-            mode=0755,
-            owner=params.hdfs_user,
-            group=params.user_group,
-            create_parents = True,
-            cd_access="a",
-  )
-
-
-def create_hdfs_directories():
-  import params
-
-  params.HdfsResource(params.hdfs_tmp_dir,
-                       type="directory",
-                       action="create_on_execute",
-                       owner=params.hdfs_user,
-                       mode=0777,
-  )
-  params.HdfsResource(params.smoke_hdfs_user_dir,
-                       type="directory",
-                       action="create_on_execute",
-                       owner=params.smoke_user,
-                       mode=params.smoke_hdfs_user_mode,
-  )
-  params.HdfsResource(None, 
-                      action="execute",
-  )
-
-def format_namenode(force=None):
-  import params
-
-  old_mark_dir = params.namenode_formatted_old_mark_dirs
-  mark_dir = params.namenode_formatted_mark_dirs
-  dfs_name_dir = params.dfs_name_dir
-  hdfs_user = params.hdfs_user
-  hadoop_conf_dir = params.hadoop_conf_dir
-
-  if not params.dfs_ha_enabled:
-    if force:
-      ExecuteHadoop('namenode -format',
-                    bin_dir=params.hadoop_bin_dir,
-                    conf_dir=hadoop_conf_dir)
-    else:
-      if not is_namenode_formatted(params):
-        Execute(format("hdfs --config {hadoop_conf_dir} namenode -format -nonInteractive"),
-                user = params.hdfs_user,
-                path = [params.hadoop_bin_dir]
-        )
-        for m_dir in mark_dir:
-          Directory(m_dir,
-            create_parents = True
-          )
-  else:
-    if params.dfs_ha_namenode_active is not None and \
-       params.hostname == params.dfs_ha_namenode_active:
-      # check and run the format command in the HA deployment scenario
-      # only format the "active" namenode in an HA deployment
-      if force:
-        ExecuteHadoop('namenode -format',
-                      bin_dir=params.hadoop_bin_dir,
-                      conf_dir=hadoop_conf_dir)
-      else:
-        nn_name_dirs = params.dfs_name_dir.split(',')
-        if not is_namenode_formatted(params):
-          try:
-            Execute(format("hdfs --config {hadoop_conf_dir} namenode -format -nonInteractive"),
-                    user = params.hdfs_user,
-                    path = [params.hadoop_bin_dir]
-            )
-          except Fail:
-            # We need to clean-up mark directories, so we can re-run format next time.
-            for nn_name_dir in nn_name_dirs:
-              Execute(format("rm -rf {nn_name_dir}/*"),
-                      user = params.hdfs_user,
-              )
-            raise
-          for m_dir in mark_dir:
-            Directory(m_dir,
-              create_parents = True
-            )
-
-def is_namenode_formatted(params):
-  old_mark_dirs = params.namenode_formatted_old_mark_dirs
-  mark_dirs = params.namenode_formatted_mark_dirs
-  nn_name_dirs = params.dfs_name_dir.split(',')
-  marked = False
-  # Check if name directories have been marked as formatted
-  for mark_dir in mark_dirs:
-    if os.path.isdir(mark_dir):
-      marked = True
-      Logger.info(format("{mark_dir} exists. Namenode DFS already formatted"))
-    
-  # Ensure that mark dirs are created for all name directories
-  if marked:
-    for mark_dir in mark_dirs:
-      Directory(mark_dir,
-        create_parents = True
-      )      
-    return marked  
-  
-  # Move all old format markers to new place
-  for old_mark_dir in old_mark_dirs:
-    if os.path.isdir(old_mark_dir):
-      for mark_dir in mark_dirs:
-        Execute(('cp', '-ar', old_mark_dir, mark_dir),
-                sudo = True
-        )
-        marked = True
-      Directory(old_mark_dir,
-        action = "delete"
-      )    
-    elif os.path.isfile(old_mark_dir):
-      for mark_dir in mark_dirs:
-        Directory(mark_dir,
-                  create_parents = True,
-        )
-      Directory(old_mark_dir,
-        action = "delete"
-      )
-      marked = True
-      
-  if marked:
-    return True
-
-  # Check if name dirs are not empty
-  for name_dir in nn_name_dirs:
-    code, out = shell.call(("ls", name_dir))
-    dir_exists_and_valid = bool(not code)
-
-    if not dir_exists_and_valid: # covers the case where the disk exists but is failing at the moment (ls: reading directory ...: Input/output error)
-      Logger.info(format("NameNode will not be formatted because the directory {name_dir} is missing or cannot be checked for content. {out}"))
-      return True
-
-    try:
-      Execute(format("ls {name_dir} | wc -l  | grep -q ^0$"),
-      )
-    except Fail:
-      Logger.info(format("NameNode will not be formatted since {name_dir} exists and contains content"))
-      return True
-       
-  return False
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def decommission():
-  import params
-
-  hdfs_user = params.hdfs_user
-  conf_dir = params.hadoop_conf_dir
-  user_group = params.user_group
-  nn_kinit_cmd = params.nn_kinit_cmd
-  
-  File(params.exclude_file_path,
-       content=Template("exclude_hosts_list.j2"),
-       owner=hdfs_user,
-       group=user_group
-  )
-  
-  if not params.update_exclude_file_only:
-    Execute(nn_kinit_cmd,
-            user=hdfs_user
-    )
-
-    if params.dfs_ha_enabled:
-      # due to a bug in hdfs, refreshNodes will not run on both namenodes so we
-      # need to execute each command scoped to a particular namenode
-      nn_refresh_cmd = format('dfsadmin -fs hdfs://{namenode_rpc} -refreshNodes')
-    else:
-      nn_refresh_cmd = format('dfsadmin -fs {namenode_address} -refreshNodes')
-    ExecuteHadoop(nn_refresh_cmd,
-                  user=hdfs_user,
-                  conf_dir=conf_dir,
-                  bin_dir=params.hadoop_bin_dir)
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def decommission():
-  import params
-  hdfs_user = params.hdfs_user
-  conf_dir = params.hadoop_conf_dir
-
-  File(params.exclude_file_path,
-       content=Template("exclude_hosts_list.j2"),
-       owner=hdfs_user
-  )
-
-  if params.dfs_ha_enabled:
-    # due to a bug in hdfs, refreshNodes will not run on both namenodes so we
-    # need to execute each command scoped to a particular namenode
-    nn_refresh_cmd = format('cmd /c hadoop dfsadmin -fs hdfs://{namenode_rpc} -refreshNodes')
-  else:
-    nn_refresh_cmd = format('cmd /c hadoop dfsadmin -fs {namenode_address} -refreshNodes')
-  Execute(nn_refresh_cmd, user=hdfs_user)
-
-
-def bootstrap_standby_namenode(params, use_path=False):
-  mark_dirs = params.namenode_bootstrapped_mark_dirs
-  bin_path = os.path.join(params.hadoop_bin_dir, '') if use_path else ""
-  try:
-    iterations = 50
-    bootstrapped = False
-    bootstrap_cmd = format("{bin_path}hdfs namenode -bootstrapStandby -nonInteractive")
-    # Blueprint-based deployments start both NNs in parallel, and occasionally
-    # the first attempt to bootstrap may fail. Depending on how it fails the
-    # second attempt may not succeed (e.g. it may find the folder and decide that
-    # bootstrap succeeded). The solution is to call with -force option but only
-    # during initial start
-    if params.command_phase == "INITIAL_START":
-      # force bootstrap in INITIAL_START phase
-      bootstrap_cmd = format("{bin_path}hdfs namenode -bootstrapStandby -nonInteractive -force")
-    elif is_namenode_bootstrapped(params):
-      # Once out of the INITIAL_START phase, bootstrap only if we couldn't bootstrap during cluster deployment
-      return True
-    Logger.info("Bootstrapping standby namenode: %s" % (bootstrap_cmd))
-    for i in range(iterations):
-      Logger.info('Try %d out of %d' % (i+1, iterations))
-      code, out = shell.call(bootstrap_cmd, logoutput=False, user=params.hdfs_user)
-      if code == 0:
-        Logger.info("Standby namenode bootstrapped successfully")
-        bootstrapped = True
-        break
-      elif code == 5:
-        Logger.info("Standby namenode already bootstrapped")
-        bootstrapped = True
-        break
-      else:
-        Logger.warning('Bootstrap standby namenode failed with %d error code. Will retry' % (code))
-  except Exception as ex:
-    Logger.error('Bootstrap standby namenode threw an exception. Reason %s' %(str(ex)))
-  if bootstrapped:
-    for mark_dir in mark_dirs:
-      Directory(mark_dir,
-                create_parents = True
-                )
-  return bootstrapped
-
-def is_namenode_bootstrapped(params):
-  mark_dirs = params.namenode_bootstrapped_mark_dirs
-  nn_name_dirs = params.dfs_name_dir.split(',')
-  marked = False
-  # Check if name directories have been marked as formatted
-  for mark_dir in mark_dirs:
-    if os.path.isdir(mark_dir):
-      marked = True
-      Logger.info(format("{mark_dir} exists. Standby Namenode already bootstrapped"))
-      break
-
-  # Ensure that mark dirs are created for all name directories
-  if marked:
-    for mark_dir in mark_dirs:
-      Directory(mark_dir,
-                create_parents = True
-                )
-
-  return marked
-
-
-@retry(times=125, sleep_time=5, backoff_factor=2, err_class=Fail)
-def is_this_namenode_active():
-  """
-  Gets whether the current NameNode is Active. This function will wait until the NameNode is
-  listed as being either Active or Standby before returning a value. This is to ensure that
-  that if the other NameNode is Active, we ensure that this NameNode has fully loaded and
-  registered in the event that the other NameNode is going to be restarted. This prevents
-  a situation where we detect the other NameNode as Active before this NameNode has fully booted.
-  If the other Active NameNode is then restarted, there can be a loss of service if this
-  NameNode has not entered Standby.
-  """
-  import params
-
-  # returns ([('nn1', 'c6401.ambari.apache.org:50070')], [('nn2', 'c6402.ambari.apache.org:50070')], [])
-  #                  0                                           1                                   2
-  # or
-  # returns ([], [('nn1', 'c6401.ambari.apache.org:50070')], [('nn2', 'c6402.ambari.apache.org:50070')], [])
-  #          0                                              1                                             2
-  #
-  namenode_states = namenode_ha_utils.get_namenode_states(params.hdfs_site, params.security_enabled,
-    params.hdfs_user, times=5, sleep_time=5, backoff_factor=2)
-
-  # unwraps [('nn1', 'c6401.ambari.apache.org:50070')]
-  active_namenodes = [] if len(namenode_states[0]) < 1 else namenode_states[0]
-
-  # unwraps [('nn2', 'c6402.ambari.apache.org:50070')]
-  standby_namenodes = [] if len(namenode_states[1]) < 1 else namenode_states[1]
-
-  # check to see if this is the active NameNode
-  for entry in active_namenodes:
-    if params.namenode_id in entry:
-      return True
-
-  # if this is not the active NameNode, then we must wait for it to register as standby
-  for entry in standby_namenodes:
-    if params.namenode_id in entry:
-      return False
-
-  # at this point, this NameNode is neither active nor standby - we must wait to ensure it
-  # enters at least one of these roles before returning a verdict - the retry decorator will catch
-  # this failure and retry the function automatically
-  raise Fail(format("The NameNode {namenode_id} is not listed as Active or Standby, waiting..."))
\ No newline at end of file

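On the start path above, directory creation is gated on Safemode: the NameNode is polled with "dfsadmin -safemode get" until the output contains "Safe mode is OFF" (115 tries, 10 seconds apart), and the HDFS and Ranger audit directories are created only after that wait succeeds on the active NameNode. A self-contained sketch of the polling loop; the function name is a placeholder and dfsadmin_cmd is assumed to be a list such as ["hdfs", "dfsadmin", "-fs", "hdfs://nn-host:8020"]:

import subprocess
import time

def wait_for_safemode_off(dfsadmin_cmd, attempts=115, pause=10):
  # Ask the NameNode for its Safemode state until it reports OFF or we give up.
  for _ in range(attempts):
    output = subprocess.check_output(dfsadmin_cmd + ["-safemode", "get"])
    if b"Safe mode is OFF" in output:
      return True
    time.sleep(pause)
  return False
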
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_nfsgateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_nfsgateway.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_nfsgateway.py
deleted file mode 100644
index 672312a..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_nfsgateway.py
+++ /dev/null
@@ -1,75 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.core.exceptions import Fail
-from resource_management.core.logger import Logger
-from resource_management.core.resources import Directory
-from resource_management.core import shell
-from utils import service
-import subprocess,os
-
-# NFS GATEWAY is always started by root using jsvc due to rpcbind bugs
-# on Linux such as CentOS6.2. https://bugzilla.redhat.com/show_bug.cgi?id=731542
-
-def prepare_rpcbind():
-  Logger.info("check if native nfs server is running")
-  p, output = shell.call("pgrep nfsd")
-  if p == 0 :
-    Logger.info("native nfs server is running. shutting it down...")
-    # shutdown nfs
-    shell.call("service nfs stop")
-    shell.call("service nfs-kernel-server stop")
-    Logger.info("check if the native nfs server is down...")
-    p, output = shell.call("pgrep nfsd")
-    if p == 0 :
-      raise Fail("Failed to shutdown native nfs service")
-
-  Logger.info("check if rpcbind or portmap is running")
-  p, output = shell.call("pgrep rpcbind")
-  q, output = shell.call("pgrep portmap")
-
-  if p != 0 and q != 0:
-    Logger.info("no portmap or rpcbind running. starting one...")
-    p, output = shell.call(("service", "rpcbind", "start"), sudo=True)
-    q, output = shell.call(("service", "portmap", "start"), sudo=True)
-    if p != 0 and q != 0:
-      raise Fail("Failed to start rpcbind or portmap")
-
-  Logger.info("now we are ready to start nfs gateway")
-
-
-def nfsgateway(action=None, format=False):
-  import params
-
-  if action== "start":
-    prepare_rpcbind()
-
-  if action == "configure":
-    Directory(params.nfs_file_dump_dir,
-              owner = params.hdfs_user,
-              group = params.user_group,
-    )
-  elif action == "start" or action == "stop":
-    service(
-      action=action,
-      name="nfs3",
-      user=params.root_user,
-      create_pid_dir=True,
-      create_log_dir=True
-    )

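prepare_rpcbind() above relies on pgrep's exit code to decide whether the native NFS server, rpcbind, or portmap is running: an exit code of 0 means at least one matching process was found. A tiny illustration of that check with a placeholder helper name:

import subprocess

def is_process_running(name):
  # pgrep exits 0 when at least one process matches, non-zero otherwise.
  return subprocess.call(["pgrep", name]) == 0

if is_process_running("nfsd"):
  print("native NFS server is running; stop it before starting the gateway")
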
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_rebalance.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_rebalance.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_rebalance.py
deleted file mode 100644
index 1dc545e..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_rebalance.py
+++ /dev/null
@@ -1,130 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import re
-
-from resource_management.core.exceptions import Fail
-
-class HdfsParser():
-  def __init__(self):
-    self.initialLine = None
-    self.state = None
-  
-  def parseLine(self, line):
-    hdfsLine = HdfsLine()
-    type, matcher = hdfsLine.recognizeType(line)
-    if type == HdfsLine.LineType.HeaderStart:
-      self.state = 'PROCESS_STARTED'
-    elif type == HdfsLine.LineType.Progress:
-      self.state = 'PROGRESS'
-      hdfsLine.parseProgressLog(line, matcher)
-      if self.initialLine is None:
-        self.initialLine = hdfsLine
-      return hdfsLine
-    elif type == HdfsLine.LineType.ProgressEnd:
-      self.state = 'PROCESS_FINISHED'
-    return None
-    
-class HdfsLine():
-  
-  class LineType:
-    HeaderStart, Progress, ProgressEnd, Unknown = range(4)
-  
-  
-  MEMORY_SUFFIX = ['B','KB','MB','GB','TB','PB','EB']
-  MEMORY_PATTERN = '(?P<memmult_%d>(?P<memory_%d>(\d+)(.|,)?(\d+)?) (?P<mult_%d>'+"|".join(MEMORY_SUFFIX)+'))'
-  
-  HEADER_BEGIN_PATTERN = re.compile('Time Stamp\w+Iteration#\w+Bytes Already Moved\w+Bytes Left To Move\w+Bytes Being Moved')
-  PROGRESS_PATTERN = re.compile(
-                            "(?P<date>.*?)\s+" + 
-                            "(?P<iteration>\d+)\s+" + 
-                            MEMORY_PATTERN % (1,1,1) + "\s+" + 
-                            MEMORY_PATTERN % (2,2,2) + "\s+" +
-                            MEMORY_PATTERN % (3,3,3)
-                            )
-  PROGRESS_END_PATTERN = re.compile('The cluster is balanced. Exiting...')
-  
-  def __init__(self):
-    self.date = None
-    self.iteration = None
-    self.bytesAlreadyMoved = None 
-    self.bytesLeftToMove = None
-    self.bytesBeingMoved = None 
-    self.bytesAlreadyMovedStr = None 
-    self.bytesLeftToMoveStr = None
-    self.bytesBeingMovedStr = None 
-  
-  def recognizeType(self, line):
-    for (type, pattern) in (
-                            (HdfsLine.LineType.HeaderStart, self.HEADER_BEGIN_PATTERN),
-                            (HdfsLine.LineType.Progress, self.PROGRESS_PATTERN), 
-                            (HdfsLine.LineType.ProgressEnd, self.PROGRESS_END_PATTERN)
-                            ):
-      m = re.match(pattern, line)
-      if m:
-        return type, m
-    return HdfsLine.LineType.Unknown, None
-    
-  def parseProgressLog(self, line, m):
-    '''
-    Parse the line of 'hdfs rebalancer' output. The example output being parsed:
-    
-    Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved
-    Jul 28, 2014 5:01:49 PM           0                  0 B             5.74 GB            9.79 GB
-    Jul 28, 2014 5:03:00 PM           1                  0 B             5.58 GB            9.79 GB
-    
-    Raises Fail in case of parsing errors
-
-    '''
-    m = re.match(self.PROGRESS_PATTERN, line)
-    if m:
-      self.date = m.group('date') 
-      self.iteration = int(m.group('iteration'))
-       
-      self.bytesAlreadyMoved = self.parseMemory(m.group('memory_1'), m.group('mult_1')) 
-      self.bytesLeftToMove = self.parseMemory(m.group('memory_2'), m.group('mult_2')) 
-      self.bytesBeingMoved = self.parseMemory(m.group('memory_3'), m.group('mult_3'))
-       
-      self.bytesAlreadyMovedStr = m.group('memmult_1') 
-      self.bytesLeftToMoveStr = m.group('memmult_2')
-      self.bytesBeingMovedStr = m.group('memmult_3') 
-    else:
-      raise AmbariException("Failed to parse line [%s]") 
-  
-  def parseMemory(self, memorySize, multiplier_type):
-    try:
-      factor = self.MEMORY_SUFFIX.index(multiplier_type)
-    except ValueError:
-      raise AmbariException("Failed to memory value [%s %s]" % (memorySize, multiplier_type))
-    
-    return float(memorySize) * (1024 ** factor)
-
-  def toJson(self):
-    return {
-            'timeStamp' : self.date,
-            'iteration' : self.iteration,
-            
-            'dataMoved': self.bytesAlreadyMovedStr,
-            'dataLeft' : self.bytesLeftToMoveStr,
-            'dataBeingMoved': self.bytesBeingMovedStr,
-            
-            'bytesMoved': self.bytesAlreadyMoved,
-            'bytesLeft' : self.bytesLeftToMove,
-            'bytesBeingMoved': self.bytesBeingMoved,
-          }
-  def __str__(self):
-    return "[ date=%s,iteration=%d, bytesAlreadyMoved=%d, bytesLeftToMove=%d, bytesBeingMoved=%d]"%(self.date, self.iteration, self.bytesAlreadyMoved, self.bytesLeftToMove, self.bytesBeingMoved)
\ No newline at end of file

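HdfsLine.parseMemory converts the human-readable sizes in the balancer output ("5.74 GB", "9.79 GB") into bytes by looking up the unit's position in MEMORY_SUFFIX and scaling by 1024 raised to that power. The same conversion in isolation, with a placeholder function name:

SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']

def to_bytes(value, suffix):
  # "5.74", "GB" -> 5.74 * 1024**3
  return float(value) * (1024 ** SUFFIXES.index(suffix))

print(to_bytes("5.74", "GB"))  # roughly 6.16e9 bytes
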
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_snamenode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_snamenode.py
deleted file mode 100644
index 8d4c40c..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_snamenode.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from utils import service
-from resource_management.core.resources.system import Directory, File
-from resource_management.core.source import Template
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.core.resources.service import Service
-from ambari_commons.os_family_impl import OsFamilyImpl, OsFamilyFuncImpl
-from ambari_commons import OSConst, OSCheck
-
-# check_windows_service_status is only available on Windows agents
-if OSCheck.is_windows_family():
-  from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def snamenode(action=None, format=False):
-  if action == "configure":
-    import params
-    for fs_checkpoint_dir in params.fs_checkpoint_dirs:
-      Directory(fs_checkpoint_dir,
-                create_parents = True,
-                cd_access="a",
-                mode=0755,
-                owner=params.hdfs_user,
-                group=params.user_group)
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=params.hdfs_user,
-         group=params.user_group)
-  elif action == "start" or action == "stop":
-    import params
-    service(
-      action=action,
-      name="secondarynamenode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True
-    )
-  elif action == "status":
-    import status_params
-    check_process_status(status_params.snamenode_pid_file)
-
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def snamenode(action=None, format=False):
-  if action == "configure":
-    pass
-  elif action == "start" or action == "stop":
-    import params
-    Service(params.snamenode_win_service_name, action=action)
-  elif action == "status":
-    import status_params
-    check_windows_service_status(status_params.snamenode_win_service_name)
-

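For reference, the removed hdfs_snamenode.py relies on Ambari's OS-family dispatch: two functions share the name snamenode() and the OsFamilyFuncImpl decorator selects the variant matching the agent's OS family (note that the Windows variant refers to Service and check_windows_service_status, which are not imported in this file and are presumably resolved by the Windows helpers elsewhere). A minimal, self-contained sketch of that dispatch idea, using hypothetical register_for/dispatch helpers rather than the real ambari_commons decorators:

# Illustrative sketch only: a simplified stand-in for the OsFamilyFuncImpl
# decorator used above. register_for/dispatch are hypothetical helpers,
# not part of ambari_commons.
_IMPLEMENTATIONS = {}

def register_for(os_family):
    def decorator(func):
        # Keep one implementation per (function name, OS family) pair.
        _IMPLEMENTATIONS[(func.__name__, os_family)] = func
        return func
    return decorator

def dispatch(name, os_family, *args, **kwargs):
    # Fall back to the default implementation when no OS-specific one is registered.
    impl = _IMPLEMENTATIONS.get((name, os_family)) or _IMPLEMENTATIONS[(name, "default")]
    return impl(*args, **kwargs)

@register_for("default")
def snamenode(action=None):
    return "linux handling of %s" % action

@register_for("winsrv")          # intentionally the same function name, as in the file above
def snamenode(action=None):
    return "windows handling of %s" % action

print(dispatch("snamenode", "default", action="start"))   # linux handling of start
print(dispatch("snamenode", "winsrv", action="status"))   # windows handling of status
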
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
deleted file mode 100644
index fe488c3..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
+++ /dev/null
@@ -1,39 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from ambari_commons import OSCheck
-
-# These parameters are supposed to be referenced at installation time, before the Hadoop environment variables have been set
-if OSCheck.is_windows_family():
-  exclude_packages = []
-else:
-  from resource_management.libraries.functions.default import default
-  from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
-  from resource_management.libraries.script.script import Script
-
-  _config = Script.get_config()
-  stack_version_unformatted = str(_config['hostLevelParams']['stack_version'])
-
-  # The logic for LZO also exists in OOZIE's params.py
-  io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
-  lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-  lzo_packages = get_lzo_packages(stack_version_unformatted)
-
-  exclude_packages = []
-  if not lzo_enabled:
-    exclude_packages += lzo_packages

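The install_params.py removed above only keeps the LZO packages in the install set when com.hadoop.compression.lzo appears in io.compression.codecs; otherwise they are excluded. A small, dependency-free sketch of that decision (sample inputs only; the list argument stands in for get_lzo_packages(stack_version)):

# Illustrative sketch of the LZO package-exclusion decision above.
def packages_to_exclude(io_compression_codecs, candidate_lzo_packages):
    lzo_enabled = (io_compression_codecs is not None
                   and "com.hadoop.compression.lzo" in io_compression_codecs.lower())
    # When LZO is not enabled in core-site, its packages are excluded from installation.
    return [] if lzo_enabled else list(candidate_lzo_packages)

print(packages_to_exclude("org.apache.hadoop.io.compress.GzipCodec",
                          ["lzo", "hadooplzo"]))          # ['lzo', 'hadooplzo']
print(packages_to_exclude("org.apache.hadoop.io.compress.GzipCodec,"
                          "com.hadoop.compression.lzo.LzoCodec",
                          ["lzo", "hadooplzo"]))          # []
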
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/journalnode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/journalnode.py
deleted file mode 100644
index 46df454..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/journalnode.py
+++ /dev/null
@@ -1,203 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import conf_select, stack_select
-from resource_management.libraries.functions.constants import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions.security_commons import build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
-  FILE_TYPE_XML
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Directory
-from utils import service
-from hdfs import hdfs
-import journalnode_upgrade
-from ambari_commons.os_family_impl import OsFamilyImpl
-from ambari_commons import OSConst
-
-class JournalNode(Script):
-  def install(self, env):
-    import params
-    env.set_params(params)
-    self.install_packages(env)  
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class JournalNodeDefault(JournalNode):
-
-  def get_component_name(self):
-    return "hadoop-hdfs-journalnode"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing Stack Upgrade pre-restart")
-    import params
-    env.set_params(params)
-
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-hdfs-journalnode", params.version)
-
-  def start(self, env, upgrade_type=None):
-    import params
-
-    env.set_params(params)
-    self.configure(env)
-    service(
-      action="start", name="journalnode", user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True
-    )
-
-  def post_upgrade_restart(self, env, upgrade_type=None):
-    if upgrade_type == "nonrolling":
-      return
-
-    Logger.info("Executing Stack Upgrade post-restart")
-    import params
-    env.set_params(params)
-    journalnode_upgrade.post_upgrade_check()
-
-  def stop(self, env, upgrade_type=None):
-    import params
-
-    env.set_params(params)
-    service(
-      action="stop", name="journalnode", user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True
-    )
-
-  def configure(self, env):
-    import params
-
-    Directory(params.jn_edits_dir,
-              create_parents = True,
-              cd_access="a",
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-    env.set_params(params)
-    hdfs()
-    pass
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.journalnode_pid_file)
-
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    props_value_check = None
-    props_empty_check = ['dfs.journalnode.keytab.file',
-                         'dfs.journalnode.kerberos.principal']
-    props_read_check = ['dfs.journalnode.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(hdfs_site_expectations)
-    hdfs_expectations.update(core_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML})
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('hdfs-site' not in security_params or
-                  'dfs.journalnode.kerberos.keytab.file' not in security_params['hdfs-site'] or
-                  'dfs.journalnode.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['dfs.journalnode.kerberos.keytab.file'],
-                                security_params['hdfs-site']['dfs.journalnode.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-      
-  def get_log_folder(self):
-    import params
-    return params.hdfs_log_dir
-  
-  def get_user(self):
-    import params
-    return params.hdfs_user
-
-  def get_pid_files(self):
-    import status_params
-    return [status_params.journalnode_pid_file]
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class JournalNodeWindows(JournalNode):
-  def install(self, env):
-    import install_params
-    self.install_packages(env)
-
-  def start(self, env):
-    import params
-    self.configure(env)
-    Service(params.journalnode_win_service_name, action="start")
-
-  def stop(self, env):
-    import params
-    Service(params.journalnode_win_service_name, action="stop")
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    hdfs("journalnode")
-    pass
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_windows_service_status(status_params.journalnode_win_service_name)
-
-if __name__ == "__main__":
-  JournalNode().execute()

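In the journalnode.py removed above, security_status() first builds expectations per config file (required values plus properties that must be non-empty) and only attempts the cached kinit when every expectation is met. A stripped-down, dependency-free sketch of that validation step (it mirrors the intent of build_expectations/validate_security_config_properties, not the real resource_management API):

# Illustrative sketch of the expectation check performed in security_status() above.
def validate_expectations(site_configs, expectations):
    issues = {}
    for site, expected in expectations.items():
        actual = site_configs.get(site, {})
        problems = []
        for prop, required in expected.get("value_checks", {}).items():
            if actual.get(prop) != required:
                problems.append("%s should be %s" % (prop, required))
        for prop in expected.get("empty_checks", []):
            if not actual.get(prop):
                problems.append("%s must be set" % prop)
        if problems:
            issues[site] = "; ".join(problems)
    return issues  # an empty dict means all validations passed

expectations = {
    "core-site": {"value_checks": {"hadoop.security.authentication": "kerberos"},
                  "empty_checks": ["hadoop.security.auth_to_local"]},
    "hdfs-site": {"empty_checks": ["dfs.journalnode.keytab.file",
                                   "dfs.journalnode.kerberos.principal"]},
}
site_configs = {
    "core-site": {"hadoop.security.authentication": "kerberos"},
    "hdfs-site": {"dfs.journalnode.keytab.file": "/etc/security/keytabs/jn.service.keytab"},
}
print(validate_expectations(site_configs, expectations))
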

[10/19] ambari git commit: AMBARI-19229. Remove HDP-3.0.0 stack definition from Ambari-2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/c358ae0c/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/widgets.json b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/widgets.json
deleted file mode 100644
index 4a645b0..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/widgets.json
+++ /dev/null
@@ -1,649 +0,0 @@
-{
-  "layouts": [
-    {
-      "layout_name": "default_hdfs_dashboard",
-      "display_name": "Standard HDFS Dashboard",
-      "section_name": "HDFS_SUMMARY",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "NameNode GC count",
-          "description": "Count of total garbage collections and count of major type garbage collections of the JVM.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "jvm.JvmMetrics.GcCount._rate",
-              "metric_path": "metrics/jvm/gcCount._rate",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate",
-              "metric_path": "metrics/jvm/GcCountConcurrentMarkSweep._rate",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "GC total count",
-              "value": "${jvm.JvmMetrics.GcCount._rate}"
-            },
-            {
-              "name": "GC count of type major collection",
-              "value": "${jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "NameNode GC time",
-          "description": "Total time taken by major type garbage collections in milliseconds.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate",
-              "metric_path": "metrics/jvm/GcTimeMillisConcurrentMarkSweep._rate",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "GC time in major collection",
-              "value": "${jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate}"
-            }
-          ],
-          "properties": {
-            "display_unit": "ms",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "NN Connection Load",
-          "description": "Number of open RPC connections being managed by NameNode.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "rpc.rpc.client.NumOpenConnections",
-              "metric_path": "metrics/rpc/client/NumOpenConnections",
-              "category": "",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "rpc.rpc.datanode.NumOpenConnections",
-              "metric_path": "metrics/rpc/datanode/NumOpenConnections",
-              "category": "",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "Open Client Connections",
-              "value": "${rpc.rpc.client.NumOpenConnections}"
-            },
-            {
-              "name": "Open Datanode Connections",
-              "value": "${rpc.rpc.datanode.NumOpenConnections}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "NameNode Heap",
-          "description": "Heap memory committed and Heap memory used with respect to time.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "jvm.JvmMetrics.MemHeapCommittedM",
-              "metric_path": "metrics/jvm/memHeapCommittedM",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "jvm.JvmMetrics.MemHeapUsedM",
-              "metric_path": "metrics/jvm/memHeapUsedM",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "JVM heap committed",
-              "value": "${jvm.JvmMetrics.MemHeapCommittedM}"
-            },
-            {
-              "name": "JVM heap used",
-              "value": "${jvm.JvmMetrics.MemHeapUsedM}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "NameNode Host Load",
-          "description": "Percentage of CPU and Memory resources being consumed on NameNode host.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "cpu_system",
-              "metric_path": "metrics/cpu/cpu_system",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "cpu_user",
-              "metric_path": "metrics/cpu/cpu_user",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "cpu_nice",
-              "metric_path": "metrics/cpu/cpu_nice",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "cpu_idle",
-              "metric_path": "metrics/cpu/cpu_idle",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "cpu_wio",
-              "metric_path": "metrics/cpu/cpu_wio",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "mem_total",
-              "metric_path": "metrics/memory/mem_total",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "mem_free",
-              "metric_path": "metrics/memory/mem_free",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "CPU utilization",
-              "value": "${((cpu_system + cpu_user + cpu_nice)/(cpu_system + cpu_user + cpu_nice + cpu_idle + cpu_wio)) * 100}"
-            },
-            {
-              "name": "Memory utilization",
-              "value": "${((mem_total - mem_free)/mem_total) * 100}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1",
-            "display_unit": "%"
-          }
-        },
-        {
-          "widget_name": "NameNode RPC",
-          "description": "Compares the average time spent for RPC request in a queue and RPC request being processed.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "rpc.rpc.client.RpcQueueTimeAvgTime",
-              "metric_path": "metrics/rpc/client/RpcQueueTime_avg_time",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "rpc.rpc.client.RpcProcessingTimeAvgTime",
-              "metric_path": "metrics/rpc/client/RpcProcessingTime_avg_time",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "rpc.rpc.datanode.RpcQueueTimeAvgTime",
-              "metric_path": "metrics/rpc/datanode/RpcQueueTime_avg_time",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            },
-            {
-              "name": "rpc.rpc.datanode.RpcProcessingTimeAvgTime",
-              "metric_path": "metrics/rpc/datanode/RpcProcessingTime_avg_time",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "Client RPC Queue Wait time",
-              "value": "${rpc.rpc.client.RpcQueueTimeAvgTime}"
-            },
-            {
-              "name": "Client RPC Processing time",
-              "value": "${rpc.rpc.client.RpcProcessingTimeAvgTime}"
-            },
-            {
-              "name": "Datanode RPC Queue Wait time",
-              "value": "${rpc.rpc.datanode.RpcQueueTimeAvgTime}"
-            },
-            {
-              "name": "Datanode RPC Processing time",
-              "value": "${rpc.rpc.datanode.RpcProcessingTimeAvgTime}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1",
-            "display_unit": "ms"
-          }
-        },
-        {
-          "widget_name": "NameNode Operations",
-          "description": "Rate per second of number of file operation over time.",
-          "widget_type": "GRAPH",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "dfs.namenode.TotalFileOps._rate",
-              "metric_path": "metrics/dfs/namenode/TotalFileOps._rate",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "NameNode File Operations",
-              "value": "${dfs.namenode.TotalFileOps._rate}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Failed disk volumes",
-          "description": "Number of Failed disk volumes across all DataNodes. Its indicative of HDFS bad health.",
-          "widget_type": "NUMBER",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumFailedVolumes._sum",
-              "metric_path": "metrics/dfs/datanode/NumFailedVolumes",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "Failed disk volumes",
-              "value": "${FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumFailedVolumes._sum}"
-            }
-          ],
-          "properties": {
-            "display_unit": ""
-          }
-        },
-        {
-          "widget_name": "Blocks With Corrupted Replicas",
-          "description": "Number represents data blocks with at least one corrupted replica (but not all of them). Its indicative of HDFS bad health.",
-          "widget_type": "NUMBER",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks",
-              "metric_path": "metrics/dfs/FSNamesystem/CorruptBlocks",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "Blocks With Corrupted Replicas",
-              "value": "${Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks}"
-            }
-          ],
-          "properties": {
-            "warning_threshold": "0",
-            "error_threshold": "50"
-          }
-        },
-        {
-          "widget_name": "Under Replicated Blocks",
-          "description": "Number represents file blocks that does not meet the replication factor criteria. Its indicative of HDFS bad health.",
-          "widget_type": "NUMBER",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks",
-              "metric_path": "metrics/dfs/FSNamesystem/UnderReplicatedBlocks",
-              "service_name": "HDFS",
-              "component_name": "NAMENODE",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "Under Replicated Blocks",
-              "value": "${Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks}"
-            }
-          ],
-          "properties": {
-            "warning_threshold": "0",
-            "error_threshold": "50"
-          }
-        },
-        {
-          "widget_name": "HDFS Space Utilization",
-          "description": "Percentage of available space used in the DFS.",
-          "widget_type": "GAUGE",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining",
-              "metric_path": "metrics/FSDatasetState/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl/Remaining",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity",
-              "metric_path": "metrics/dfs/datanode/Capacity",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "HDFS Space Utilization",
-              "value": "${(FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity - FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining)/FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity}"
-            }
-          ],
-          "properties": {
-            "warning_threshold": "0.75",
-            "error_threshold": "0.9"
-          }
-        }
-      ]
-    },
-    {
-      "layout_name": "default_hdfs_heatmap",
-      "section_name": "HDFS_HEATMAPS",
-      "display_name": "HDFS Heatmaps",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "HDFS Bytes Read",
-          "default_section_name": "HDFS_HEATMAPS",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "dfs.datanode.BytesRead._rate",
-              "metric_path": "metrics/dfs/datanode/bytes_read._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "HDFS Bytes Read",
-              "value": "${dfs.datanode.BytesRead._rate}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "max_limit": "1024"
-          }
-        },
-        {
-          "widget_name": "HDFS Bytes Written",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "dfs.datanode.BytesWritten._rate",
-              "metric_path": "metrics/dfs/datanode/bytes_written._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "HDFS Bytes Written",
-              "value": "${dfs.datanode.BytesWritten._rate}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "max_limit": "1024"
-          }
-        },
-        {
-          "widget_name": "DataNode Garbage Collection Time",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis",
-              "metric_path": "metrics/jvm/gcTimeMillis",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "DataNode Garbage Collection Time",
-              "value": "${Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis}"
-            }
-          ],
-          "properties": {
-            "display_unit": "ms",
-            "max_limit": "10000"
-          }
-        },
-        {
-          "widget_name": "DataNode JVM Heap Memory Used",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM",
-              "metric_path": "metrics/jvm/memHeapUsedM",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "DataNode JVM Heap Memory Used",
-              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "max_limit": "512"
-          }
-        },
-        {
-          "widget_name": "DataNode JVM Heap Memory Committed",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM",
-              "metric_path": "metrics/jvm/memHeapCommittedM",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "DataNode JVM Heap Memory Committed",
-              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "max_limit": "512"
-          }
-        },
-        {
-          "widget_name": "DataNode Process Disk I/O Utilization",
-          "default_section_name": "HDFS_HEATMAPS",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "dfs.datanode.BytesRead._rate",
-              "metric_path": "metrics/dfs/datanode/bytes_read._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "dfs.datanode.BytesWritten._rate",
-              "metric_path": "metrics/dfs/datanode/bytes_written._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "dfs.datanode.TotalReadTime._rate",
-              "metric_path": "metrics/dfs/datanode/TotalReadTime._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "dfs.datanode.TotalWriteTime._rate",
-              "metric_path": "metrics/dfs/datanode/TotalWriteTime._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "DataNode Process Disk I/O Utilization",
-              "value": "${((dfs.datanode.BytesRead._rate/dfs.datanode.TotalReadTime._rate)+(dfs.datanode.BytesWritten._rate/dfs.datanode.TotalWriteTime._rate))*50}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "DataNode Process Network I/O Utilization",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "dfs.datanode.RemoteBytesRead._rate",
-              "metric_path": "metrics/dfs/datanode/RemoteBytesRead._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "dfs.datanode.ReadsFromRemoteClient._rate",
-              "metric_path": "metrics/dfs/datanode/reads_from_remote_client._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "dfs.datanode.RemoteBytesWritten._rate",
-              "metric_path": "metrics/dfs/datanode/RemoteBytesWritten._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "dfs.datanode.WritesFromRemoteClient._rate",
-              "metric_path": "metrics/dfs/datanode/writes_from_remote_client._rate",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "DataNode Process Network I/O Utilization",
-              "value": "${((dfs.datanode.RemoteBytesRead._rate/dfs.datanode.ReadsFromRemoteClient._rate)+(dfs.datanode.RemoteBytesWritten._rate/dfs.datanode.WritesFromRemoteClient._rate))*50}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "HDFS Space Utilization",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining",
-              "metric_path": "metrics/FSDatasetState/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl/Remaining",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            },
-            {
-              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity",
-              "metric_path": "metrics/dfs/datanode/Capacity",
-              "service_name": "HDFS",
-              "component_name": "DATANODE"
-            }
-          ],
-          "values": [
-            {
-              "name": "HDFS Space Utilization",
-              "value": "${((FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity - FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining)/FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        }
-      ]
-    }
-  ]
-}
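
Each widget above derives its displayed value from a ${...} expression over the declared metrics; for example the NameNode Host Load CPU value is (cpu_system + cpu_user + cpu_nice) divided by the sum of all five CPU metrics, times 100. A toy evaluator (purely illustrative, not Ambari's actual widget engine) can resolve such an expression by substituting sampled metric values and evaluating the arithmetic:

# Illustrative sketch only: substitute metric names with sampled values in a
# widget "values" expression, then evaluate the remaining arithmetic.
import re

def evaluate_widget_expression(expression, samples):
    def substitute(match):
        inner = match.group(1)
        for name, value in samples.items():
            inner = inner.replace(name, repr(float(value)))
        return "(" + inner + ")"
    resolved = re.sub(r"\$\{(.*?)\}", substitute, expression)
    # Evaluate pure arithmetic only; no builtins are exposed.
    return eval(resolved, {"__builtins__": {}}, {})

samples = {"cpu_system": 5.0, "cpu_user": 20.0, "cpu_nice": 0.0,
           "cpu_idle": 70.0, "cpu_wio": 5.0}
expr = ("${((cpu_system + cpu_user + cpu_nice)"
        "/(cpu_system + cpu_user + cpu_nice + cpu_idle + cpu_wio)) * 100}")
print(evaluate_widget_expression(expr, samples))  # 25.0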