Posted to commits@ambari.apache.org by ad...@apache.org on 2017/05/23 09:53:15 UTC

[17/50] [abbrv] ambari git commit: AMBARI-21048. HDP 3.0 TP - create service definition for Storm with configs, kerberos, widgets, etc. (vbrodetsky)

AMBARI-21048. HDP 3.0 TP - create service definition for Storm with configs, kerberos, widgets, etc. (vbrodetsky)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6ab4d28a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6ab4d28a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6ab4d28a

Branch: refs/heads/ambari-rest-api-explorer
Commit: 6ab4d28a6973cec9a2d04592bfa6fcdfcf081988
Parents: 0e5f247
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Thu May 18 20:33:04 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Thu May 18 20:33:43 2017 +0300

----------------------------------------------------------------------
 .../common-services/STORM/1.0.1.3.0/alerts.json |  145 +++
 .../configuration/ranger-storm-audit.xml        |  133 ++
 .../ranger-storm-plugin-properties.xml          |  121 ++
 .../ranger-storm-policymgr-ssl.xml              |   70 +
 .../configuration/ranger-storm-security.xml     |   67 +
 .../storm-atlas-application.properties.xml      |   31 +
 .../configuration/storm-cluster-log4j.xml       |  133 ++
 .../STORM/1.0.1.3.0/configuration/storm-env.xml |  165 +++
 .../1.0.1.3.0/configuration/storm-site.xml      | 1002 +++++++++++++++
 .../configuration/storm-worker-log4j.xml        |  189 +++
 .../STORM/1.0.1.3.0/kerberos.json               |  134 ++
 .../STORM/1.0.1.3.0/metainfo.xml                |  179 +++
 .../STORM/1.0.1.3.0/metrics.json                | 1202 ++++++++++++++++++
 .../alerts/check_supervisor_process_win.py      |   50 +
 .../STORM/1.0.1.3.0/package/files/wordCount.jar |  Bin 0 -> 690588 bytes
 .../1.0.1.3.0/package/scripts/drpc_server.py    |   91 ++
 .../STORM/1.0.1.3.0/package/scripts/nimbus.py   |  116 ++
 .../1.0.1.3.0/package/scripts/nimbus_prod.py    |   81 ++
 .../1.0.1.3.0/package/scripts/pacemaker.py      |   90 ++
 .../STORM/1.0.1.3.0/package/scripts/params.py   |   28 +
 .../1.0.1.3.0/package/scripts/params_linux.py   |  424 ++++++
 .../1.0.1.3.0/package/scripts/params_windows.py |   60 +
 .../STORM/1.0.1.3.0/package/scripts/rest_api.py |   85 ++
 .../STORM/1.0.1.3.0/package/scripts/service.py  |   95 ++
 .../1.0.1.3.0/package/scripts/service_check.py  |   79 ++
 .../package/scripts/setup_ranger_storm.py       |  133 ++
 .../1.0.1.3.0/package/scripts/status_params.py  |   83 ++
 .../STORM/1.0.1.3.0/package/scripts/storm.py    |  182 +++
 .../1.0.1.3.0/package/scripts/storm_upgrade.py  |  177 +++
 .../package/scripts/storm_yaml_utils.py         |   53 +
 .../1.0.1.3.0/package/scripts/supervisor.py     |  117 ++
 .../package/scripts/supervisor_prod.py          |   84 ++
 .../package/scripts/supervisord_service.py      |   33 +
 .../1.0.1.3.0/package/scripts/ui_server.py      |  137 ++
 .../package/templates/client_jaas.conf.j2       |   33 +
 .../1.0.1.3.0/package/templates/config.yaml.j2  |   75 ++
 .../templates/input.config-storm.json.j2        |   78 ++
 .../templates/storm-metrics2.properties.j2      |   34 +
 .../1.0.1.3.0/package/templates/storm.conf.j2   |   35 +
 .../package/templates/storm_jaas.conf.j2        |   65 +
 .../package/templates/worker-launcher.cfg.j2    |   19 +
 .../STORM/1.0.1.3.0/quicklinks/quicklinks.json  |   45 +
 .../STORM/1.0.1.3.0/role_command_order.json     |   13 +
 .../STORM/1.0.1.3.0/widgets.json                |  127 ++
 .../stacks/HDP/3.0/services/STORM/metainfo.xml  |   27 +
 45 files changed, 6320 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/alerts.json b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/alerts.json
new file mode 100644
index 0000000..acd9d85
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/alerts.json
@@ -0,0 +1,145 @@
+{
+  "STORM": {
+    "service": [
+      {
+        "name": "storm_supervisor_process_percent",
+        "label": "Percent Supervisors Available",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "storm_supervisor_process",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 10
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 30
+            },
+            "units" : "%",
+            "type": "PERCENT"
+          }
+        }
+      }
+    ],
+    "STORM_UI_SERVER": [
+      {
+        "name": "storm_webui",
+        "label": "Storm Web UI",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{storm-site/ui.port}}",
+            "https" : "{{storm-site/ui.https.port}}",
+            "kerberos_keytab": "{{storm-env/storm_ui_keytab}}",
+            "kerberos_principal": "{{storm-env/storm_ui_principal_name}}",
+            "connection_timeout": 5.0,
+            "https_property": "{{storm-site/ui.https.keystore.type}}",
+            "https_property_value": "jks"
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning":{
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      }      
+    ],
+    "NIMBUS": [
+      {
+        "name": "storm_nimbus_process",
+        "label": "Nimbus Process",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "{{storm-site/nimbus.thrift.port}}",
+          "default_port": 6627,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ],
+    "DRPC_SERVER": [
+      {
+        "name": "storm_drpc_server",
+        "label": "DRPC Server Process",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "{{storm-site/drpc.port}}",
+          "default_port": 3772,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ],
+    "SUPERVISOR": [
+      {
+        "name": "storm_supervisor_process",
+        "label": "Supervisor Process",
+        "interval": 1,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "{{storm-env/jmxremote_port}}",
+          "default_port": 56431,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}
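
The NIMBUS, DRPC_SERVER and SUPERVISOR entries above are PORT-type alerts: the Ambari agent times a plain TCP connect to the configured port and reports WARNING past 1.5s and CRITICAL past 5.0s. The standalone Python sketch below only illustrates that check; the real implementation lives in the agent's alert framework, and the host and port used here are placeholders.

    import socket
    import time

    def check_tcp_port(host, port, warning=1.5, critical=5.0):
        # Time a TCP connect and map the result to OK/WARNING/CRITICAL,
        # mirroring the reporting thresholds in the PORT alerts above.
        start = time.time()
        try:
            sock = socket.create_connection((host, port), timeout=critical)
            elapsed = time.time() - start
            sock.close()
        except (socket.error, socket.timeout) as err:
            return "CRITICAL", "Connection failed: {0} to {1}:{2}".format(err, host, port)
        state = "WARNING" if elapsed >= warning else "OK"
        return state, "TCP OK - {0:.3f}s response on port {1}".format(elapsed, port)

    if __name__ == "__main__":
        # 6627 is the nimbus.thrift.port default above; the host is a placeholder.
        print(check_tcp_port("localhost", 6627))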

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-audit.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-audit.xml
new file mode 100644
index 0000000..18a6c93
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-audit.xml
@@ -0,0 +1,133 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.audit.is.enabled</name>
+    <value>true</value>
+    <description>Is Audit enabled?</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.audit.destination.hdfs</name>
+    <value>true</value>
+    <display-name>Audit to HDFS</display-name>
+    <description>Is Audit to HDFS enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.dir</name>
+    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
+    <description>HDFS folder to write audit to; make sure the service user has the required permissions</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs.dir</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
+    <value>/var/log/storm/audit/hdfs/spool</value>
+    <description>Local spool directory for HDFS audit events</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr</name>
+    <value>false</value>
+    <display-name>Audit to SOLR</display-name>
+    <description>Is Solr audit enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.solr</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.urls</name>
+    <value/>
+    <description>Solr URL</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.urls</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.zookeepers</name>
+    <value>NONE</value>
+    <description>Solr Zookeeper string</description>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.zookeepers</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
+    <value>/var/log/storm/audit/solr/spool</value>
+    <description>Local spool directory for Solr audit events</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.provider.summary.enabled</name>
+    <value>false</value>
+    <display-name>Audit provider summary enabled</display-name>
+    <description>Enable Summary audit?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+
+  <property>
+    <name>ranger.plugin.storm.ambari.cluster.name</name>
+    <value>{{cluster_name}}</value>
+    <description>Captures the name of the cluster where the Ranger Storm plugin is enabled.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
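
When HDFS or Solr audit is enabled, audit events that cannot be delivered are spooled to the local directories configured above. The sketch below is only a sanity check of those paths (not part of this patch; in the stack the directories are handled during Ranger plugin setup, presumably by the setup_ranger_storm.py script listed in the diffstat):

    import os

    # Local spool directories from the ranger-storm-audit defaults above
    # (adjust if the cluster overrides them).
    SPOOL_DIRS = [
        "/var/log/storm/audit/hdfs/spool",
        "/var/log/storm/audit/solr/spool",
    ]

    for path in SPOOL_DIRS:
        if not os.path.isdir(path):
            os.makedirs(path)
        print("{0}: writable={1}".format(path, os.access(path, os.W_OK)))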

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-plugin-properties.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-plugin-properties.xml
new file mode 100644
index 0000000..99f6e4d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-plugin-properties.xml
@@ -0,0 +1,121 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>policy_user</name>
+    <value>{{policy_user}}</value>
+    <display-name>Policy user for STORM</display-name>
+    <description>This user must be a system user and must also exist in the Ranger admin portal</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>common.name.for.certificate</name>
+    <value/>
+    <description>Common name for the certificate; this value should match what is specified in the repository within Ranger admin</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger-storm-plugin-enabled</name>
+    <value>No</value>
+    <display-name>Enable Ranger for STORM</display-name>
+    <description>Enable the Ranger Storm plugin?</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>ranger-storm-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_USERNAME</name>
+    <value>stormtestuser@EXAMPLE.COM</value>
+    <display-name>Ranger repository config user</display-name>
+    <description>Used for repository creation on ranger admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_PASSWORD</name>
+    <value>stormtestuser</value>
+    <display-name>Ranger repository config password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Used for repository creation on ranger admin</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_admin_username</name>
+    <value></value>
+    <display-name>External Ranger admin username</display-name>
+    <description>Default Ranger admin username, required when communicating with an external Ranger instance</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_admin_password</name>
+    <value></value>
+    <display-name>External Ranger admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Default Ranger admin password, required when communicating with an external Ranger instance</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_username</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin username</display-name>
+    <description>Default Ranger Ambari admin username, required when communicating with an external Ranger instance</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_password</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Default Ranger Ambari admin password, required when communicating with an external Ranger instance</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
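
REPOSITORY_CONFIG_USERNAME/PASSWORD above are the credentials Ambari hands to Ranger admin when the Storm service repository is created (in this patch that is presumably done by setup_ranger_storm.py). The sketch below is a rough equivalent of that call against Ranger's public REST API; the admin URL, service name, and the exact keys expected under "configs" are assumptions that may vary by Ranger version.

    import json
    import requests  # assumed to be available; not part of this patch

    RANGER_URL = "http://ranger-admin.example.com:6080"   # placeholder
    ADMIN_AUTH = ("admin", "admin")                       # placeholder Ranger admin credentials

    service_def = {
        "name": "c1_storm",      # repository/service name, placeholder
        "type": "storm",
        "configs": {
            "username": "stormtestuser@EXAMPLE.COM",   # REPOSITORY_CONFIG_USERNAME default above
            "password": "stormtestuser",               # REPOSITORY_CONFIG_PASSWORD default above
            "nimbus.url": "http://nimbus.example.com:6627",  # assumed key and value
        },
    }

    resp = requests.post(RANGER_URL + "/service/public/v2/api/service",
                         auth=ADMIN_AUTH,
                         headers={"Content-Type": "application/json"},
                         data=json.dumps(service_def))
    print(resp.status_code, resp.text)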

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-policymgr-ssl.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-policymgr-ssl.xml
new file mode 100644
index 0000000..cec82b0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-policymgr-ssl.xml
@@ -0,0 +1,70 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.password</name>
+    <value>myKeyFilePassword</value>
+    <property-type>PASSWORD</property-type>
+    <description>password for keystore</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.password</name>
+    <value>changeit</value>
+    <property-type>PASSWORD</property-type>
+    <description>java truststore password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java keystore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java truststore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore</name>
+    <value>/usr/hdp/current/storm-client/conf/ranger-plugin-keystore.jks</value>
+    <description>Java Keystore files</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore</name>
+    <value>/usr/hdp/current/storm-client/conf/ranger-plugin-truststore.jks</value>
+    <description>java truststore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
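
The two jceks://file{{credential_file}} properties point at a Hadoop credential store that holds the keystore and truststore passwords so they do not sit in clear text on disk. Below is a minimal sketch of building such a store with the hadoop credential CLI; the file path and alias names are assumptions, and in the stack the actual path comes from the params scripts during Ranger plugin setup.

    import subprocess

    CRED_FILE = "/etc/ranger/storm/cred.jceks"   # placeholder path

    def store_password(alias, password, cred_file=CRED_FILE):
        # Store one password alias in a JCEKS credential store.
        subprocess.check_call([
            "hadoop", "credential", "create", alias,
            "-value", password,
            "-provider", "jceks://file" + cred_file,
        ])

    if __name__ == "__main__":
        # Alias names are illustrative, not necessarily what the plugin expects.
        store_password("sslKeyStore", "myKeyFilePassword")
        store_password("sslTrustStore", "changeit")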

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-security.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-security.xml
new file mode 100644
index 0000000..7b1ed0f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-security.xml
@@ -0,0 +1,67 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>ranger.plugin.storm.service.name</name>
+    <value>{{repo_name}}</value>
+    <description>Name of the Ranger service containing policies for this Storm instance</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.storm.policy.source.impl</name>
+    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
+    <description>Class to retrieve policies from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.storm.policy.rest.url</name>
+    <value>{{policymgr_mgr_url}}</value>
+    <description>URL to Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>admin-properties</type>
+        <name>policymgr_external_url</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>ranger.plugin.storm.policy.pollIntervalMs</name>
+    <value>30000</value>
+    <description>How often to poll for policy changes, in milliseconds</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.storm.policy.cache.dir</name>
+    <value>/etc/ranger/{{repo_name}}/policycache</value>
+    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+
+  <property>
+    <name>ranger.plugin.storm.policy.rest.ssl.config.file</name>
+    <value>/usr/hdp/current/storm-client/conf/ranger-policymgr-ssl.xml</value>
+    <description>Path to the file containing SSL details to contact Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-atlas-application.properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-atlas-application.properties.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-atlas-application.properties.xml
new file mode 100644
index 0000000..47d7758
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-atlas-application.properties.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false">
+  <!-- These are the Atlas Hooks properties specific to this service. This file is then merged with common properties
+  that apply to all services. -->
+  <property>
+    <name>atlas.hook.storm.numRetries</name>
+    <value>3</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-cluster-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-cluster-log4j.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-cluster-log4j.xml
new file mode 100644
index 0000000..d7f7ae0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-cluster-log4j.xml
@@ -0,0 +1,133 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="false">
+
+  <property>
+    <name>storm_a1_maxfilesize</name>
+    <value>100</value>
+    <description>The maximum size of the log file before it is rotated</description>
+    <display-name>Storm Log: backup file size</display-name>
+    <value-attributes>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>storm_a1_maxbackupindex</name>
+    <value>9</value>
+    <description>The number of backup files</description>
+    <display-name>Storm Log: # of backup files</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>storm-cluster-log4j template</display-name>
+    <description>Custom log4j2 template for cluster.xml</description>
+    <value><![CDATA[
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration monitorInterval="60">
+<properties>
+    <property name="pattern">%d{yyyy-MM-dd HH:mm:ss.SSS} %c{1.} %t [%p] %msg%n</property>
+</properties>
+<appenders>
+    <RollingFile name="A1" immediateFlush="false"
+                 fileName="${sys:storm.log.dir}/${sys:logfile.name}"
+                 filePattern="${sys:storm.log.dir}/${sys:logfile.name}.%i.gz">
+        <PatternLayout>
+            <pattern>${pattern}</pattern>
+        </PatternLayout>
+        <Policies>
+            <SizeBasedTriggeringPolicy size="{{storm_a1_maxfilesize}} MB"/> <!-- Or every 100 MB -->
+        </Policies>
+        <DefaultRolloverStrategy max="{{storm_a1_maxbackupindex}}"/>
+    </RollingFile>
+    <RollingFile name="WEB-ACCESS" immediateFlush="false"
+                 fileName="${sys:storm.log.dir}/access-web-${sys:daemon.name}.log"
+                 filePattern="${sys:storm.log.dir}/access-web-${sys:daemon.name}.log.%i.gz">
+        <PatternLayout>
+            <pattern>${pattern}</pattern>
+        </PatternLayout>
+        <Policies>
+            <SizeBasedTriggeringPolicy size="100 MB"/> <!-- Or every 100 MB -->
+        </Policies>
+        <DefaultRolloverStrategy max="9"/>
+    </RollingFile>
+    <RollingFile name="THRIFT-ACCESS" immediateFlush="false"
+                 fileName="${sys:storm.log.dir}/access-${sys:logfile.name}"
+                 filePattern="${sys:storm.log.dir}/access-${sys:logfile.name}.%i.gz">
+    <PatternLayout>
+        <pattern>${pattern}</pattern>
+    </PatternLayout>
+        <Policies>
+            <SizeBasedTriggeringPolicy size="100 MB"/> <!-- Or every 100 MB -->
+        </Policies>
+        <DefaultRolloverStrategy max="9"/>
+    </RollingFile>
+    <Syslog name="syslog" format="RFC5424" charset="UTF-8" host="localhost" port="514"
+            protocol="UDP" appName="[${sys:daemon.name}]" mdcId="mdc" includeMDC="true"
+            facility="LOCAL5" enterpriseNumber="18060" newLine="true" exceptionPattern="%rEx{full}"
+            messageId="[${sys:user.name}:S0]" id="storm" immediateFlush="true" immediateFail="true"/>
+</appenders>
+<loggers>
+
+    <Logger name="org.apache.storm.logging.filters.AccessLoggingFilter" level="info" additivity="false">
+        <AppenderRef ref="WEB-ACCESS"/>
+        <AppenderRef ref="syslog"/>
+    </Logger>
+    <Logger name="org.apache.storm.logging.ThriftAccessLogger" level="info" additivity="false">
+        <AppenderRef ref="THRIFT-ACCESS"/>
+        <AppenderRef ref="syslog"/>
+    </Logger>
+    <root level="info"> <!-- We log everything -->
+        <appender-ref ref="A1"/>
+        <appender-ref ref="syslog"/>
+    </root>
+</loggers>
+</configuration>
+
+    ]]></value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
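
The content property above is a template: {{storm_a1_maxfilesize}} and {{storm_a1_maxbackupindex}} are replaced with the values of the two sibling properties before the result is written out as Storm's log4j2 cluster.xml. Ambari's stack scripts do this with their own templating machinery; the Jinja2 snippet below only illustrates the substitution step on a trimmed excerpt.

    from jinja2 import Template  # illustrative only

    excerpt = (
        '<SizeBasedTriggeringPolicy size="{{storm_a1_maxfilesize}} MB"/>\n'
        '<DefaultRolloverStrategy max="{{storm_a1_maxbackupindex}}"/>\n'
    )

    rendered = Template(excerpt).render(
        storm_a1_maxfilesize=100,    # default of storm_a1_maxfilesize above
        storm_a1_maxbackupindex=9,   # default of storm_a1_maxbackupindex above
    )
    print(rendered)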

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-env.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-env.xml
new file mode 100644
index 0000000..3ee0602
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-env.xml
@@ -0,0 +1,165 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+    <property>
+        <name>storm_user</name>
+        <display-name>Storm User</display-name>
+        <value>storm</value>
+        <property-type>USER</property-type>
+        <description/>
+        <value-attributes>
+            <type>user</type>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_log_dir</name>
+        <value>/var/log/storm</value>
+        <description/>
+        <value-attributes>
+            <type>directory</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_pid_dir</name>
+        <value>/var/run/storm</value>
+        <description/>
+        <value-attributes>
+            <type>directory</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>jmxremote_port</name>
+        <value>56431</value>
+        <description/>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_principal_name</name>
+        <description>Storm principal name</description>
+        <property-type>KERBEROS_PRINCIPAL</property-type>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_keytab</name>
+        <description>Storm keytab path</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_ui_principal_name</name>
+        <description>Storm UI principal name</description>
+        <property-type>KERBEROS_PRINCIPAL</property-type>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_ui_keytab</name>
+        <description>Storm UI keytab path</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus_keytab</name>
+        <description>Nimbus keytab path</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus_principal_name</name>
+        <description>Nimbus principal name</description>
+        <property-type>KERBEROS_PRINCIPAL</property-type>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_user_nofile_limit</name>
+        <value>128000</value>
+        <description>Max open files limit setting for STORM user.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_user_nproc_limit</name>
+        <value>65536</value>
+        <description>Max number of processes limit setting for STORM user.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <!-- storm-env.sh -->
+
+
+    <property>
+        <name>storm.atlas.hook</name>
+        <value>false</value>
+        <display-name>Enable Atlas Hook</display-name>
+        <description>Enable Atlas Hook</description>
+        <value-attributes>
+            <type>boolean</type>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+        <depends-on>
+            <property>
+                <type>application-properties</type>
+                <name>atlas.rest.address</name>
+            </property>
+        </depends-on>
+    </property>
+    <property>
+        <name>nimbus_seeds_supported</name>
+        <value>true</value>
+        <description/>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_logs_supported</name>
+        <value>true</value>
+        <description/>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <!-- storm-env.sh -->
+    <property>
+        <name>content</name>
+        <display-name>storm-env template</display-name>
+        <description>This is the Jinja template for the storm-env.sh file</description>
+        <value>
+            #!/bin/bash
+
+            # Set Storm specific environment variables here.
+
+            # The java implementation to use.
+            export JAVA_HOME={{java64_home}}
+
+            export STORM_CONF_DIR={{conf_dir}}
+            export STORM_HOME={{storm_component_home_dir}}
+
+            export STORM_JAR_JVM_OPTS={{jar_jvm_opts}}
+        </value>
+        <value-attributes>
+            <type>content</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+</configuration>
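
storm_user_nofile_limit and storm_user_nproc_limit above feed the ulimit settings for the storm user; the storm.conf.j2 template listed in the diffstat presumably renders them into a limits.d entry. A sketch of what such an entry would look like, assuming standard /etc/security/limits.conf syntax:

    # Values are the storm-env defaults above; the target path is an assumption.
    STORM_USER = "storm"
    NOFILE_LIMIT = 128000
    NPROC_LIMIT = 65536

    limits_entry = (
        "{0}   -   nofile   {1}\n"
        "{0}   -   nproc    {2}\n"
    ).format(STORM_USER, NOFILE_LIMIT, NPROC_LIMIT)

    print(limits_entry)   # typically written to /etc/security/limits.d/storm.conf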

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-site.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-site.xml
new file mode 100644
index 0000000..6b97fb6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-site.xml
@@ -0,0 +1,1002 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+
+    <property>
+        <name>storm.local.dir</name>
+        <value>/hadoop/storm</value>
+        <description>A directory on the local filesystem used by Storm for any local
+            filesystem usage it needs. The directory must exist and the Storm daemons must
+            have permission to read/write from this location.</description>
+        <value-attributes>
+            <type>directory</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.zookeeper.servers</name>
+        <value>['localhost']</value>
+        <description>A list of hosts of ZooKeeper servers used to manage the cluster.</description>
+        <value-attributes>
+            <type>multiLine</type>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.zookeeper.port</name>
+        <value>2181</value>
+        <description>The port Storm will use to connect to each of the ZooKeeper servers.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.zookeeper.root</name>
+        <value>/storm</value>
+        <description>The root location at which Storm stores data in ZooKeeper.</description>
+        <value-attributes>
+            <type>directory</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.zookeeper.session.timeout</name>
+        <value>30000</value>
+        <description>The session timeout for clients to ZooKeeper.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.zookeeper.connection.timeout</name>
+        <value>30000</value>
+        <description>The connection timeout for clients to ZooKeeper.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.zookeeper.retry.times</name>
+        <value>5</value>
+        <description>The number of times to retry a Zookeeper operation.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.zookeeper.retry.interval</name>
+        <value>1000</value>
+        <description>The interval between retries of a Zookeeper operation.</description>
+        <value-attributes>
+            <unit>ms</unit>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.zookeeper.retry.intervalceiling.millis</name>
+        <value>30000</value>
+        <description>The ceiling of the interval between retries of a Zookeeper operation.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>ms</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.cluster.mode</name>
+        <value>distributed</value>
+        <description>The mode this Storm cluster is running in. Either "distributed" or "local".</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.local.mode.zmq</name>
+        <value>false</value>
+        <description>Whether or not to use ZeroMQ for messaging in local mode. If this is set
+            to false, then Storm will use a pure-Java messaging system. The purpose
+            of this flag is to make it easy to run Storm in local mode by eliminating
+            the need for native dependencies, which can be difficult to install.
+        </description>
+        <value-attributes>
+            <type>boolean</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+
+    <property>
+        <name>nimbus.thrift.port</name>
+        <value>6627</value>
+        <description> Which port the Thrift interface of Nimbus should run on. Clients should
+            connect to this port to upload jars and submit topologies.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.thrift.max_buffer_size</name>
+        <value>1048576</value>
+        <description>The maximum buffer size thrift should use when reading messages.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>bytes</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>nimbus.task.timeout.secs</name>
+        <value>30</value>
+        <description>How long a task can go without heartbeating before Nimbus considers the task dead and reassigns it to another location.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.supervisor.timeout.secs</name>
+        <value>60</value>
+        <description>How long a supervisor can go without heartbeating before Nimbus considers it dead and stops assigning new work to it.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.monitor.freq.secs</name>
+        <value>10</value>
+        <description>
+            How often nimbus should wake up to check heartbeats and do reassignments. Note
+            that if a machine ever goes down Nimbus will immediately wake up and take action.
+            This parameter is for checking for failures when there's no explicit event like that occurring.
+        </description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.cleanup.inbox.freq.secs</name>
+        <value>600</value>
+        <description>How often nimbus should wake the cleanup thread to clean the inbox.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.inbox.jar.expiration.secs</name>
+        <value>3600</value>
+        <description>
+            The length of time a jar file lives in the inbox before being deleted by the cleanup thread.
+
+            Probably keep this value greater than or equal to NIMBUS_CLEANUP_INBOX_JAR_EXPIRATION_SECS.
+            Note that the time it takes to delete an inbox jar file is going to be somewhat more than
+            NIMBUS_CLEANUP_INBOX_JAR_EXPIRATION_SECS (depending on how often NIMBUS_CLEANUP_FREQ_SECS is set to).
+        </description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.task.launch.secs</name>
+        <value>120</value>
+        <description>A special timeout used when a task is initially launched. During launch, this is the timeout
+            used until the first heartbeat, overriding nimbus.task.timeout.secs.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.reassign</name>
+        <value>true</value>
+        <description>Whether or not nimbus should reassign tasks if it detects that a task goes down.
+            Defaults to true, and it's not recommended to change this value.</description>
+        <value-attributes>
+            <type>boolean</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.file.copy.expiration.secs</name>
+        <value>600</value>
+        <description>During upload/download with the master, how long an upload or download connection is idle
+            before nimbus considers it dead and drops the connection.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>ui.port</name>
+        <value>8744</value>
+        <description>Storm UI binds to this port.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>logviewer.port</name>
+        <value>8000</value>
+        <description>HTTP UI port for log viewer.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>logviewer.appender.name</name>
+        <value>A1</value>
+        <description>Appender name used by log viewer to determine log directory.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>drpc.port</name>
+        <value>3772</value>
+        <description>This port is used by Storm DRPC for receiving DRPC requests from clients.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>drpc.worker.threads</name>
+        <value>64</value>
+        <description>DRPC thrift server worker threads.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>drpc.queue.size</name>
+        <value>128</value>
+        <description>DRPC thrift server queue size.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>drpc.invocations.port</name>
+        <value>3773</value>
+        <description>This port on Storm DRPC is used by DRPC topologies to receive function invocations and send results back.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>drpc.request.timeout.secs</name>
+        <value>600</value>
+        <description>The timeout on DRPC requests within the DRPC server. Defaults to 10 minutes. Note that requests can also
+            timeout based on the socket timeout on the DRPC client, and separately based on the topology message
+            timeout for the topology implementing the DRPC function.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>transactional.zookeeper.root</name>
+        <value>/transactional</value>
+        <description>The root directory in ZooKeeper for metadata about TransactionalSpouts.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>transactional.zookeeper.servers</name>
+        <value>null</value>
+        <description>The list of ZooKeeper servers in which to keep the transactional state. If null (the default),
+            storm.zookeeper.servers will be used.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>transactional.zookeeper.port</name>
+        <value>null</value>
+        <description>The port to use to connect to the transactional ZooKeeper servers. If null (the default),
+            storm.zookeeper.port will be used.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>supervisor.slots.ports</name>
+        <value>[6700, 6701]</value>
+        <description>A list of ports that can run workers on this supervisor. Each worker uses one port, and
+            the supervisor will only run one worker per port. Use this configuration to tune
+            how many workers run on each machine.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>supervisor.worker.start.timeout.secs</name>
+        <value>120</value>
+        <description>How long a worker can go without heartbeating during the initial launch before
+            the supervisor tries to restart the worker process. This value override
+            supervisor.worker.timeout.secs during launch because there is additional
+            overhead to starting and configuring the JVM on launch.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>supervisor.worker.timeout.secs</name>
+        <value>30</value>
+        <description>How long a worker can go without heartbeating before the supervisor tries to restart the worker process.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>supervisor.monitor.frequency.secs</name>
+        <value>3</value>
+        <description>How often the supervisor checks the worker heartbeats to see if any of them need to be restarted.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>supervisor.heartbeat.frequency.secs</name>
+        <value>5</value>
+        <description>How often the supervisor sends a heartbeat to the master.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>worker.heartbeat.frequency.secs</name>
+        <value>1</value>
+        <description>How often this worker should heartbeat to the supervisor.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>task.heartbeat.frequency.secs</name>
+        <value>3</value>
+        <description>How often a task should heartbeat its status to the master.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>task.refresh.poll.secs</name>
+        <value>10</value>
+        <description>How often a task should sync its connections with other tasks (if a task is
+            reassigned, the other tasks sending messages to it need to refresh their connections).
+            In general though, when a reassignment happens other tasks will be notified
+            almost immediately. This configuration is here just in case that notification doesn't
+            come through.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zmq.threads</name>
+        <value>1</value>
+        <description>The number of threads that should be used by the zeromq context in each worker process.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zmq.linger.millis</name>
+        <value>5000</value>
+        <description>How long a connection should retry sending messages to a target host when
+            the connection is closed. This is an advanced configuration and can almost
+            certainly be ignored.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zmq.hwm</name>
+        <value>0</value>
+        <description>The high water mark for the ZeroMQ push sockets used for networking. Use this config to prevent buffer explosion
+            on the networking layer.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.messaging.netty.server_worker_threads</name>
+        <value>1</value>
+        <description>Netty based messaging: The # of worker threads for the server.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.messaging.netty.client_worker_threads</name>
+        <value>1</value>
+        <description>Netty based messaging: The # of worker threads for the client.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.messaging.netty.buffer_size</name>
+        <value>5242880</value>
+        <description>Netty based messaging: The buffer size for send/recv buffer.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>bytes</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.messaging.netty.max_retries</name>
+        <value>30</value>
+        <description>Netty based messaging: The max # of retries that a peer will perform when a remote is not accessible.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.messaging.netty.max_wait_ms</name>
+        <value>1000</value>
+        <description>Netty based messaging: The maximum number of milliseconds that a peer will wait between connection retries.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>ms</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.messaging.netty.min_wait_ms</name>
+        <value>100</value>
+        <description>Netty based messaging: The minimum number of milliseconds that a peer will wait between connection retries.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>ms</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.enable.message.timeouts</name>
+        <value>true</value>
+        <description>Whether Storm should time out messages. Defaults to true. Setting this to false is meant to be used
+            in unit tests to prevent tuples from being accidentally timed out during the test.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.debug</name>
+        <value>false</value>
+        <description>When set to true, Storm will log every message that's emitted.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.optimize</name>
+        <value>true</value>
+        <description>Whether or not the master should optimize topologies by running multiple tasks in a single thread where appropriate.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.workers</name>
+        <value>1</value>
+        <description>How many processes should be spawned around the cluster to execute this
+            topology. Each process will execute some number of tasks as threads within
+            them. This parameter should be used in conjunction with the parallelism hints
+            on each component in the topology to tune the performance of a topology.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.acker.executors</name>
+        <value>null</value>
+        <description>How many executors to spawn for ackers.
+
+            If this is set to 0, then Storm will immediately ack tuples as soon
+            as they come off the spout, effectively disabling reliability.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.message.timeout.secs</name>
+        <value>30</value>
+        <description>The maximum amount of time given to the topology to fully process a message
+            emitted by a spout. If the message is not acked within this time frame, Storm
+            will fail the message on the spout. Some spout implementations will then replay
+            the message at a later time.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
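+    <!--
+        Illustrative sketch (not a stack default): topology.workers, topology.acker.executors and
+        topology.message.timeout.secs are usually overridden per topology at submit time. Assuming
+        org.apache.storm.Config, org.apache.storm.StormSubmitter and a TopologyBuilder named "builder";
+        the topology name "my-topology" is hypothetical:
+
+        Config conf = new Config();
+        conf.setNumWorkers(2);            // topology.workers
+        conf.setNumAckers(1);             // topology.acker.executors; 0 disables tuple tracking
+        conf.setMessageTimeoutSecs(30);   // topology.message.timeout.secs
+        StormSubmitter.submitTopology("my-topology", conf, builder.createTopology());
+    -->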
+    <property>
+        <name>topology.skip.missing.kryo.registrations</name>
+        <value>false</value>
+        <description> Whether or not Storm should skip the loading of kryo registrations for which it
+            does not know the class or have the serializer implementation. Otherwise, the task will
+            fail to load and will throw an error at runtime. The use case of this is if you want to
+            declare your serializations on the storm.yaml files on the cluster rather than every single
+            time you submit a topology. Different applications may use different serializations and so
+            a single application may not have the code for the other serializers used by other apps.
+            By setting this config to true, Storm will ignore that it doesn't have those other serializations
+            rather than throw an error.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.max.task.parallelism</name>
+        <value>null</value>
+        <description>The maximum parallelism allowed for a component in this topology. This configuration is
+            typically used in testing to limit the number of threads spawned in local mode.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.max.spout.pending</name>
+        <value>1000</value>
+        <description>The maximum number of tuples that can be pending on a spout task at any given time.
+            This config applies to individual tasks, not to spouts or topologies as a whole.
+
+            A pending tuple is one that has been emitted from a spout but has not been acked or failed yet.
+            Note that this config parameter has no effect for unreliable spouts that don't tag
+            their tuples with a message id.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
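+    <!--
+        Illustrative sketch: the two limits above can also be set per topology through the Java API,
+        assuming org.apache.storm.Config; the values shown are only examples:
+
+        Config conf = new Config();
+        conf.setMaxTaskParallelism(4);    // topology.max.task.parallelism
+        conf.setMaxSpoutPending(1000);    // topology.max.spout.pending, per spout task
+    -->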
+    <property>
+        <name>topology.state.synchronization.timeout.secs</name>
+        <value>60</value>
+        <description>The maximum amount of time a component gives a source of state to synchronize before it requests
+            synchronization again.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.stats.sample.rate</name>
+        <value>0.05</value>
+        <description>The percentage of tuples to sample to produce stats for a task.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.builtin.metrics.bucket.size.secs</name>
+        <value>60</value>
+        <description>The time period that built-in metrics data is bucketed into.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.fall.back.on.java.serialization</name>
+        <value>true</value>
+        <description>Whether or not to use Java serialization in a topology.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
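+    <!--
+        Illustrative sketch: topology.skip.missing.kryo.registrations and topology.fall.back.on.java.serialization
+        interact with per-topology Kryo registrations. Assuming org.apache.storm.Config and hypothetical
+        MyEvent / MyEventSerializer classes:
+
+        Config conf = new Config();
+        conf.registerSerialization(MyEvent.class, MyEventSerializer.class);  // adds to topology.kryo.register
+        conf.setFallBackOnJavaSerialization(false);
+        conf.setSkipMissingKryoRegistrations(false);
+    -->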
+    <property>
+        <name>topology.worker.childopts</name>
+        <value>null</value>
+        <description>Topology-specific options for the worker child process. This is used in addition to WORKER_CHILDOPTS.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
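+    <!--
+        Illustrative sketch: topology.worker.childopts is typically left null here and supplied per topology,
+        for example (the heap size is only an example value):
+
+        Config conf = new Config();
+        conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-Xmx1024m");
+    -->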
+    <property>
+        <name>topology.executor.receive.buffer.size</name>
+        <value>1024</value>
+        <description>The size of the Disruptor receive queue for each executor. Must be a power of 2.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.executor.send.buffer.size</name>
+        <value>1024</value>
+        <description>The size of the Disruptor send queue for each executor. Must be a power of 2.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.receiver.buffer.size</name>
+        <value>8</value>
+        <description>The maximum number of messages to batch from the thread receiving off the network to the
+            executor queues. Must be a power of 2.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.transfer.buffer.size</name>
+        <value>1024</value>
+        <description>The size of the Disruptor transfer queue for each worker.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.tick.tuple.freq.secs</name>
+        <value>null</value>
+        <description>How often a tick tuple from the "__system" component and "__tick" stream should be sent
+            to tasks. Meant to be used as a component-specific configuration.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.worker.shared.thread.pool.size</name>
+        <value>4</value>
+        <description>The size of the shared thread pool for worker tasks to make use of. The thread pool can be accessed
+            via the TopologyContext.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
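+    <!--
+        Illustrative sketch: the shared pool sized above can be obtained from the TopologyContext inside a
+        component, e.g. in a bolt's prepare() method; the field name "executor" is hypothetical:
+
+        public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
+            this.executor = context.getSharedExecutor();   // java.util.concurrent.ExecutorService
+        }
+    -->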
+    <property>
+        <name>topology.disruptor.wait.strategy</name>
+        <value>com.lmax.disruptor.BlockingWaitStrategy</value>
+        <description>Configure the wait strategy used for internal queuing. Can be used to trade off latency
+            against throughput.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>topology.sleep.spout.wait.strategy.time.ms</name>
+        <value>1</value>
+        <description>The number of milliseconds the SleepEmptyEmitStrategy should sleep for.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.error.throttle.interval.secs</name>
+        <value>10</value>
+        <description>The interval in seconds to use for determining whether to throttle errors reported to Zookeeper. For example,
+            an interval of 10 seconds with topology.max.error.report.per.interval set to 5 will only allow 5 errors to be
+            reported to Zookeeper per task for every 10 second interval of time.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.max.error.report.per.interval</name>
+        <value>5</value>
+        <description>The maximum number of errors per task that will be reported to Zookeeper within each
+            topology.error.throttle.interval.secs window. For example, an interval of 10 seconds with this value set to 5
+            will only allow 5 errors to be reported to Zookeeper per task for every 10 second interval of time.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+    <property>
+        <name>topology.trident.batch.emit.interval.millis</name>
+        <value>500</value>
+        <description>How often a batch can be emitted in a Trident topology.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>dev.zookeeper.path</name>
+        <value>/tmp/dev-storm-zookeeper</value>
+        <description>The path to use as the zookeeper dir when running a zookeeper server via
+            "storm dev-zookeeper". This zookeeper instance is only intended for development;
+            it is not a production grade zookeeper setup.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+
+
+    <property>
+        <name>ui.childopts</name>
+        <value>-Xmx768m _JAAS_PLACEHOLDER</value>
+        <description>Childopts for Storm UI Java process.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>ui.filter</name>
+        <value>null</value>
+        <description>Class for Storm UI authentication</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>logviewer.childopts</name>
+        <value>-Xmx128m _JAAS_PLACEHOLDER</value>
+        <description>Childopts for log viewer java process.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>drpc.childopts</name>
+        <value>-Xmx768m _JAAS_PLACEHOLDER</value>
+        <description>Childopts for Storm DRPC Java process.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>_storm.min.ruid</name>
+        <value>null</value>
+        <description>min.user.id is set to the first real user id on the system. If the value is 'null' then the default value will be taken from the UID_MIN key of /etc/login.defs; otherwise the specified value will be used for all hosts.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.log.dir</name>
+        <value>{{log_dir}}</value>
+        <description>Log directory for Storm.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.authorizer</name>
+        <description>The authorization plugin class used by Nimbus to authorize cluster operations. The effective value depends on whether the Ranger Storm plugin is enabled.</description>
+        <depends-on>
+            <property>
+                <type>ranger-storm-plugin-properties</type>
+                <name>ranger-storm-plugin-enabled</name>
+            </property>
+        </depends-on>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>nimbus.seeds</name>
+        <value>localhost</value>
+        <description>Comma-delimited list of the hosts running nimbus server.</description>
+        <value-attributes>
+            <type>componentHosts</type>
+            <editable-only-at-install>true</editable-only-at-install>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.thrift.threads</name>
+        <value>196</value>
+        <description>The number of threads that should be used by the nimbus thrift server.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.min.replication.count.default</name>
+        <value>1</value>
+        <description>Default minimum number of nimbus hosts where the code must be replicated before leader nimbus can mark the topology as active and create assignments. </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.min.replication.count</name>
+        <value>{{actual_topology_min_replication_count}}</value>
+        <description>Calculated minimum number of nimbus hosts where the code must be replicated before leader nimbus can mark the topology as active and create assignments. </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.max.replication.wait.time.sec.default</name>
+        <value>60</value>
+        <description>Default maximum wait time for the nimbus host replication to achieve the nimbus.min.replication.count. Once this time has elapsed, nimbus will go ahead and perform topology activation tasks even if the required nimbus.min.replication.count is not achieved.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.max.replication.wait.time.sec</name>
+        <value>{{actual_topology_max_replication_wait_time_sec}}</value>
+        <description>Calculated maximum wait time for the nimbus host replication to achieve the nimbus.min.replication.count. Once this time has elapsed, nimbus will go ahead and perform topology activation tasks even if the required nimbus.min.replication.count is not achieved.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+
+
+    <property>
+        <name>storm.thrift.transport</name>
+        <value>{{storm_thrift_transport}}</value>
+        <description>The transport plug-in that is used for Thrift client/server communication.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>_storm.thrift.nonsecure.transport</name>
+        <value>org.apache.storm.security.auth.SimpleTransportPlugin</value>
+        <description>The transport plug-in that is used in non-secure mode for Thrift client/server communication.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>_storm.thrift.secure.transport</name>
+        <value>org.apache.storm.security.auth.kerberos.KerberosSaslTransportPlugin</value>
+        <description>The transport plug-in that is used in secure mode for Thrift client/server communication.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.messaging.transport</name>
+        <value>org.apache.storm.messaging.netty.Context</value>
+        <description>The transporter for communication among Storm tasks.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.topology.validator</name>
+        <value>org.apache.storm.nimbus.DefaultTopologyValidator</value>
+        <description>A custom class that implements ITopologyValidator that is run whenever a
+            topology is submitted. Can be used to provide business-specific logic for
+            whether topologies are allowed to run or not.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.spout.wait.strategy</name>
+        <value>org.apache.storm.spout.SleepSpoutWaitStrategy</value>
+        <description>A class that implements a strategy for what to do when a spout needs to wait. Waiting is
+            triggered in one of two conditions:
+
+            1. nextTuple emits no tuples
+            2. The spout has hit maxSpoutPending and can't emit any more tuples</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.kryo.factory</name>
+        <value>org.apache.storm.serialization.DefaultKryoFactory</value>
+        <description>Class that specifies how to create a Kryo instance for serialization. Storm will then apply
+            topology.kryo.register and topology.kryo.decorators on top of this. The default implementation
+            implements topology.fall.back.on.java.serialization and turns references off.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.tuple.serializer</name>
+        <value>org.apache.storm.serialization.types.ListDelegateSerializer</value>
+        <description>The serializer class for ListDelegate (tuple payload).
+            The default serializer will be ListDelegateSerializer</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>client.jartransformer.class</name>
+        <description>Storm Topology backward compatibility transformer</description>
+        <value>org.apache.storm.hack.StormShadeTransformer</value>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.impersonation.authorizer</name>
+        <description>
+            To ensure only authorized users can perform impersonation, start nimbus with nimbus.impersonation.authorizer set to org.apache.storm.security.auth.authorizer.ImpersonationAuthorizer.
+            A storm client may submit requests on behalf of another user. For example, if userX submits an oozie workflow, and as part of the workflow execution the oozie user wants to submit a topology on behalf of userX, it can do so by leveraging the impersonation feature. To submit a topology as some other user, use the StormSubmitter.submitTopologyAs API. Alternatively, use NimbusClient.getConfiguredClientAs to get a nimbus client as some other user and perform any nimbus action (i.e. kill/rebalance/activate/deactivate) using this client.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.impersonation.acl</name>
+        <description>
+            The ImpersonationAuthorizer uses nimbus.impersonation.acl as the ACL to authorize users. The following is a sample nimbus config for supporting impersonation:
+            nimbus.impersonation.acl:
+                impersonating_user1:
+                    hosts:
+                        [comma separated list of hosts from which impersonating_user1 is allowed to impersonate other users]
+                    groups:
+                        [comma separated list of groups whose users impersonating_user1 is allowed to impersonate]
+                impersonating_user2:
+                    hosts:
+                        [comma separated list of hosts from which impersonating_user2 is allowed to impersonate other users]
+                    groups:
+                        [comma separated list of groups whose users impersonating_user2 is allowed to impersonate]
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <!-- Deleted configs. -->
+
+    <property>
+        <name>storm.cluster.metrics.consumer.register</name>
+        <value>[{"class": "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter"}]</value>
+        <description></description>
+        <value-attributes>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.metrics.consumer.register</name>
+        <value>[{"class": "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsSink", "parallelism.hint": 1, "whitelist": ["kafkaOffset\\..+/", "__complete-latency", "__process-latency", "__receive\\.population$", "__sendqueue\\.population$", "__execute-count", "__emit-count", "__ack-count", "__fail-count", "memory/heap\\.usedBytes$", "memory/nonHeap\\.usedBytes$", "GC/.+\\.count$", "GC/.+\\.timeMs$"]}]</value>
+        <description></description>
+        <value-attributes>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
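+    <!--
+        Illustrative sketch: in addition to the cluster-wide JSON registration above, a topology can register
+        extra metrics consumers through the Java API, e.g. the LoggingMetricsConsumer bundled with Storm
+        (the parallelism hint of 1 is only an example):
+
+        Config conf = new Config();
+        conf.registerMetricsConsumer(org.apache.storm.metric.LoggingMetricsConsumer.class, 1);
+    -->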
+    <property>
+        <name>topology.metrics.aggregate.per.worker</name>
+        <value>true</value>
+        <description></description>
+        <value-attributes>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.metrics.aggregate.metric.evict.secs</name>
+        <value>5</value>
+        <description></description>
+        <value-attributes>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.metrics.expand.map.type</name>
+        <value>true</value>
+        <description></description>
+        <value-attributes>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.metrics.metric.name.separator</name>
+        <value>.</value>
+        <description></description>
+        <value-attributes>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>java.library.path</name>
+        <value>/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm-client/lib</value>
+        <description>This value is passed to spawned JVMs (e.g., Nimbus, Supervisor, and Workers)
+            for the java.library.path value. java.library.path tells the JVM where
+            to look for native libraries. It is necessary to set this config correctly since
+            Storm uses the ZeroMQ and JZMQ native libs. </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.childopts</name>
+        <value>-Xmx1024m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm-nimbus/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-nimbus/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM</value>
+        <description>This parameter is used by the storm-deploy project to configure the jvm options for the nimbus daemon.</description>
+        <value-attributes>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>worker.childopts</name>
+        <value>-Xmx768m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm-client/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-client/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM</value>
+        <description>The jvm opts provided to workers launched by this supervisor. All "%ID%" substrings are replaced with an identifier for this worker.</description>
+        <value-attributes>
+            <type>multiLine</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>supervisor.childopts</name>
+        <value>-Xmx256m _JAAS_PLACEHOLDER -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port={{jmxremote_port}} -javaagent:/usr/hdp/current/storm-supervisor/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-supervisor/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM</value>
+        <description>This parameter is used by the storm-deploy project to configure the jvm options for the supervisor daemon.</description>
+        <value-attributes>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+</configuration>