Posted to commits@ambari.apache.org by nc...@apache.org on 2015/10/23 16:47:15 UTC

[01/50] [abbrv] ambari git commit: AMBARI-13503 Recommendation popup shows incorrect current value. (ababiichuk)

Repository: ambari
Updated Branches:
  refs/heads/branch-dev-patch-upgrade 3265d0b8f -> b25c8a848


AMBARI-13503 Recommendation popup shows incorrect current value. (ababiichuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b60b3af8
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b60b3af8
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b60b3af8

Branch: refs/heads/branch-dev-patch-upgrade
Commit: b60b3af8bf208b969338c96ff5fd297319f99501
Parents: 28106d3
Author: aBabiichuk <ab...@cybervisiontech.com>
Authored: Wed Oct 21 16:45:19 2015 +0300
Committer: aBabiichuk <ab...@cybervisiontech.com>
Committed: Wed Oct 21 16:49:09 2015 +0300

----------------------------------------------------------------------
 ambari-web/app/mixins/common/configs/enhanced_configs.js | 2 --
 1 file changed, 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b60b3af8/ambari-web/app/mixins/common/configs/enhanced_configs.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/common/configs/enhanced_configs.js b/ambari-web/app/mixins/common/configs/enhanced_configs.js
index bee7ada..009a775 100644
--- a/ambari-web/app/mixins/common/configs/enhanced_configs.js
+++ b/ambari-web/app/mixins/common/configs/enhanced_configs.js
@@ -463,8 +463,6 @@ App.EnhancedConfigsMixin = Em.Mixin.create({
             initialValue = override ? override.get('savedValue') : cp && cp.get('savedValue');
           }
 
-
-          initialValue = Em.isNone(initialValue) ? value : initialValue;
           var recommendedValue = configObject[key].properties[propertyName];
 
           var isNewProperty = (!notDefaultGroup && Em.isNone(cp)) || (notDefaultGroup && group && Em.isNone(override));

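For context, a minimal JavaScript sketch of the bug the deleted line caused, reconstructed from the hunk above (the values are illustrative, not from the commit):

// A property that was never saved has no savedValue:
var savedValue;            // Em.isNone(savedValue) === true
var value = '2048';        // live value, already carrying the recommendation

// Pre-fix: the fallback replaced the missing saved value with the live one,
// so the popup's "current value" column showed the recommendation itself.
var initialValue = Em.isNone(savedValue) ? value : savedValue;  // '2048'

// Post-fix: initialValue keeps the real saved value (here undefined), letting
// the popup distinguish "never saved" from "current equals recommendation".
initialValue = savedValue;                                      // undefined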

[39/50] [abbrv] ambari git commit: AMBARI-13517. Ambari Server JVM crashed after several clicks in Web UI to navigate graph timerange. ClassCastException fix. (swagle)

Posted by nc...@apache.org.
AMBARI-13517. Ambari Server JVM crashed after several clicks in Web UI to navigate graph timerange. ClassCastException fix. (swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/049934f9
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/049934f9
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/049934f9

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 049934f9c485ad53905e079f474539a83288c944
Parents: 6cd152c
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Thu Oct 22 18:30:44 2015 -0700
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Thu Oct 22 18:31:11 2015 -0700

----------------------------------------------------------------------
 .../metrics/timeline/PhoenixHBaseAccessor.java                     | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/049934f9/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
index 1ed2a72..06ae292 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
@@ -109,7 +109,7 @@ public class PhoenixHBaseAccessor {
 
   private static final TimelineMetricReadHelper TIMELINE_METRIC_READ_HELPER = new TimelineMetricReadHelper();
   private static ObjectMapper mapper = new ObjectMapper();
-  private static TypeReference<Map<Long, Double>> metricValuesTypeRef = new TypeReference<Map<Long, Double>>() {};
+  private static TypeReference<TreeMap<Long, Double>> metricValuesTypeRef = new TypeReference<TreeMap<Long, Double>>() {};
 
   private final Configuration hbaseConf;
   private final Configuration metricsConf;

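Context for this one-line fix: Jackson's ObjectMapper binds a TypeReference<Map<Long, Double>> to a plain LinkedHashMap by default, so a downstream cast of the deserialized metric values to TreeMap fails with a ClassCastException at runtime. Typing the TypeReference as TreeMap<Long, Double> makes Jackson instantiate the sorted map directly, with no change needed at the call sites.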

[19/50] [abbrv] ambari git commit: AMBARI-13523. Ambari should expose storm log4j configs (aonishuk)

Posted by nc...@apache.org.
AMBARI-13523. Ambari should expose storm log4j configs (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/565dc0de
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/565dc0de
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/565dc0de

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 565dc0de86fc8954cc3331941f3b86aaa52d4d42
Parents: 1f702fb
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Thu Oct 22 15:56:33 2015 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Thu Oct 22 15:57:00 2015 +0300

----------------------------------------------------------------------
 .../STORM/0.9.1.2.1/configuration/storm-env.xml |  6 ++++++
 .../0.9.1.2.1/package/scripts/params_linux.py   |  5 ++++-
 .../STORM/0.9.1.2.1/package/scripts/storm.py    | 12 ++++++++++-
 .../HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml | 13 ++++++++++++
 .../HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml | 12 +++++++++++
 .../stacks/HDP/2.2/upgrades/upgrade-2.3.xml     |  6 ++++++
 .../services/STORM/configuration/storm-env.xml  |  5 +++++
 .../stacks/HDP/2.3/upgrades/config-upgrade.xml  | 13 ++++++++++++
 .../catalog/UpgradeCatalog_2.1_to_2.3.json      | 21 +++++++++++++++++++-
 .../catalog/UpgradeCatalog_2.2_to_2.3.json      | 21 +++++++++++++++++++-
 .../stacks/2.1/configs/default-storm-start.json |  3 ++-
 .../test/python/stacks/2.1/configs/default.json |  3 ++-
 .../stacks/2.1/configs/secured-storm-start.json |  3 ++-
 .../test/python/stacks/2.1/configs/secured.json |  3 ++-
 14 files changed, 118 insertions(+), 8 deletions(-)
----------------------------------------------------------------------

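The pieces of this change fit together as follows: storm-env gains a storm_logs_supported flag (false in the shared 0.9.1.2.1 definition, true in the HDP 2.3 stack), params_linux.py picks up that flag plus the content of the new storm-cluster-log4j and storm-worker-log4j configs, storm.py writes those contents to cluster.xml and worker.xml under the log4j2 directory when the flag is set, and the upgrade packs and catalogs seed the flag and the log4j contents for clusters upgrading to HDP 2.3.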

http://git-wip-us.apache.org/repos/asf/ambari/blob/565dc0de/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/configuration/storm-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/configuration/storm-env.xml b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/configuration/storm-env.xml
index 7e36a35..02a9197 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/configuration/storm-env.xml
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/configuration/storm-env.xml
@@ -75,5 +75,11 @@ export STORM_HOME={{storm_component_home_dir}}
      <value>false</value>
      <description></description>
    </property>
+   <property>
+     <name>storm_logs_supported</name>
+     <value>false</value>
+     <description></description>
+   </property>
+
 
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/565dc0de/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
index df65185..c1f118f 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
@@ -57,7 +57,7 @@ if stack_is_hdp22_or_further:
   rest_lib_dir = format("{storm_component_home_dir}/contrib/storm-rest")
   storm_bin_dir = format("{storm_component_home_dir}/bin")
   storm_lib_dir = format("{storm_component_home_dir}/lib")
-
+  log4j_dir = format("{storm_component_home_dir}/log4j2")
 
 storm_user = config['configurations']['storm-env']['storm_user']
 log_dir = config['configurations']['storm-env']['storm_log_dir']
@@ -70,6 +70,7 @@ nimbus_port = config['configurations']['storm-site']['nimbus.thrift.port']
 storm_zookeeper_root_dir = default('/configurations/storm-site/storm.zookeeper.root', None)
 storm_zookeeper_servers = config['configurations']['storm-site']['storm.zookeeper.servers']
 storm_zookeeper_port = config['configurations']['storm-site']['storm.zookeeper.port']
+storm_logs_supported = config['configurations']['storm-env']['storm_logs_supported']
 
 # nimbus.seeds is supported in HDP 2.3.0.0 and higher
 nimbus_seeds_supported = default('/configurations/storm-env/nimbus_seeds_supported', False)
@@ -174,6 +175,8 @@ repo_config_username = config['configurations']['ranger-storm-plugin-properties'
 ranger_env = config['configurations']['ranger-env']
 ranger_plugin_properties = config['configurations']['ranger-storm-plugin-properties']
 policy_user = config['configurations']['ranger-storm-plugin-properties']['policy_user']
+storm_cluster_log4j_content = config['configurations']['storm-cluster-log4j']['content']
+storm_worker_log4j_content = config['configurations']['storm-worker-log4j']['content']
 
 # some commands may need to supply the JAAS location when running as storm
 storm_jaas_file = format("{conf_dir}/storm_jaas.conf")

http://git-wip-us.apache.org/repos/asf/ambari/blob/565dc0de/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/storm.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/storm.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/storm.py
index ac5c9f8..2e9557a 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/storm.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/storm.py
@@ -106,7 +106,17 @@ def storm(name=None):
     owner=params.storm_user,
     content=InlineTemplate(params.storm_env_sh_template)
   )
-
+  
+  if params.storm_logs_supported:
+    File(format("{log4j_dir}/cluster.xml"),
+      owner=params.storm_user,
+      content=InlineTemplate(params.storm_cluster_log4j_content)
+    )
+    File(format("{log4j_dir}/worker.xml"),
+      owner=params.storm_user,
+      content=InlineTemplate(params.storm_worker_log4j_content)
+    )
+  
   if params.security_enabled:
     TemplateConfig(format("{conf_dir}/storm_jaas.conf"),
                    owner=params.storm_user

http://git-wip-us.apache.org/repos/asf/ambari/blob/565dc0de/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
index a23fe3d..a71c379 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
@@ -270,6 +270,19 @@
       <execute-stage service="STORM" component="NIMBUS">
         <task xsi:type="configure" id="hdp_2_3_0_0_nimbus_convert_nimbus_host_to_seeds"/>
       </execute-stage>
+      
+      <execute-stage service="STORM" component="NIMBUS">
+        <task xsi:type="configure" id="hdp_2_3_0_0_update_storm_env"/>
+      </execute-stage>
+      
+      <execute-stage service="STORM" component="NIMBUS">
+        <task xsi:type="configure" id="hdp_2_3_0_0_add_storm_cluster_logs_content"/>
+      </execute-stage>
+      
+      <execute-stage service="STORM" component="NIMBUS">
+        <task xsi:type="configure" id="hdp_2_3_0_0_add_storm_worker_logs_content"/>
+      </execute-stage>
+
 
       <execute-stage service="STORM" component="NIMBUS">
         <task xsi:type="configure" id="hdp_2_3_0_0_nimbus_update_env_vars"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/565dc0de/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml
index d71387a..9997c12 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml
@@ -414,6 +414,18 @@
       <execute-stage service="STORM" component="NIMBUS">
         <task xsi:type="configure" id="hdp_2_3_0_0_nimbus_convert_nimbus_host_to_seeds"/>
       </execute-stage>
+      
+      <execute-stage service="STORM" component="NIMBUS">
+        <task xsi:type="configure" id="hdp_2_3_0_0_update_storm_env"/>
+      </execute-stage>
+      
+      <execute-stage service="STORM" component="NIMBUS">
+        <task xsi:type="configure" id="hdp_2_3_0_0_add_storm_cluster_logs_content"/>
+      </execute-stage>
+      
+      <execute-stage service="STORM" component="NIMBUS">
+        <task xsi:type="configure" id="hdp_2_3_0_0_add_storm_worker_logs_content"/>
+      </execute-stage>
 
       <execute-stage service="STORM" component="NIMBUS">
         <task xsi:type="configure" id="hdp_2_3_0_0_nimbus_update_env_vars"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/565dc0de/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
index 233d86b..1a90991 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
@@ -790,6 +790,12 @@
           <task xsi:type="configure" id="hdp_2_3_0_0_nimbus_monitor_freq_adjustment"/>
 
           <task xsi:type="configure" id="hdp_2_3_0_0_nimbus_convert_nimbus_host_to_seeds"/>
+          
+          <task xsi:type="configure" id="hdp_2_3_0_0_update_storm_env"/>
+          
+          <task xsi:type="configure" id="hdp_2_3_0_0_add_storm_cluster_logs_content"/>
+          
+          <task xsi:type="configure" id="hdp_2_3_0_0_add_storm_worker_logs_content"/>
 
           <task xsi:type="configure" id="hdp_2_3_0_0_nimbus_update_env_vars"/>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/565dc0de/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/storm-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/storm-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/storm-env.xml
index 1d33bd1..95a241b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/storm-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/storm-env.xml
@@ -26,4 +26,9 @@
     <value>true</value>
     <description></description>
   </property>
+   <property>
+     <name>storm_logs_supported</name>
+     <value>true</value>
+     <description></description>
+   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/565dc0de/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
index 48f5d50..fe8e1fe 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
@@ -724,6 +724,19 @@
     <service name="STORM">
       <component name="NIMBUS">
         <changes>
+          <definition id="hdp_2_3_0_0_update_storm_env">
+            <type>storm-env</type>
+            <set key="storm_logs_supported" value="true" />
+          </definition>
+          <definition id="hdp_2_3_0_0_add_storm_cluster_logs_content">
+            <type>storm-cluster-log4j</type>
+            <set key="content" value="\n    \n&lt;!--\n Licensed to the Apache Software Foundation (ASF) under one or more\n contributor license agreements.  See the NOTICE file distributed with\n this work for additional information regarding copyright ownership.\n The ASF licenses this file to You under the Apache License, Version 2.0\n (the \&quot;License\&quot;); you may not use this file except in compliance with\n the License.  You may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \&quot;AS IS\&quot; BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n--&gt;\n\n&lt;configuration monitorInterval=\&quot;60\&quot;&gt;\n&lt;properties&gt;\n    &lt;property name=\&quot;pattern\&quot;&gt;%d{yyyy-MM-dd HH:mm:ss.SSS} %c{1.} [%p] %msg%n&lt;/property&gt;\n    &lt;property name=\&quot;patternMetris\&quot;&gt;%d %-8r %m%n&lt;/property&gt;\n&lt;/properties&gt;\n&lt;appenders&gt;\n    &lt;RollingFile name=\&quot;A1\&quot;\n                 fileName=\&quot;${sys:storm.log.dir}/${sys:logfile.name}\&quot;\n                 filePattern=\&quot;${sys:storm.log.dir}/${sys:logfile.name}.%i\&quot;&gt;\n        &lt;PatternLayout&gt;\n            &lt;pattern&gt;${pattern}&lt;/pattern&gt;\n        &lt;/PatternLayout&gt;\n        &lt;Policies&gt;\n            &lt;SizeBasedTriggeringPolicy size=\&quot;100 MB\&quot;/&gt; &lt;!-- Or every 100 MB --&gt;\n        &lt;/Policies&gt;\n        &lt;DefaultRolloverStrategy max=\&quot;9\&quot;/&gt;\n    &lt;/RollingFile&gt;\n    &lt;RollingFile name=\&quot;ACCESS\&quot;\n                 fileName=\&quot;${sys:storm.log.dir}/access.log\&quot;\n                 filePattern=\&quot;${sys:storm.log.dir}/access.log.%i\&quot;&gt;\n        &lt;PatternLayout&gt;\n            &lt;pattern&gt;${pattern}&lt;/pattern&gt;\n        &lt;/PatternLayout&gt;\n        &lt;Policies&gt;\n            &lt;SizeBasedTriggeringPolicy size=\&quot;100 MB\&quot;/&gt; &lt;!-- Or every 100 MB --&gt;\n        &lt;/Policies&gt;\n        &lt;DefaultRolloverStrategy max=\&quot;9\&quot;/&gt;\n    &lt;/RollingFile&gt;\n    &lt;RollingFile name=\&quot;METRICS\&quot;\n                 fileName=\&quot;${sys:storm.log.dir}/metrics.log\&quot;\n                 filePattern=\&quot;${sys:storm.log.dir}/metrics.log.%i\&quot;&gt;\n        &lt;PatternLayout&gt;\n            &lt;pattern&gt;${patternMetris}&lt;/pattern&gt;\n        &lt;/PatternLayout&gt;\n        &lt;Policies&gt;\n            &lt;SizeBasedTriggeringPolicy size=\&quot;2 MB\&quot;/&gt; &lt;!-- Or every 100 MB --&gt;\n        &lt;/Policies&gt;\n        &lt;DefaultRolloverStrategy max=\&quot;9\&quot;/&gt;\n    &lt;/RollingFile&gt;\n    &lt;Syslog name=\&quot;syslog\&quot; format=\&quot;RFC5424\&quot; host=\&quot;localhost\&quot; port=\&quot;514\&quot;\n            protocol=\&quot;UDP\&quot; appName=\&quot;[${sys:daemon.name}]\&quot; mdcId=\&quot;mdc\&quot; includeMDC=\&quot;true\&quot;\n            facility=\&quot;LOCAL5\&quot; enterpriseNumber=\&quot;18060\&quot; newLine=\&quot;true\&quot; exceptionPattern=\&quot;%rEx{full}\&quot;\n            messageId=\&quot;[${sys:user.name}:S0]\&quot; id=\&quot;storm\&quot;/&gt;\n&lt;/appenders&gt;\n&lt;loggers&gt;\n\n    &lt;Logger name=\&quot;backtype.storm.security.auth.authorizer\&quot; level=\&quot;info\&quot;&gt;\n        &lt;AppenderRef ref=\&quot;ACCESS\&quot;/&gt;\n    &lt;/Logger&gt;\n    &lt;Logger name=\&quot;backtype.storm.metric.LoggingMetricsConsumer\&quot; level=\&quot;info\&quot;&gt;\n        &lt;AppenderRef ref=\&quot;METRICS\&quot;/&gt;\n    &lt;/Logger&gt;\n    &lt;root level=\&quot;info\&quot;&gt; &lt;!-- We log everything --&gt;\n        &lt;appender-ref ref=\&quot;A1\&quot;/&gt;\n        &lt;appender-ref ref=\&quot;syslog\&quot;/&gt;\n    &lt;/root&gt;\n&lt;/loggers&gt;\n&lt;/configuration&gt;\n    \n    &quot;"/>
+          </definition>
+          <definition id="hdp_2_3_0_0_add_storm_worker_logs_content">
+            <type>storm-worker-log4j</type>
+            <set key="content" value="\n    \n&lt;!--\n Licensed to the Apache Software Foundation (ASF) under one or more\n contributor license agreements.  See the NOTICE file distributed with\n this work for additional information regarding copyright ownership.\n The ASF licenses this file to You under the Apache License, Version 2.0\n (the \&quot;License\&quot;); you may not use this file except in compliance with\n the License.  You may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \&quot;AS IS\&quot; BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n--&gt;\n\n&lt;configuration monitorInterval=\&quot;60\&quot;&gt;\n&lt;properties&gt;\n    &lt;property name=\&quot;pattern\&quot;&gt;%d{yyyy-MM-dd HH:mm:ss.SSS} %c{1.} [%p] %msg%n&lt;/property&gt;\n    &lt;property name=\&quot;patternNoTime\&quot;&gt;%msg%n&lt;/property&gt;\n&lt;/properties&gt;\n&lt;appenders&gt;\n    &lt;RollingFile name=\&quot;A1\&quot;\n                 fileName=\&quot;${sys:storm.log.dir}/${sys:logfile.name}\&quot;\n                 filePattern=\&quot;${sys:storm.log.dir}/${sys:logfile.name}.%i.gz\&quot;&gt;\n        &lt;PatternLayout&gt;\n            &lt;pattern&gt;${pattern}&lt;/pattern&gt;\n        &lt;/PatternLayout&gt;\n        &lt;Policies&gt;\n            &lt;SizeBasedTriggeringPolicy size=\&quot;100 MB\&quot;/&gt; &lt;!-- Or every 100 MB --&gt;\n        &lt;/Policies&gt;\n        &lt;DefaultRolloverStrategy max=\&quot;9\&quot;/&gt;\n    &lt;/RollingFile&gt;\n    &lt;RollingFile name=\&quot;STDOUT\&quot;\n                 fileName=\&quot;${sys:storm.log.dir}/${sys:logfile.name}.out\&quot;\n                 filePattern=\&quot;${sys:storm.log.dir}/${sys:logfile.name}.out.%i.gz\&quot;&gt;\n       &lt;PatternLayout&gt;\n            &lt;pattern&gt;${patternNoTime}&lt;/pattern&gt;\n        &lt;/PatternLayout&gt;\n        &lt;Policies&gt;\n            &lt;SizeBasedTriggeringPolicy size=\&quot;100 MB\&quot;/&gt; &lt;!-- Or every 100 MB --&gt;\n        &lt;/Policies&gt;\n        &lt;DefaultRolloverStrategy max=\&quot;4\&quot;/&gt;\n    &lt;/RollingFile&gt;\n    &lt;RollingFile name=\&quot;STDERR\&quot;\n                 fileName=\&quot;${sys:storm.log.dir}/${sys:logfile.name}.err\&quot;\n                 filePattern=\&quot;${sys:storm.log.dir}/${sys:logfile.name}.err.%i.gz\&quot;&gt;\n        &lt;PatternLayout&gt;\n            &lt;pattern&gt;${patternNoTime}&lt;/pattern&gt;\n        &lt;/PatternLayout&gt;\n        &lt;Policies&gt;\n            &lt;SizeBasedTriggeringPolicy size=\&quot;100 MB\&quot;/&gt; &lt;!-- Or every 100 MB --&gt;\n        &lt;/Policies&gt;\n        &lt;DefaultRolloverStrategy max=\&quot;4\&quot;/&gt;\n    &lt;/RollingFile&gt;\n    &lt;Syslog name=\&quot;syslog\&quot; format=\&quot;RFC5424\&quot; host=\&quot;localhost\&quot; port=\&quot;514\&quot;\n        protocol=\&quot;UDP\&quot; appName=\&quot;[${sys:storm.id}:${sys:worker.port}]\&quot; mdcId=\&quot;mdc\&quot; includeMDC=\&quot;true\&quot;\n        facility=\&quot;LOCAL5\&quot; enterpriseNumber=\&quot;18060\&quot; newLine=\&quot;true\&quot; exceptionPattern=\&quot;%rEx{full}\&quot;\n        messageId=\&quot;[${sys:user.name}:${sys:logging.sensitivity}]\&quot; id=\&quot;storm\&quot;/&gt;\n&lt;/appenders&gt;\n&lt;loggers&gt;\n    &lt;root level=\&quot;info\&quot;&gt; &lt;!-- We log everything --&gt;\n        &lt;appender-ref ref=\&quot;A1\&quot;/&gt;\n        &lt;appender-ref ref=\&quot;syslog\&quot;/&gt;\n    &lt;/root&gt;\n    &lt;Logger name=\&quot;STDERR\&quot; level=\&quot;INFO\&quot;&gt;\n        &lt;appender-ref ref=\&quot;STDERR\&quot;/&gt;\n        &lt;appender-ref ref=\&quot;syslog\&quot;/&gt;\n    &lt;/Logger&gt;\n    &lt;Logger name=\&quot;STDOUT\&quot; level=\&quot;INFO\&quot;&gt;\n        &lt;appender-ref ref=\&quot;STDOUT\&quot;/&gt;\n        &lt;appender-ref ref=\&quot;syslog\&quot;/&gt;\n    &lt;/Logger&gt;\n&lt;/loggers&gt;\n&lt;/configuration&gt;\n    \n"/>
+          </definition>
+          
           <definition xsi:type="configure" id="hdp_2_3_0_0_nimbus_monitor_freq_adjustment">
             <condition type="storm-site" key="nimbus.monitor.freq.secs" value="10">
               <type>storm-site</type>

http://git-wip-us.apache.org/repos/asf/ambari/blob/565dc0de/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.3.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.3.json b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.3.json
index e138643..486a856 100644
--- a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.3.json
+++ b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.3.json
@@ -53,12 +53,31 @@
             "required-services": [
               "STORM"
             ]
+          },
+          "storm-cluster-log4j": {
+            "merged-copy": "yes",
+            "required-services": [
+              "STORM"
+            ]
+          },
+          "storm-worker-log4j": {
+            "merged-copy": "yes",
+            "required-services": [
+              "STORM"
+            ]
           }
         }
       },
       "properties": {
         "storm-env": {
-          "nimbus_seeds_supported": "true"
+          "nimbus_seeds_supported": "true",
+          "storm_logs_supported": "true"
+        },
+        "storm-cluster-log4j": {
+          "content": "\n    \n<!--\n Licensed to the Apache Software Foundation (ASF) under one or more\n contributor license agreements.  See the NOTICE file distributed with\n this work for additional information regarding copyright ownership.\n The ASF licenses this file to You under the Apache License, Version 2.0\n (the \"License\"); you may not use this file except in compliance with\n the License.  You may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n-->\n\n<configuration monitorInterval=\"60\">\n<properties>\n    <property name=\"pattern\">%d{yyyy-MM-dd HH:mm:ss.SSS} %c{1.} [%p] %msg%n</property>\n    <property name=\"patt
 ernMetris\">%d %-8r %m%n</property>\n</properties>\n<appenders>\n    <RollingFile name=\"A1\"\n                 fileName=\"${sys:storm.log.dir}/${sys:logfile.name}\"\n                 filePattern=\"${sys:storm.log.dir}/${sys:logfile.name}.%i\">\n        <PatternLayout>\n            <pattern>${pattern}</pattern>\n        </PatternLayout>\n        <Policies>\n            <SizeBasedTriggeringPolicy size=\"100 MB\"/> <!-- Or every 100 MB -->\n        </Policies>\n        <DefaultRolloverStrategy max=\"9\"/>\n    </RollingFile>\n    <RollingFile name=\"ACCESS\"\n                 fileName=\"${sys:storm.log.dir}/access.log\"\n                 filePattern=\"${sys:storm.log.dir}/access.log.%i\">\n        <PatternLayout>\n            <pattern>${pattern}</pattern>\n        </PatternLayout>\n        <Policies>\n            <SizeBasedTriggeringPolicy size=\"100 MB\"/> <!-- Or every 100 MB -->\n        </Policies>\n        <DefaultRolloverStrategy max=\"9\"/>\n    </RollingFile>\n    <RollingFile
  name=\"METRICS\"\n                 fileName=\"${sys:storm.log.dir}/metrics.log\"\n                 filePattern=\"${sys:storm.log.dir}/metrics.log.%i\">\n        <PatternLayout>\n            <pattern>${patternMetris}</pattern>\n        </PatternLayout>\n        <Policies>\n            <SizeBasedTriggeringPolicy size=\"2 MB\"/> <!-- Or every 100 MB -->\n        </Policies>\n        <DefaultRolloverStrategy max=\"9\"/>\n    </RollingFile>\n    <Syslog name=\"syslog\" format=\"RFC5424\" host=\"localhost\" port=\"514\"\n            protocol=\"UDP\" appName=\"[${sys:daemon.name}]\" mdcId=\"mdc\" includeMDC=\"true\"\n            facility=\"LOCAL5\" enterpriseNumber=\"18060\" newLine=\"true\" exceptionPattern=\"%rEx{full}\"\n            messageId=\"[${sys:user.name}:S0]\" id=\"storm\"/>\n</appenders>\n<loggers>\n\n    <Logger name=\"backtype.storm.security.auth.authorizer\" level=\"info\">\n        <AppenderRef ref=\"ACCESS\"/>\n    </Logger>\n    <Logger name=\"backtype.storm.metric.Loggi
 ngMetricsConsumer\" level=\"info\">\n        <AppenderRef ref=\"METRICS\"/>\n    </Logger>\n    <root level=\"info\"> <!-- We log everything -->\n        <appender-ref ref=\"A1\"/>\n        <appender-ref ref=\"syslog\"/>\n    </root>\n</loggers>\n</configuration>\n    \n    "
+        },
+        "storm-worker-log4j": {
+          "content": "\n    \n<!--\n Licensed to the Apache Software Foundation (ASF) under one or more\n contributor license agreements.  See the NOTICE file distributed with\n this work for additional information regarding copyright ownership.\n The ASF licenses this file to You under the Apache License, Version 2.0\n (the \"License\"); you may not use this file except in compliance with\n the License.  You may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n-->\n\n<configuration monitorInterval=\"60\">\n<properties>\n    <property name=\"pattern\">%d{yyyy-MM-dd HH:mm:ss.SSS} %c{1.} [%p] %msg%n</property>\n    <property name=\"patt
 ernNoTime\">%msg%n</property>\n</properties>\n<appenders>\n    <RollingFile name=\"A1\"\n                 fileName=\"${sys:storm.log.dir}/${sys:logfile.name}\"\n                 filePattern=\"${sys:storm.log.dir}/${sys:logfile.name}.%i.gz\">\n        <PatternLayout>\n            <pattern>${pattern}</pattern>\n        </PatternLayout>\n        <Policies>\n            <SizeBasedTriggeringPolicy size=\"100 MB\"/> <!-- Or every 100 MB -->\n        </Policies>\n        <DefaultRolloverStrategy max=\"9\"/>\n    </RollingFile>\n    <RollingFile name=\"STDOUT\"\n                 fileName=\"${sys:storm.log.dir}/${sys:logfile.name}.out\"\n                 filePattern=\"${sys:storm.log.dir}/${sys:logfile.name}.out.%i.gz\">\n        <PatternLayout>\n            <pattern>${patternNoTime}</pattern>\n        </PatternLayout>\n        <Policies>\n            <SizeBasedTriggeringPolicy size=\"100 MB\"/> <!-- Or every 100 MB -->\n        </Policies>\n        <DefaultRolloverStrategy max=\"4\"/>\n    
 </RollingFile>\n    <RollingFile name=\"STDERR\"\n                 fileName=\"${sys:storm.log.dir}/${sys:logfile.name}.err\"\n                 filePattern=\"${sys:storm.log.dir}/${sys:logfile.name}.err.%i.gz\">\n        <PatternLayout>\n            <pattern>${patternNoTime}</pattern>\n        </PatternLayout>\n        <Policies>\n            <SizeBasedTriggeringPolicy size=\"100 MB\"/> <!-- Or every 100 MB -->\n        </Policies>\n        <DefaultRolloverStrategy max=\"4\"/>\n    </RollingFile>\n    <Syslog name=\"syslog\" format=\"RFC5424\" host=\"localhost\" port=\"514\"\n        protocol=\"UDP\" appName=\"[${sys:storm.id}:${sys:worker.port}]\" mdcId=\"mdc\" includeMDC=\"true\"\n        facility=\"LOCAL5\" enterpriseNumber=\"18060\" newLine=\"true\" exceptionPattern=\"%rEx{full}\"\n        messageId=\"[${sys:user.name}:${sys:logging.sensitivity}]\" id=\"storm\"/>\n</appenders>\n<loggers>\n    <root level=\"info\"> <!-- We log everything -->\n        <appender-ref ref=\"A1\"/>\n  
       <appender-ref ref=\"syslog\"/>\n    </root>\n    <Logger name=\"STDERR\" level=\"INFO\">\n        <appender-ref ref=\"STDERR\"/>\n        <appender-ref ref=\"syslog\"/>\n    </Logger>\n    <Logger name=\"STDOUT\" level=\"INFO\">\n        <appender-ref ref=\"STDOUT\"/>\n        <appender-ref ref=\"syslog\"/>\n    </Logger>\n</loggers>\n</configuration>\n    \n    "
         },
         "storm-site": {
           "supervisor.childopts": "-Xmx256m _JAAS_PLACEHOLDER -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port={{jmxremote_port}} -javaagent:/usr/hdp/current/storm-supervisor/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-supervisor/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM",

http://git-wip-us.apache.org/repos/asf/ambari/blob/565dc0de/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.2_to_2.3.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.2_to_2.3.json b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.2_to_2.3.json
index ebcfb52..40c345d 100644
--- a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.2_to_2.3.json
+++ b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.2_to_2.3.json
@@ -316,12 +316,31 @@
             "required-services": [
               "HIVE"
             ]
+          },
+          "storm-cluster-log4j": {
+            "merged-copy": "yes",
+            "required-services": [
+              "STORM"
+            ]
+          },
+          "storm-worker-log4j": {
+            "merged-copy": "yes",
+            "required-services": [
+              "STORM"
+            ]
           }
         }
       },
       "properties": {
         "storm-env": {
-          "nimbus_seeds_supported": "true"
+          "nimbus_seeds_supported": "true",
+          "storm_logs_supported": "true"
+        },
+        "storm-cluster-log4j": {
+          "content": "\n    \n<!--\n Licensed to the Apache Software Foundation (ASF) under one or more\n contributor license agreements.  See the NOTICE file distributed with\n this work for additional information regarding copyright ownership.\n The ASF licenses this file to You under the Apache License, Version 2.0\n (the \"License\"); you may not use this file except in compliance with\n the License.  You may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n-->\n\n<configuration monitorInterval=\"60\">\n<properties>\n    <property name=\"pattern\">%d{yyyy-MM-dd HH:mm:ss.SSS} %c{1.} [%p] %msg%n</property>\n    <property name=\"patt
 ernMetris\">%d %-8r %m%n</property>\n</properties>\n<appenders>\n    <RollingFile name=\"A1\"\n                 fileName=\"${sys:storm.log.dir}/${sys:logfile.name}\"\n                 filePattern=\"${sys:storm.log.dir}/${sys:logfile.name}.%i\">\n        <PatternLayout>\n            <pattern>${pattern}</pattern>\n        </PatternLayout>\n        <Policies>\n            <SizeBasedTriggeringPolicy size=\"100 MB\"/> <!-- Or every 100 MB -->\n        </Policies>\n        <DefaultRolloverStrategy max=\"9\"/>\n    </RollingFile>\n    <RollingFile name=\"ACCESS\"\n                 fileName=\"${sys:storm.log.dir}/access.log\"\n                 filePattern=\"${sys:storm.log.dir}/access.log.%i\">\n        <PatternLayout>\n            <pattern>${pattern}</pattern>\n        </PatternLayout>\n        <Policies>\n            <SizeBasedTriggeringPolicy size=\"100 MB\"/> <!-- Or every 100 MB -->\n        </Policies>\n        <DefaultRolloverStrategy max=\"9\"/>\n    </RollingFile>\n    <RollingFile
  name=\"METRICS\"\n                 fileName=\"${sys:storm.log.dir}/metrics.log\"\n                 filePattern=\"${sys:storm.log.dir}/metrics.log.%i\">\n        <PatternLayout>\n            <pattern>${patternMetris}</pattern>\n        </PatternLayout>\n        <Policies>\n            <SizeBasedTriggeringPolicy size=\"2 MB\"/> <!-- Or every 100 MB -->\n        </Policies>\n        <DefaultRolloverStrategy max=\"9\"/>\n    </RollingFile>\n    <Syslog name=\"syslog\" format=\"RFC5424\" host=\"localhost\" port=\"514\"\n            protocol=\"UDP\" appName=\"[${sys:daemon.name}]\" mdcId=\"mdc\" includeMDC=\"true\"\n            facility=\"LOCAL5\" enterpriseNumber=\"18060\" newLine=\"true\" exceptionPattern=\"%rEx{full}\"\n            messageId=\"[${sys:user.name}:S0]\" id=\"storm\"/>\n</appenders>\n<loggers>\n\n    <Logger name=\"backtype.storm.security.auth.authorizer\" level=\"info\">\n        <AppenderRef ref=\"ACCESS\"/>\n    </Logger>\n    <Logger name=\"backtype.storm.metric.Loggi
 ngMetricsConsumer\" level=\"info\">\n        <AppenderRef ref=\"METRICS\"/>\n    </Logger>\n    <root level=\"info\"> <!-- We log everything -->\n        <appender-ref ref=\"A1\"/>\n        <appender-ref ref=\"syslog\"/>\n    </root>\n</loggers>\n</configuration>\n    \n    "
+        },
+        "storm-worker-log4j": {
+          "content": "\n    \n<!--\n Licensed to the Apache Software Foundation (ASF) under one or more\n contributor license agreements.  See the NOTICE file distributed with\n this work for additional information regarding copyright ownership.\n The ASF licenses this file to You under the Apache License, Version 2.0\n (the \"License\"); you may not use this file except in compliance with\n the License.  You may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n-->\n\n<configuration monitorInterval=\"60\">\n<properties>\n    <property name=\"pattern\">%d{yyyy-MM-dd HH:mm:ss.SSS} %c{1.} [%p] %msg%n</property>\n    <property name=\"patt
 ernNoTime\">%msg%n</property>\n</properties>\n<appenders>\n    <RollingFile name=\"A1\"\n                 fileName=\"${sys:storm.log.dir}/${sys:logfile.name}\"\n                 filePattern=\"${sys:storm.log.dir}/${sys:logfile.name}.%i.gz\">\n        <PatternLayout>\n            <pattern>${pattern}</pattern>\n        </PatternLayout>\n        <Policies>\n            <SizeBasedTriggeringPolicy size=\"100 MB\"/> <!-- Or every 100 MB -->\n        </Policies>\n        <DefaultRolloverStrategy max=\"9\"/>\n    </RollingFile>\n    <RollingFile name=\"STDOUT\"\n                 fileName=\"${sys:storm.log.dir}/${sys:logfile.name}.out\"\n                 filePattern=\"${sys:storm.log.dir}/${sys:logfile.name}.out.%i.gz\">\n        <PatternLayout>\n            <pattern>${patternNoTime}</pattern>\n        </PatternLayout>\n        <Policies>\n            <SizeBasedTriggeringPolicy size=\"100 MB\"/> <!-- Or every 100 MB -->\n        </Policies>\n        <DefaultRolloverStrategy max=\"4\"/>\n    
 </RollingFile>\n    <RollingFile name=\"STDERR\"\n                 fileName=\"${sys:storm.log.dir}/${sys:logfile.name}.err\"\n                 filePattern=\"${sys:storm.log.dir}/${sys:logfile.name}.err.%i.gz\">\n        <PatternLayout>\n            <pattern>${patternNoTime}</pattern>\n        </PatternLayout>\n        <Policies>\n            <SizeBasedTriggeringPolicy size=\"100 MB\"/> <!-- Or every 100 MB -->\n        </Policies>\n        <DefaultRolloverStrategy max=\"4\"/>\n    </RollingFile>\n    <Syslog name=\"syslog\" format=\"RFC5424\" host=\"localhost\" port=\"514\"\n        protocol=\"UDP\" appName=\"[${sys:storm.id}:${sys:worker.port}]\" mdcId=\"mdc\" includeMDC=\"true\"\n        facility=\"LOCAL5\" enterpriseNumber=\"18060\" newLine=\"true\" exceptionPattern=\"%rEx{full}\"\n        messageId=\"[${sys:user.name}:${sys:logging.sensitivity}]\" id=\"storm\"/>\n</appenders>\n<loggers>\n    <root level=\"info\"> <!-- We log everything -->\n        <appender-ref ref=\"A1\"/>\n  
       <appender-ref ref=\"syslog\"/>\n    </root>\n    <Logger name=\"STDERR\" level=\"INFO\">\n        <appender-ref ref=\"STDERR\"/>\n        <appender-ref ref=\"syslog\"/>\n    </Logger>\n    <Logger name=\"STDOUT\" level=\"INFO\">\n        <appender-ref ref=\"STDOUT\"/>\n        <appender-ref ref=\"syslog\"/>\n    </Logger>\n</loggers>\n</configuration>\n    \n    "
         },
         "falcon-startup.properties": {
           "*.shared.libs": "activemq-core,ant,geronimo-j2ee-management,jms,json-simple,oozie-client,spring-jms,commons-lang3,commons-el",

http://git-wip-us.apache.org/repos/asf/ambari/blob/565dc0de/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json b/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json
index d3cc980..bfde096 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json
@@ -214,7 +214,8 @@
             "storm_log_dir": "/var/log/storm",
             "storm_pid_dir": "/var/run/storm",
             "storm_user": "storm",
-            "nimbus_seeds_supported" : false
+            "nimbus_seeds_supported" : "false",
+            "storm_logs_supported": "false"
         },
 		"ranger-storm-plugin-properties": {
             "POLICY_MGR_URL": "{{policymgr_mgr_url}}", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/565dc0de/ambari-server/src/test/python/stacks/2.1/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/default.json b/ambari-server/src/test/python/stacks/2.1/configs/default.json
index f878aad..76d840b 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/default.json
@@ -654,7 +654,8 @@
             "storm_log_dir": "/var/log/storm", 
             "storm_pid_dir": "/var/run/storm", 
             "storm_user": "storm",
-            "nimbus_seeds_supported" : false
+            "nimbus_seeds_supported" : "false",
+            "storm_logs_supported": "false"
         }, 
         "falcon-env": {
             "falcon_port": "15000", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/565dc0de/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json b/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json
index 0b1ea9e..f9e518d 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json
@@ -225,7 +225,8 @@
             "nimbus_keytab": "/etc/security/keytabs/nimbus.service.keytab", 
             "storm_keytab": "/etc/security/keytabs/storm.headless.keytab",
             "storm_ui_principal_name": "HTTP/_HOST",
-            "nimbus_seeds_supported" : false
+            "nimbus_seeds_supported" : "false",
+            "storm_logs_supported": "false"
         },
         "ranger-storm-plugin-properties": {
             "POLICY_MGR_URL": "{{policymgr_mgr_url}}", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/565dc0de/ambari-server/src/test/python/stacks/2.1/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/secured.json b/ambari-server/src/test/python/stacks/2.1/configs/secured.json
index 193012e..3346ee6 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/secured.json
@@ -646,7 +646,8 @@
             "storm_principal_name": "storm",
             "storm_ui_keytab" : "/etc/security/keytabs/spnego.service.keytab",
             "storm_ui_principal_name" : "HTTP/_HOST",
-            "nimbus_seeds_supported" : false
+            "nimbus_seeds_supported" : "false",
+            "storm_logs_supported": "false"
         }, 
         "falcon-env": {
             "falcon_port": "15000", 


[21/50] [abbrv] ambari git commit: AMBARI-13509. hdfs disk usage metric confusing

Posted by nc...@apache.org.
AMBARI-13509. hdfs disk usage metric confusing


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/306e44a4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/306e44a4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/306e44a4

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 306e44a48ebffae9df20d5f6cae561c8c50173b2
Parents: 39c04ac
Author: Alex Antonenko <hi...@gmail.com>
Authored: Thu Oct 22 16:22:59 2015 +0300
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Thu Oct 22 16:22:59 2015 +0300

----------------------------------------------------------------------
 ambari-web/app/messages.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/306e44a4/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index c12ba24..9b5a93e 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -2465,7 +2465,7 @@ Em.I18n.translations = {
   'dashboard.services.hdfs.capacity':'HDFS Disk Usage',
   'dashboard.services.hdfs.capacity.dfsUsed':'Disk Usage (DFS Used)',
   'dashboard.services.hdfs.capacity.nonDfsUsed':'Disk Usage (Non DFS Used)',
-  'dashboard.services.hdfs.capacity.remaining':'Disk Usage (Remaining)',
+  'dashboard.services.hdfs.capacity.remaining':'Disk Remaining',
   'dashboard.services.hdfs.capacityUsed':'{0} / {1} ({2}%)',
   'dashboard.services.hdfs.totalFilesAndDirs':'Total Files + Directories',
   'dashboard.services.hdfs.datanodes':'DataNodes',


[38/50] [abbrv] ambari git commit: AMBARI-13532. RU: Upgrade window does not show 'Upgrade Finished' status on completion.(xiwang)

Posted by nc...@apache.org.
AMBARI-13532. RU: Upgrade window does not show 'Upgrade Finished' status on completion.(xiwang)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6cd152c4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6cd152c4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6cd152c4

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 6cd152c4d834ee9d7782e85e9fe7ad2c4060135d
Parents: 3864bc1
Author: Xi Wang <xi...@apache.org>
Authored: Thu Oct 22 16:57:52 2015 -0700
Committer: Xi Wang <xi...@apache.org>
Committed: Thu Oct 22 17:40:29 2015 -0700

----------------------------------------------------------------------
 .../controllers/main/admin/stack_and_upgrade_controller.js   | 6 ++++--
 .../views/main/admin/stack_upgrade/upgrade_wizard_view.js    | 4 ++--
 .../main/admin/stack_and_upgrade_controller_test.js          | 4 ++--
 .../main/admin/stack_upgrade/upgrade_wizard_view_test.js     | 8 +-------
 4 files changed, 9 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6cd152c4/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
index 9c779ce..f751277 100644
--- a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
+++ b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
@@ -196,10 +196,12 @@ App.MainAdminStackAndUpgradeController = Em.Controller.extend(App.LocalStorage,
   requestStatus: function () {
     if (this.get('isSuspended')) {
       return 'SUSPENDED';
+    } else if (this.get('upgradeData.Upgrade')){
+      return this.get('upgradeData.Upgrade.request_status');
     } else {
-      return App.get('upgradeState');
+      return '';
     }
-  }.property('isSuspended', 'App.upgradeState'),
+  }.property('isSuspended', 'upgradeData.Upgrade.request_status'),
 
   init: function () {
     this.initDBProperties();

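In short, the controller now reports the status the server attaches to the upgrade request (upgradeData.Upgrade.request_status) instead of the client-side App.upgradeState, which could lag behind and miss the terminal state; the view and the tests below are switched to observe the same property.
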
http://git-wip-us.apache.org/repos/asf/ambari/blob/6cd152c4/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js
index 6d79514..d1bfe18 100644
--- a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js
+++ b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js
@@ -183,7 +183,7 @@ App.upgradeWizardView = Em.View.extend({
    */
   upgradeStatusLabel: function() {
     var labelKey = null;
-    switch (App.get('upgradeState')) {
+    switch (this.get('controller.upgradeData.Upgrade.request_status')) {
       case 'QUEUED':
       case 'PENDING':
       case 'IN_PROGRESS':
@@ -213,7 +213,7 @@ App.upgradeWizardView = Em.View.extend({
     } else {
       return "";
     }
-  }.property('App.upgradeState', 'controller.isDowngrade', 'controller.isSuspended'),
+  }.property('controller.upgradeData.Upgrade.request_status', 'controller.isDowngrade', 'controller.isSuspended'),
 
   /**
    * toggle details box

http://git-wip-us.apache.org/repos/asf/ambari/blob/6cd152c4/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js b/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
index 0124608..e0cab41 100644
--- a/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
+++ b/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
@@ -75,14 +75,14 @@ describe('App.MainAdminStackAndUpgradeController', function() {
 
   describe("#requestStatus", function() {
     it("isSuspended false", function() {
-      App.set('upgradeState', 'ABORTED');
       controller.set('isSuspended', false);
+      controller.set('upgradeData', { Upgrade: {request_status: 'ABORTED'}});
       controller.propertyDidChange('requestStatus');
       expect(controller.get('requestStatus')).to.equal('ABORTED');
     });
     it("isSuspended true", function() {
-      App.set('upgradeState', 'ABORTED');
       controller.set('isSuspended', true);
+      controller.set('upgradeData', { Upgrade: {request_status: 'ABORTED'}});
       controller.propertyDidChange('requestStatus');
       expect(controller.get('requestStatus')).to.equal('SUSPENDED');
     });

http://git-wip-us.apache.org/repos/asf/ambari/blob/6cd152c4/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js b/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js
index 69e8665..5a35d59 100644
--- a/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js
+++ b/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js
@@ -448,12 +448,6 @@ describe('App.upgradeWizardView', function () {
   });
 
   describe("#upgradeStatusLabel", function () {
-    beforeEach(function () {
-      this.mock = sinon.stub(App, 'get');
-    });
-    afterEach(function () {
-      this.mock.restore();
-    });
     var testCases = [
       {
         data: {
@@ -614,7 +608,7 @@ describe('App.upgradeWizardView', function () {
         it('status = ' + test.data.status + ", isDowngrade = " + test.data.isDowngrade, function () {
           view.set('controller.isDowngrade', test.data.isDowngrade);
           view.set('controller.isSuspended', test.data.isSuspended);
-          this.mock.returns(test.data.status);
+          view.set('controller.upgradeData.Upgrade.request_status', test.data.status);
           view.propertyDidChange('upgradeStatusLabel');
           expect(view.get('upgradeStatusLabel')).to.equal(test.result);
         });


[26/50] [abbrv] ambari git commit: AMBARI-13527. Flume agent combobox doesn't work. (akovalenko)

Posted by nc...@apache.org.
AMBARI-13527. Flume agent combobox doesn't work. (akovalenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2ff92a91
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2ff92a91
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2ff92a91

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 2ff92a914cd14441fd33ec0b5ef1535c8fa315c6
Parents: 3383f02
Author: Aleksandr Kovalenko <ak...@hortonworks.com>
Authored: Thu Oct 22 18:07:02 2015 +0300
Committer: Aleksandr Kovalenko <ak...@hortonworks.com>
Committed: Thu Oct 22 18:21:53 2015 +0300

----------------------------------------------------------------------
 ambari-web/app/styles/application.less | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2ff92a91/ambari-web/app/styles/application.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/application.less b/ambari-web/app/styles/application.less
index 8486411..3b13d1f 100644
--- a/ambari-web/app/styles/application.less
+++ b/ambari-web/app/styles/application.less
@@ -5777,7 +5777,9 @@ input[type="radio"].align-checkbox, input[type="checkbox"].align-checkbox {
     position: static;
   }
   .flume-agents-actions {
-    position: static;
+    .btn-group {
+      position: static;
+    }
     a {
       text-decoration: none;
     }


[41/50] [abbrv] ambari git commit: AMBARI-13520. Implement mapping-methods for collections (onechiporenko)

Posted by nc...@apache.org.
AMBARI-13520. Implement mapping-methods for collections (onechiporenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4b66a824
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4b66a824
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4b66a824

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 4b66a8243eb23531188127349ec50163dd88c9b7
Parents: b4468ce
Author: Oleg Nechiporenko <on...@apache.org>
Authored: Thu Oct 22 13:40:47 2015 +0300
Committer: Oleg Nechiporenko <on...@apache.org>
Committed: Fri Oct 23 11:27:37 2015 +0300

----------------------------------------------------------------------
 .../alerts/manage_alert_groups_controller.js    |  2 +
 ambari-web/app/controllers/wizard.js            |  5 +-
 .../app/controllers/wizard/step6_controller.js  | 20 +----
 .../mappers/alert_definition_summary_mapper.js  | 10 +--
 .../app/mappers/alert_definitions_mapper.js     | 16 +---
 .../mixins/common/configs/configs_comparator.js |  5 +-
 .../mixins/common/configs/enhanced_configs.js   | 43 ++++++----
 ambari-web/app/models/configs/config_group.js   |  5 +-
 ambari-web/app/utils/blueprint.js               |  8 +-
 ambari-web/app/utils/helper.js                  | 89 ++++++++++++++++++++
 ambari-web/test/utils/helper_test.js            | 88 +++++++++++++++++++
 11 files changed, 221 insertions(+), 70 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4b66a824/ambari-web/app/controllers/main/alerts/manage_alert_groups_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/alerts/manage_alert_groups_controller.js b/ambari-web/app/controllers/main/alerts/manage_alert_groups_controller.js
index e956a9e..2ee9213 100644
--- a/ambari-web/app/controllers/main/alerts/manage_alert_groups_controller.js
+++ b/ambari-web/app/controllers/main/alerts/manage_alert_groups_controller.js
@@ -340,6 +340,8 @@ App.ManageAlertGroupsController = Em.Controller.extend({
     var availableDefinitions = [];
     var sharedDefinitions = App.AlertDefinition.find();
 
+    usedDefinitionsMap = selectedAlertGroup.get('definitions').toWickMapByProperty('name');
+
     selectedAlertGroup.get('definitions').forEach(function (def) {
       usedDefinitionsMap[def.name] = true;
     });
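
A hedged sketch of how the wick map built above supports the rest of this function;
the hunk does not show the surrounding lines, so the filtering below is illustrative
rather than the commit's exact body:

    // sketch: one O(1) hash lookup per definition instead of a someProperty() scan
    var usedDefinitionsMap = selectedAlertGroup.get('definitions').toWickMapByProperty('name');
    sharedDefinitions.forEach(function (definition) {
      if (!usedDefinitionsMap[definition.get('name')]) {
        availableDefinitions.pushObject(definition); // keep only definitions not already in the group
      }
    });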

http://git-wip-us.apache.org/repos/asf/ambari/blob/4b66a824/ambari-web/app/controllers/wizard.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard.js b/ambari-web/app/controllers/wizard.js
index 517122f..2acac59 100644
--- a/ambari-web/app/controllers/wizard.js
+++ b/ambari-web/app/controllers/wizard.js
@@ -905,10 +905,7 @@ App.WizardController = Em.Controller.extend(App.LocalStorage, App.ThemesMappingM
     var serviceConfigProperties = [];
     var fileNamesToUpdate = this.getDBProperty('fileNamesToUpdate') || [];
     var installedServiceNames = stepController.get('installedServiceNames') || [];
-    var installedServiceNamesMap = {};
-    installedServiceNames.forEach(function(name) {
-      installedServiceNamesMap[name] = true;
-    });
+    var installedServiceNamesMap = installedServiceNames.toWickMap();
     stepController.get('stepConfigs').forEach(function (_content) {
 
       if (_content.serviceName === 'YARN') {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4b66a824/ambari-web/app/controllers/wizard/step6_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step6_controller.js b/ambari-web/app/controllers/wizard/step6_controller.js
index 3427aa1..a7ac939 100644
--- a/ambari-web/app/controllers/wizard/step6_controller.js
+++ b/ambari-web/app/controllers/wizard/step6_controller.js
@@ -180,11 +180,8 @@ App.WizardStep6Controller = Em.Controller.extend(App.BlueprintMixin, {
     var err = false;
     var hosts = this.get('hosts');
     var headers = this.get('headers');
-    var headersMap = {};
+    var headersMap = headers.toWickMapByProperty('name');
 
-    headers.forEach(function (header) {
-      headersMap[header.name] = true;
-    });
     hosts.forEach(function (host) {
       host.checkboxes.forEach(function (checkbox) {
         if (headersMap[checkbox.component]) {
@@ -379,10 +376,7 @@ App.WizardStep6Controller = Em.Controller.extend(App.BlueprintMixin, {
       masterHosts = [],
       headers = this.get('headers'),
       masterHostNames = this.get('content.masterComponentHosts').mapProperty('hostName').uniq(),
-      masterHostNamesMap = {};
-    masterHostNames.forEach(function(hostName) {
-      masterHostNamesMap[hostName] = true;
-    });
+      masterHostNamesMap = masterHostNames.toWickMap();
 
     this.getHostNames().forEach(function (_hostName) {
       var hasMaster = masterHostNamesMap[_hostName];
@@ -459,14 +453,8 @@ App.WizardStep6Controller = Em.Controller.extend(App.BlueprintMixin, {
       });
     } else {
 
-      var slaveComponentsMap = {};
-      slaveComponents.forEach(function(slave) {
-        slaveComponentsMap[Em.get(slave, 'componentName')] = slave;
-      });
-      var hostsObjMap = {};
-      hostsObj.forEach(function(host) {
-        hostsObjMap[Em.get(host, 'hostName')] = host;
-      });
+      var slaveComponentsMap = slaveComponents.toMapByProperty('componentName');
+      var hostsObjMap =  hostsObj.toMapByProperty('hostName');
 
       this.get('headers').forEach(function (header) {
         var nodes = slaveComponentsMap[header.get('name')];
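
Both replacements in this hunk are instances of the same indexing idiom, sketched here
in isolation (the host name is an illustrative placeholder):

    // sketch: build the index once in O(n)...
    var hostsObjMap = hostsObj.toMapByProperty('hostName');
    var host = hostsObjMap['c6401.ambari.apache.org']; // O(1) lookup, illustrative host name
    // ...instead of paying an O(n) scan on every lookup:
    var sameHost = hostsObj.findProperty('hostName', 'c6401.ambari.apache.org');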

http://git-wip-us.apache.org/repos/asf/ambari/blob/4b66a824/ambari-web/app/mappers/alert_definition_summary_mapper.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mappers/alert_definition_summary_mapper.js b/ambari-web/app/mappers/alert_definition_summary_mapper.js
index b795638..b6aa135 100644
--- a/ambari-web/app/mappers/alert_definition_summary_mapper.js
+++ b/ambari-web/app/mappers/alert_definition_summary_mapper.js
@@ -27,10 +27,7 @@ App.alertDefinitionSummaryMapper = App.QuickDataMapper.create({
 
     if (!data.alerts_summary_grouped) return;
     var alertDefinitions = App.AlertDefinition.find();
-    var alertDefinitionsMap = {};
-    alertDefinitions.forEach(function (definition) {
-      alertDefinitionsMap[definition.get('id')] = definition;
-    });
+    var alertDefinitionsMap = alertDefinitions.toArray().toMapByProperty('id');
     var summaryMap = {};
     data.alerts_summary_grouped.forEach(function(alertDefinitionSummary) {
       var alertDefinition = alertDefinitionsMap[alertDefinitionSummary.definition_id];
@@ -67,10 +64,7 @@ App.alertDefinitionSummaryMapper = App.QuickDataMapper.create({
     // set alertsCount and hasCriticalAlerts for each service
     var groupedByServiceName = dataManipulation.groupPropertyValues(alertDefinitions, 'service.serviceName');
     var services = App.Service.find();
-    var servicesMap = {};
-    services.forEach(function (service) {
-      servicesMap[service.get('id')] = service;
-    });
+    var servicesMap = services.toArray().toMapByProperty('id');
     Object.keys(groupedByServiceName).forEach(function(serviceName) {
       var service = servicesMap[serviceName];
       if (service) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4b66a824/ambari-web/app/mappers/alert_definitions_mapper.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mappers/alert_definitions_mapper.js b/ambari-web/app/mappers/alert_definitions_mapper.js
index 80d175e..7dad450 100644
--- a/ambari-web/app/mappers/alert_definitions_mapper.js
+++ b/ambari-web/app/mappers/alert_definitions_mapper.js
@@ -79,14 +79,10 @@ App.alertDefinitionsMapper = App.QuickDataMapper.create({
           alertMetricsUriDefinitions = [],
           alertGroupsMap = App.cache['previousAlertGroupsMap'],
           existingAlertDefinitions = App.AlertDefinition.find(),
-          existingAlertDefinitionsMap = {},
+          existingAlertDefinitionsMap = existingAlertDefinitions.toArray().toMapByProperty('id'),
           alertDefinitionsToDelete = existingAlertDefinitions.mapProperty('id'),
           rawSourceData = {};
 
-      existingAlertDefinitions.forEach(function (d) {
-        existingAlertDefinitionsMap[d.get('id')] = d;
-      });
-
       json.items.forEach(function (item) {
         var convertedReportDefinitions = [];
         var reporting = item.AlertDefinition.source.reporting;
@@ -206,10 +202,7 @@ App.alertDefinitionsMapper = App.QuickDataMapper.create({
    * @param data
    */
   setMetricsSourcePropertyLists: function (model, data) {
-    var modelsMap = {};
-    model.find().forEach(function (m) {
-      modelsMap[m.get('id')] = m;
-    });
+    var modelsMap = model.find().toArray().toMapByProperty('id');
     data.forEach(function (record) {
       var m = modelsMap[record.id];
       if (m) {
@@ -224,10 +217,7 @@ App.alertDefinitionsMapper = App.QuickDataMapper.create({
    */
   setAlertDefinitionsRawSourceData: function (rawSourceData) {
     var allDefinitions = App.AlertDefinition.find();
-    var allDefinitionsMap = {};
-    allDefinitions.forEach(function(d) {
-      allDefinitionsMap[d.get('id')] = d;
-    });
+    var allDefinitionsMap = allDefinitions.toArray().toMapByProperty('id');
     for (var alertDefinitionId in rawSourceData) {
       if (rawSourceData.hasOwnProperty(alertDefinitionId)) {
         var m = allDefinitionsMap[+alertDefinitionId];

http://git-wip-us.apache.org/repos/asf/ambari/blob/4b66a824/ambari-web/app/mixins/common/configs/configs_comparator.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/common/configs/configs_comparator.js b/ambari-web/app/mixins/common/configs/configs_comparator.js
index 0b30cb6..bb7bc88 100644
--- a/ambari-web/app/mixins/common/configs/configs_comparator.js
+++ b/ambari-web/app/mixins/common/configs/configs_comparator.js
@@ -75,7 +75,7 @@ App.ConfigsComparator = Em.Mixin.create({
    */
   initCompareConfig: function(allConfigs, json) {
     var serviceVersionMap = {};
-    var configNamesMap = {};
+    var configNamesMap = allConfigs.toWickMapByProperty('name');
     var serviceName = this.get('content.serviceName');
     var compareVersionNumber = this.get('compareServiceVersion').get('version');
     //indicate whether compared versions are from non-default group
@@ -85,9 +85,6 @@ App.ConfigsComparator = Em.Mixin.create({
     if (compareNonDefaultVersions) {
       serviceVersionMap[this.get('selectedVersion')] = {};
     }
-    allConfigs.mapProperty('name').forEach(function(name) {
-      configNamesMap[name] = true;
-    });
 
     json.items.forEach(function (item) {
       item.configurations.forEach(function (configuration) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4b66a824/ambari-web/app/mixins/common/configs/enhanced_configs.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/common/configs/enhanced_configs.js b/ambari-web/app/mixins/common/configs/enhanced_configs.js
index 009a775..d984e7d 100644
--- a/ambari-web/app/mixins/common/configs/enhanced_configs.js
+++ b/ambari-web/app/mixins/common/configs/enhanced_configs.js
@@ -183,9 +183,8 @@ App.EnhancedConfigsMixin = Em.Mixin.create({
    * @param {App.ServiceConfig[]} stepConfigs
    */
   clearDependenciesForInstalledServices: function(installedServices, stepConfigs) {
-    var allConfigs = stepConfigs.mapProperty('configs').filter(function(item) {
-      return item.length;
-    }).reduce(function(p, c) {
+    var stackConfigsMap = App.StackConfigProperty.find().toArray().toMapByProperty('name');
+    var allConfigs = stepConfigs.mapProperty('configs').filterProperty('length').reduce(function(p, c) {
       if (p) {
         return p.concat(c);
       }
@@ -193,7 +192,7 @@ App.EnhancedConfigsMixin = Em.Mixin.create({
     var cleanDependencies = this.get('_dependentConfigValues').reject(function(item) {
       if ('hadoop.proxyuser'.contains(Em.get(item, 'name'))) return false;
       if (installedServices.contains(Em.get(item, 'serviceName'))) {
-        var stackProperty = App.StackConfigProperty.find().findProperty("name", item.propertyName);
+        var stackProperty = stackConfigsMap[item.propertyName];
         var parentConfigs = stackProperty && stackProperty.get('propertyDependsOn');
         if (!parentConfigs || !parentConfigs.length) {
           return true;
@@ -440,7 +439,7 @@ App.EnhancedConfigsMixin = Em.Mixin.create({
     for (var key in configObject) {
 
       /**  defines main info for file name (service name, config group, config that belongs to filename) **/
-      var service = App.config.getServiceByConfigType(key);
+      var service = App.config.get('serviceByConfigTypeMap')[key];
       var serviceName = service.get('serviceName');
       var stepConfig = this.get('stepConfigs').findProperty('serviceName', serviceName);
       if (stepConfig) {
@@ -451,7 +450,6 @@ App.EnhancedConfigsMixin = Em.Mixin.create({
 
         for (var propertyName in configObject[key].properties) {
 
-          var dependentProperty = this.get('_dependentConfigValues').filterProperty('propertyName', propertyName).filterProperty('fileName', key).findProperty('configGroup', group && Em.get(group,'name'));
           var cp = configProperties.findProperty('name', propertyName);
           var override = (notDefaultGroup && group && cp && cp.get('overrides')) ? cp.get('overrides').findProperty('group.name', group.get('name')) : null;
 
@@ -471,6 +469,10 @@ App.EnhancedConfigsMixin = Em.Mixin.create({
           recommendedValue = validator.isValidFloat(recommendedValue) ? parseFloat(recommendedValue).toString() : recommendedValue;
 
           if (!updateOnlyBoundaries && !parentPropertiesNames.contains(App.config.configId(propertyName, key)) && initialValue != recommendedValue) { //on first initial request we don't need to change values
+            var groupName = group && Em.get(group, 'name');
+            var dependentProperty = this.get('_dependentConfigValues').find(function (dcv) {
+              return dcv.propertyName === propertyName && dcv.fileName === key && dcv.configGroup === groupName;
+            });
             if (dependentProperty) {
               Em.set(dependentProperty, 'value', initialValue);
               Em.set(dependentProperty, 'recommendedValue', recommendedValue);
@@ -545,6 +547,16 @@ App.EnhancedConfigsMixin = Em.Mixin.create({
     }
   },
 
+  installedServices: function () {
+    return App.StackService.find().toArray().toMapByCallback('serviceName', function (item) {
+      return Em.get(item, 'isInstalled');
+    });
+  }.property(),
+
+  stackConfigsMap: function () {
+    return App.StackConfigProperty.find().toArray().toMapByProperty('id');
+  }.property(),
+
   /**
    * Save property attributes received from recommendations. These attributes are minimum, maximum,
    * increment_step. Attributes are stored in <code>App.StackConfigProperty</code> model.
@@ -556,14 +568,13 @@ App.EnhancedConfigsMixin = Em.Mixin.create({
    */
   _saveRecommendedAttributes: function(configs, parentPropertiesNames, updateOnlyBoundaries, selectedConfigGroup) {
     var self = this;
-    var wizardController = self.get('wizardController');
+    var installedServices = this.get('installedServices');
+    var wizardController = this.get('wizardController');
     var fileNamesToUpdate = wizardController ? this.get('_fileNamesToUpdate') : [];
-    var stackConfigsMap = {};
-    App.StackConfigProperty.find().forEach(function (c) {
-      stackConfigsMap[c.get('id')] = c;
-    });
+    var stackConfigsMap = this.get('stackConfigsMap');
     Em.keys(configs).forEach(function (siteName) {
-      var service = App.config.getServiceByConfigType(siteName);
+      var fileName = App.config.getOriginalFileName(siteName);
+      var service = App.config.get('serviceByConfigTypeMap')[siteName];
       var serviceName = service.get('serviceName');
       var group = self.getGroupForService(serviceName);
       var stepConfig = self.get('stepConfigs').findProperty('serviceName', serviceName);
@@ -576,9 +587,11 @@ App.EnhancedConfigsMixin = Em.Mixin.create({
         Em.keys(attributes).forEach(function (attributeName) {
           if (attributeName == 'delete' && cp) {
             if (!updateOnlyBoundaries) {
-              var fileName = App.config.getOriginalFileName(siteName);
               var modifiedFileNames = self.get('modifiedFileNames');
-              var dependentProperty = self.get('_dependentConfigValues').filterProperty('propertyName', propertyName).filterProperty('fileName', siteName).findProperty('configGroup', group && Em.get(group,'name'));
+              var groupName = group && Em.get(group,'name');
+              var dependentProperty = self.get('_dependentConfigValues').find(function (dcv) {
+                return dcv.propertyName === propertyName && dcv.fileName === siteName && dcv.configGroup === groupName;
+              });
               if (dependentProperty) {
                 Em.set(dependentProperty, 'toDelete', true);
                 Em.set(dependentProperty, 'toAdd', false);
@@ -603,7 +616,7 @@ App.EnhancedConfigsMixin = Em.Mixin.create({
               }
               if (modifiedFileNames && !modifiedFileNames.contains(fileName)) {
                modifiedFileNames.push(fileName);
-              } else if (wizardController && App.StackService.find(service.get('serviceName')).get('isInstalled')) {
+              } else if (wizardController && installedServices[service.get('serviceName')]) {
                 if (!fileNamesToUpdate.contains(fileName)) {
                   fileNamesToUpdate.push(fileName);
                 }
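
Two details of this hunk recur across the commit: App.*.find() returns a DS.RecordArray,
so results are converted with toArray() before the new Array.prototype helpers apply, and
the maps are exposed as computed properties with no dependent keys, so they are built once
and cached. A hedged sketch of that pattern (the property name is illustrative):

    // sketch of the memoized-map idiom used by stackConfigsMap / installedServices above
    exampleConfigsMap: function () {
      // toArray(): toMapByProperty lives on Array.prototype, not on DS.RecordArray
      return App.StackConfigProperty.find().toArray().toMapByProperty('id');
    }.property() // no dependent keys: computed on first read, cached afterwards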

http://git-wip-us.apache.org/repos/asf/ambari/blob/4b66a824/ambari-web/app/models/configs/config_group.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/configs/config_group.js b/ambari-web/app/models/configs/config_group.js
index 467f53e..c204774 100644
--- a/ambari-web/app/models/configs/config_group.js
+++ b/ambari-web/app/models/configs/config_group.js
@@ -122,13 +122,10 @@ App.ServiceConfigGroup = DS.Model.extend({
    */
   availableHosts: function () {
     if (this.get('isDefault')) return [];
-    var unusedHostsMap = {};
+    var unusedHostsMap = this.get('parentConfigGroup.hosts').toWickMap();
     var availableHosts = [];
     var sharedHosts = this.get('clusterHosts');
     // parentConfigGroup.hosts(hosts from default group) - are available hosts, which don't belong to any group
-    this.get('parentConfigGroup.hosts').forEach(function (hostName) {
-      unusedHostsMap[hostName] = true;
-    });
     sharedHosts.forEach(function (host) {
       if (unusedHostsMap[host.get('id')]) {
         availableHosts.pushObject(Ember.Object.create({

http://git-wip-us.apache.org/repos/asf/ambari/blob/4b66a824/ambari-web/app/utils/blueprint.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/blueprint.js b/ambari-web/app/utils/blueprint.js
index 687c7fd..d3138e5 100644
--- a/ambari-web/app/utils/blueprint.js
+++ b/ambari-web/app/utils/blueprint.js
@@ -137,15 +137,11 @@ module.exports = {
    * @returns {object}
    */
   blueprintToObject: function(blueprint, field) {
-    var ret = {};
     var valueToMap = Em.get(blueprint, field);
     if (!Array.isArray(valueToMap)) {
-      return ret;
+      return {};
     }
-    valueToMap.forEach(function(n) {
-      ret[Em.get(n, 'name')] = n;
-    });
-    return ret;
+    return valueToMap.toMapByProperty('name');
   },
 
   matchGroups: function(masterBlueprint, slaveBlueprint) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4b66a824/ambari-web/app/utils/helper.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/helper.js b/ambari-web/app/utils/helper.js
index 9489eee..0f4a5c3 100644
--- a/ambari-web/app/utils/helper.js
+++ b/ambari-web/app/utils/helper.js
@@ -233,6 +233,95 @@ Array.prototype.sortPropertyLight = function (path) {
   });
   return this;
 };
+
+/**
+ * Create a map from an array, executing the provided callback for each item
+ * Example:
+ * <pre>
+ *   var array = [{a: 1, b: 3}, {a: 2, b: 2}, {a: 3, b: 1}];
+ *   var map = array.toMapByCallback('a', function (item) {
+ *    return Em.get(item, 'b');
+ *   });
+ *   console.log(map); // {1: 3, 2: 2, 3: 1}
+ * </pre>
+ * <code>map[1]</code> is much faster than <code>array.findProperty('a', 1).get('b')</code>
+ *
+ * @param {string} property
+ * @param {Function} callback
+ * @returns {object}
+ * @method toMapByCallback
+ */
+Array.prototype.toMapByCallback = function (property, callback) {
+  var ret = {};
+  Em.assert('`property` can\'t be empty string', property.length);
+  Em.assert('`callback` should be a function', 'function' === Em.typeOf(callback));
+  this.forEach(function (item) {
+    var key = Em.get(item, property);
+    ret[key] = callback(item, property);
+  });
+  return ret;
+};
+
+/**
+ * Create map from array
+ * Example:
+ * <pre>
+ *   var array = [{a: 1}, {a: 2}, {a: 3}];
+ *   var map = array.toMapByProperty('a'); // {1: {a: 1}, 2: {a: 2}, 3: {a: 3}}
+ * </pre>
+ * <code>map[1]</code> is much faster than <code>array.findProperty('a', 1)</code>
+ *
+ * @param {string} property
+ * @return {object}
+ * @method toMapByProperty
+ * @see toMapByCallback
+ */
+Array.prototype.toMapByProperty = function (property) {
+  return this.toMapByCallback(property, function (item) {
+    return item;
+  });
+};
+
+/**
+ * Create wick map from array
+ * Example:
+ * <pre>
+ *   var array = [{a: 1}, {a: 2}, {a: 3}];
+ *   var map = array.toWickMapByProperty('a'); // {1: true, 2: true, 3: true}
+ * </pre>
+ * <code>map[1]</code> works faster than <code>array.someProperty('a', 1)</code>
+ *
+ * @param {string} property
+ * @return {object}
+ * @method toWickMapByProperty
+ * @see toMapByCallback
+ */
+Array.prototype.toWickMapByProperty = function (property) {
+  return this.toMapByCallback(property, function () {
+    return true;
+  });
+};
+
+/**
+ * Create wick map from array of primitives
+ * Example:
+ * <pre>
+ *   var array = [1, 2, 3];
+ *   var map = array.toWickMap(); // {1: true, 2: true, 3: true}
+ * </pre>
+ * <code>map[1]</code> works faster than <code>array.contains(1)</code>
+ *
+ * @returns {object}
+ * @method toWickMap
+ */
+Array.prototype.toWickMap = function () {
+  var ret = {};
+  this.forEach(function (item) {
+    ret[item] = true;
+  });
+  return ret;
+};
+
 /** @namespace Em **/
 Em.CoreObject.reopen({
   t:function (key, attrs) {
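
The rationale for this helper family is algorithmic: calling findProperty or someProperty
inside a loop costs O(n * m), while building a map once and doing hash lookups costs
O(n + m). A hedged before/after sketch with illustrative names:

    // before: each iteration re-scans allDefinitions => O(n * m)
    records.forEach(function (record) {
      var definition = allDefinitions.findProperty('id', record.id);
    });

    // after: index once, then constant-time lookups => O(n + m)
    var definitionsMap = allDefinitions.toMapByProperty('id');
    records.forEach(function (record) {
      var definition = definitionsMap[record.id];
    });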

http://git-wip-us.apache.org/repos/asf/ambari/blob/4b66a824/ambari-web/test/utils/helper_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/utils/helper_test.js b/ambari-web/test/utils/helper_test.js
index 63fd59d..62acf58 100644
--- a/ambari-web/test/utils/helper_test.js
+++ b/ambari-web/test/utils/helper_test.js
@@ -17,8 +17,10 @@
  */
 var App = require('app');
 require('utils/helper');
+var O = Em.Object;
 
 describe('utils/helper', function() {
+
   describe('String helpers', function() {
     describe('#trim()', function(){
       it('should replace first space', function() {
@@ -94,6 +96,7 @@ describe('utils/helper', function() {
       });
     });
   });
+
   describe('Number helpers', function(){
     describe('#toDaysHoursMinutes()', function(){
       var time = 1000000000;
@@ -113,7 +116,56 @@ describe('utils/helper', function() {
       });
     });
   });
+
   describe('Array helpers', function(){
+
+    var tests = Em.A([
+      {
+        m: 'plain objects, no nesting',
+        array: [{a: 1}, {a: 2}, {a: 3}],
+        property: 'a',
+        callback3: function (item) {
+          return Em.get(item, 'a');
+        },
+        e1: {1: {a: 1}, 2: {a: 2}, 3: {a: 3}},
+        e2: {1: true, 2: true, 3: true},
+        e3: {1: 1, 2: 2, 3: 3}
+      },
+      {
+        m: 'plain objects, nesting',
+        array: [{a: {a: 1}}, {a: {a: 2}}, {a:{a: 3}}],
+        property: 'a.a',
+        callback3: function (item) {
+          return Em.get(item, 'a.a');
+        },
+        e1: {1: {a: {a: 1}}, 2: {a: {a: 2}}, 3: {a: {a: 3}}},
+        e2: {1: true, 2: true, 3: true},
+        e3: {1: 1, 2: 2, 3: 3}
+      },
+      {
+        m: 'Ember objects, no nesting',
+        array: [O.create({a: 1}), O.create({a: 2}), O.create({a: 3})],
+        property: 'a',
+        callback3: function (item) {
+          return Em.get(item, 'a');
+        },
+        e1: {1: O.create({a: 1}), 2: O.create({a: 2}), 3: O.create({a: 3})},
+        e2: {1: true, 2: true, 3: true},
+        e3: {1: 1, 2: 2, 3: 3}
+      },
+      {
+        m: 'Ember objects, nesting',
+        array: [O.create({a: {a: 1}}), O.create({a: {a: 2}}), O.create({a: {a: 3}})],
+        property: 'a.a',
+        callback3: function (item) {
+          return Em.get(item, 'a.a');
+        },
+        e1: {1: O.create({a: {a: 1}}), 2: O.create({a: {a: 2}}), 3: O.create({a: {a: 3}})},
+        e2: {1: true, 2: true, 3: true},
+        e3: {1: 1, 2: 2, 3: 3}
+      }
+    ]);
+
     describe('#sortPropertyLight()', function(){
       var testable = [
         { a: 2 },
@@ -137,7 +189,41 @@ describe('utils/helper', function() {
         expect(testable.sortPropertyLight(['a'])).to.ok;
       });
     });
+
+    describe('#toMapByProperty', function () {
+      tests.forEach(function (test) {
+        it(test.m, function () {
+          expect(test.array.toMapByProperty(test.property)).to.eql(test.e1);
+        });
+      });
+    });
+
+    describe('#toWickMapByProperty', function () {
+      tests.forEach(function (test) {
+        it(test.m, function () {
+          expect(test.array.toWickMapByProperty(test.property)).to.eql(test.e2);
+        });
+      });
+    });
+
+    describe('#toMapByCallback', function () {
+      tests.forEach(function (test) {
+        it(test.m, function () {
+          expect(test.array.toMapByCallback(test.property, test.callback3)).to.eql(test.e3);
+        });
+      });
+    });
+
+    describe('#toWickMap', function () {
+
+      it('should convert to wick map', function () {
+        expect([1,2,3].toWickMap()).to.eql({1: true, 2: true, 3: true});
+      });
+
+    });
+
   });
+
   describe('App helpers', function(){
     var appendDiv = function() {
       $('body').append('<div id="tooltip-test"></div>');
@@ -347,6 +433,7 @@ describe('utils/helper', function() {
       });
     });
   });
+
   describe('#App.permit()', function() {
     var obj = {
       a1: 'v1',
@@ -484,4 +571,5 @@ describe('utils/helper', function() {
       });
     });
   });
+
 });


[04/50] [abbrv] ambari git commit: AMBARI-13501. Default cursor for export graph data dropdown menus

Posted by nc...@apache.org.
AMBARI-13501. Default cursor for export graph data dropdown menus


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0a9df8c8
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0a9df8c8
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0a9df8c8

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 0a9df8c8b4ac6502c72c2bc25aa6cf87081bb814
Parents: 2ff2d2f
Author: Alex Antonenko <hi...@gmail.com>
Authored: Wed Oct 21 19:16:56 2015 +0300
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Wed Oct 21 20:02:35 2015 +0300

----------------------------------------------------------------------
 ambari-web/app/styles/common.less | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0a9df8c8/ambari-web/app/styles/common.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/common.less b/ambari-web/app/styles/common.less
index 60b7553..a0252d3 100644
--- a/ambari-web/app/styles/common.less
+++ b/ambari-web/app/styles/common.less
@@ -354,7 +354,10 @@
   top: 25px;
   min-width: 60px;
   font-size: 14px;
-  cursor: default;
+  cursor: auto;
+  a {
+    cursor: pointer;
+  }
 }
 
 .bootstrap-checkbox {


[05/50] [abbrv] ambari git commit: AMBARI-13505 Change Ambari UI Element Identifiers. (atkach)

Posted by nc...@apache.org.
AMBARI-13505 Change Ambari UI Element Identifiers. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d259917d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d259917d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d259917d

Branch: refs/heads/branch-dev-patch-upgrade
Commit: d259917d7cc3a574250ded191097d6cbd6c08900
Parents: 66f8ad4
Author: Andrii Tkach <at...@hortonworks.com>
Authored: Wed Oct 21 20:07:15 2015 +0300
Committer: Andrii Tkach <at...@hortonworks.com>
Committed: Wed Oct 21 20:07:15 2015 +0300

----------------------------------------------------------------------
 .../views/stackVersions/stackVersionPage.html   |  6 +-
 .../app/controllers/wizard/step6_controller.js  |  8 ++-
 ambari-web/app/styles/alerts.less               |  4 +-
 .../common/configs/config_history_flow.hbs      |  6 +-
 .../common/configs/service_config_category.hbs  |  2 +-
 ambari-web/app/templates/main/admin.hbs         |  4 +-
 .../app/templates/main/admin/kerberos.hbs       |  2 +-
 .../admin/stack_upgrade/edit_repositories.hbs   |  6 +-
 .../admin/stack_upgrade/upgrade_version_box.hbs |  6 +-
 .../main/admin/stack_upgrade/versions.hbs       |  4 +-
 ambari-web/app/templates/main/alerts.hbs        | 10 ++--
 .../alerts/manage_alert_notifications_popup.hbs | 26 ++++-----
 .../templates/main/dashboard/config_history.hbs | 10 ++--
 .../main/dashboard/widgets/hbase_links.hbs      |  8 +--
 .../main/dashboard/widgets/hdfs_links.hbs       | 12 ++--
 .../main/dashboard/widgets/yarn_links.hbs       |  4 +-
 .../app/templates/main/host/addHost/step4.hbs   |  4 +-
 .../main/host/details/host_component.hbs        |  2 +-
 ambari-web/app/templates/main/host/summary.hbs  |  2 +-
 .../app/templates/main/service/info/configs.hbs |  4 +-
 .../app/templates/main/service/info/summary.hbs |  2 +-
 .../service/info/summary/client_components.hbs  |  2 +-
 .../service/info/summary/master_components.hbs  |  4 +-
 .../service/info/summary/slave_components.hbs   |  4 +-
 ambari-web/app/templates/main/service/item.hbs  |  2 +-
 .../manage_configuration_groups_popup.hbs       |  4 +-
 .../app/templates/main/service/reassign.hbs     | 12 ++--
 .../templates/main/service/services/flume.hbs   | 20 +++----
 .../templates/main/service/services/hbase.hbs   | 28 ++++-----
 .../templates/main/service/services/hdfs.hbs    | 60 ++++++++++----------
 .../templates/main/service/services/ranger.hbs  |  4 +-
 .../templates/main/service/services/storm.hbs   | 24 ++++----
 .../templates/main/service/services/yarn.hbs    | 45 ++++++++-------
 ambari-web/app/templates/wizard/step4.hbs       | 10 ++--
 ambari-web/app/templates/wizard/step6.hbs       | 10 ++--
 ambari-web/app/templates/wizard/step9.hbs       |  4 +-
 ambari-web/app/utils/ajax/ajax.js               |  1 +
 ambari-web/app/utils/hosts.js                   |  2 +
 .../common/assign_master_components_view.js     | 19 +++++++
 .../common/configs/compare_property_view.js     |  1 +
 .../common/configs/overriddenProperty_view.js   |  1 +
 .../dependent_configs_list_popup.js             |  1 +
 .../wizard/step3/hostWarningPopupBody_view.js   |  3 +-
 43 files changed, 216 insertions(+), 177 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html b/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
index d62c9a6..0c6ca84 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
@@ -75,7 +75,7 @@
         <div class="col-sm-7"><h5><label>Base URL</label></h5></div>
       </div>
       <div class="clearfix border-bottom bottom-margin" ng-repeat="os in osList">
-        <div class="col-sm-2">
+        <div class="col-sm-2 os-checkbox">
           <div class="checkbox">
             <label>
               <input type="checkbox" ng-model="os.selected" ng-change="toggleOSSelect()"> {{os.OperatingSystems.os_type}}
@@ -83,7 +83,7 @@
           </div>
         </div>
         <div class="col-sm-10">
-          <div class="form-group" ng-class="{'has-error': repository.hasError }" ng-repeat="repository in os.repositories">
+          <div class="form-group {{repository.Repositories.repo_name}}" ng-class="{'has-error': repository.hasError }" ng-repeat="repository in os.repositories">
             <div class="col-sm-3"><label class="control-label">{{repository.Repositories.repo_name}}</label></div>
             <div class="col-sm-9"><input type="text" class="form-control" ng-model="repository.Repositories.base_url"
                                          ng-change="clearError()" ng-disabled="!os.selected"></div>
@@ -91,7 +91,7 @@
         </div>
       </div>
       <div class="clearfix">
-        <div class="col-sm-12">
+        <div class="col-sm-12" id="skip-validation">
           <div class="checkbox">
             <label>
               <input type="checkbox" ng-model="skipValidation" ng-change="clearErrors()">

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/controllers/wizard/step6_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step6_controller.js b/ambari-web/app/controllers/wizard/step6_controller.js
index 83828b5..3427aa1 100644
--- a/ambari-web/app/controllers/wizard/step6_controller.js
+++ b/ambari-web/app/controllers/wizard/step6_controller.js
@@ -319,7 +319,9 @@ App.WizardStep6Controller = Em.Controller.extend(App.BlueprintMixin, {
             allChecked: false,
             isRequired: serviceComponent.get('isRequired'),
             noChecked: true,
-            isDisabled: installedServices.someProperty('serviceName', stackService.get('serviceName')) && this.get('isAddServiceWizard')
+            isDisabled: installedServices.someProperty('serviceName', stackService.get('serviceName')) && this.get('isAddServiceWizard'),
+            allId: 'all-' + serviceComponent.get('componentName'),
+            noneId: 'none-' + serviceComponent.get('componentName')
           }));
         }
       }, this);
@@ -330,7 +332,9 @@ App.WizardStep6Controller = Em.Controller.extend(App.BlueprintMixin, {
         label: App.format.role('CLIENT'),
         allChecked: false,
         noChecked: true,
-        isDisabled: false
+        isDisabled: false,
+        allId: 'all-CLIENT',
+        noneId: 'none-CLIENT'
       }));
     }
     this.get('headers').pushObjects(headers);
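
The allId/noneId values added above surface as stable DOM ids on the per-component
"all"/"none" links in the step-6 table, so UI automation can target them without
positional selectors. A hedged sketch (component name and lookup are illustrative):

    // sketch: each header now carries predictable ids derived from the component name
    var header = Em.Object.create({
      name: 'DATANODE', // illustrative component
      allId: 'all-DATANODE',
      noneId: 'none-DATANODE'
    });
    // an automated test can then resolve the link directly:
    var selectAllLink = document.getElementById(header.get('allId'));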

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/styles/alerts.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/alerts.less b/ambari-web/app/styles/alerts.less
index 3cfafe7..727df04 100644
--- a/ambari-web/app/styles/alerts.less
+++ b/ambari-web/app/styles/alerts.less
@@ -409,7 +409,9 @@
 }
 
 .notification-description {
-  white-space: pre;
+  .input-value {
+    white-space: pre;
+  }
 }
 
 #create-edit-alert-notification {

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/common/configs/config_history_flow.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/common/configs/config_history_flow.hbs b/ambari-web/app/templates/common/configs/config_history_flow.hbs
index 1c99f57..4f14920 100644
--- a/ambari-web/app/templates/common/configs/config_history_flow.hbs
+++ b/ambari-web/app/templates/common/configs/config_history_flow.hbs
@@ -38,9 +38,9 @@
               <div class="label-wrapper span8"
                    data-toggle="tooltip" {{bindAttr data-original-title="view.compareServiceVersion.fullNotes"}}>
                   {{t services.service.config.configHistory.comparing}}
-                  <span class="label label-info">{{view.displayedServiceVersion.versionText}}</span>
+                  <span class="label label-info current-version-label">{{view.displayedServiceVersion.versionText}}</span>
                   ...
-                  <span class="label label-info">{{view.compareServiceVersion.versionText}}</span>
+                  <span class="label label-info compare-version-label">{{view.compareServiceVersion.versionText}}</span>
                 {{#if view.compareServiceVersion.isCurrent}}
                     <span class="label label-success">{{t common.current}}</span>
                 {{/if}}
@@ -99,7 +99,7 @@
             </div>
             <div class="label-wrapper span8" data-toggle="tooltip" {{bindAttr data-original-title="view.displayedServiceVersion.fullNotes"}}>
               {{#if view.displayedServiceVersion.versionText}}
-                <span class="label label-info">{{view.displayedServiceVersion.versionText}}</span>
+                <span class="label label-info current-version-label">{{view.displayedServiceVersion.versionText}}</span>
               {{/if}}
               {{#if view.displayedServiceVersion.isCurrent}}
                   <span class="label-current label label-success icon-ok" data-toggle="tooltip" {{translateAttr title="common.current"}}></span>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/common/configs/service_config_category.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/common/configs/service_config_category.hbs b/ambari-web/app/templates/common/configs/service_config_category.hbs
index 6cd1203..bfd77bd 100644
--- a/ambari-web/app/templates/common/configs/service_config_category.hbs
+++ b/ambari-web/app/templates/common/configs/service_config_category.hbs
@@ -18,7 +18,7 @@
 
 <div class="accordion-heading" {{action "onToggleBlock" category target="view"}}>
   <i {{bindAttr class=":pull-left :accordion-toggle view.category.isCollapsed:icon-caret-right:icon-caret-down"}}></i>
-  <a class="accordion-toggle">
+  <a class="accordion-toggle category-header">
     <span class="category-name">{{view.category.displayName}}</span>
     {{#if view.category.errorCount}}
       <span class="badge badge-important">{{view.category.errorCount}}</span>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/admin.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin.hbs b/ambari-web/app/templates/main/admin.hbs
index 4a34970..454bf1e 100644
--- a/ambari-web/app/templates/main/admin.hbs
+++ b/ambari-web/app/templates/main/admin.hbs
@@ -20,7 +20,7 @@
   <div id="main-admin-menu" class="well span2">
       <ul class="nav nav-list">
         {{#each category in view.categories}}
-          {{#view view.NavItemView itemBinding="category.name" }}
+          {{#view view.NavItemView itemBinding="category.name" elementIdBinding="category.name"}}
               <a href="#" {{action "goToAdmin" category.url}} >{{category.label}}</a>
           {{/view}}
         {{/each}}
@@ -31,4 +31,4 @@
       {{outlet}}
     </div>
   </div>
-</div>
\ No newline at end of file
+</div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/admin/kerberos.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/kerberos.hbs b/ambari-web/app/templates/main/admin/kerberos.hbs
index 2ba6001..8b3f9dd 100644
--- a/ambari-web/app/templates/main/admin/kerberos.hbs
+++ b/ambari-web/app/templates/main/admin/kerberos.hbs
@@ -22,7 +22,7 @@
         {{#isAccessible ADMIN}}
           <button class="btn btn-padding btn-warning admin-disable-security-btn" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action notifySecurityOffPopup target="controller"}}>{{t admin.kerberos.button.disable}} </button>
           {{#unless isManualKerberos}}
-            <button class="btn btn-success" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action regenerateKeytabs target="controller"}}>
+            <button class="btn btn-success" id="regenerate-keytabs" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action regenerateKeytabs target="controller"}}>
               <i class="icon-repeat"></i> {{t admin.kerberos.button.regenerateKeytabs}}</button>
             {{#if App.supports.storeKDCCredentials}}
               <button class="btn btn-primary" {{action showManageKDCCredentialsPopup target="controller"}}>{{t admin.kerberos.credentials.store.menu.label}}</button>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/admin/stack_upgrade/edit_repositories.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/stack_upgrade/edit_repositories.hbs b/ambari-web/app/templates/main/admin/stack_upgrade/edit_repositories.hbs
index d90c23d..4ee3418 100644
--- a/ambari-web/app/templates/main/admin/stack_upgrade/edit_repositories.hbs
+++ b/ambari-web/app/templates/main/admin/stack_upgrade/edit_repositories.hbs
@@ -30,13 +30,13 @@
   </div>
 </div>
 {{#each os in view.content.operatingSystems}}
-  <div class="row-fluid os-block">
+  <div class="row-fluid os-block" {{bindAttr id="os.osType"}}>
     <div class="span2">
       {{os.osType}}
     </div>
     <div class="span10">
       {{#each repository in os.repositories}}
-        <div class="row-fluid">
+        <div {{bindAttr class="repository.repoName :row-fluid"}}>
           <div class="span3">{{repository.repoName}}</div>
           <div {{bindAttr class="repository.hasError:error :control-group :span9"}}>{{view App.BaseUrlTextField repositoryBinding="repository"}}</div>
         </div>
@@ -45,7 +45,7 @@
   </div>
 {{/each}}
 
-<div>
+<div id="skip-validation">
   <label>{{view view.skipCheckBox checkedBinding="view.parentView.skipValidation"}}{{t installer.step1.advancedRepo.skipValidation.message}}
     <i class="icon-question-sign" rel="skip-validation-tooltip"
        data-toggle="tooltip" {{translateAttr title="installer.step1.advancedRepo.skipValidation.tooltip"}}></i></label>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/admin/stack_upgrade/upgrade_version_box.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/stack_upgrade/upgrade_version_box.hbs b/ambari-web/app/templates/main/admin/stack_upgrade/upgrade_version_box.hbs
index be5df49..96d4544 100644
--- a/ambari-web/app/templates/main/admin/stack_upgrade/upgrade_version_box.hbs
+++ b/ambari-web/app/templates/main/admin/stack_upgrade/upgrade_version_box.hbs
@@ -76,19 +76,19 @@
 <div class="hosts-section">
   <div class="hosts-bar">{{t common.hosts}}</div>
   <div class="row-fluid host-link">
-    <div class="span4 align-center">
+    <div class="span4 align-center not-installed-hosts">
       <div><a href="#" class="hosts-tooltip not-active" {{bindAttr data-original-title="view.content.noInitHostsTooltip" }}
         {{action showHosts view.versionStateMap.not_installed target="view"}}>
         {{view.content.notInstalledHosts.length}}</a></div>
       <div>{{t admin.stackVersions.version.notInstalled}}</div>
     </div>
-    <div class="span4 align-center">
+    <div class="span4 align-center installed-hosts">
       <div><a href="#" class="hosts-tooltip not-active" {{bindAttr data-original-title="view.content.noInstalledHostsTooltip" }}
         {{action showHosts view.versionStateMap.installed target="view"}}>
         {{view.content.installedHosts.length}}</a></div>
       <div>{{t common.installed}}</div>
     </div>
-    <div class="span4 align-center">
+    <div class="span4 align-center current-hosts">
       <div><a href="#" class="hosts-tooltip not-active" {{bindAttr data-original-title="view.content.noCurrentHostsTooltip" }}
         {{action showHosts view.versionStateMap.current target="view"}}>
         {{view.content.currentHosts.length}}</a></div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/admin/stack_upgrade/versions.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/stack_upgrade/versions.hbs b/ambari-web/app/templates/main/admin/stack_upgrade/versions.hbs
index c08179e..0e69339 100644
--- a/ambari-web/app/templates/main/admin/stack_upgrade/versions.hbs
+++ b/ambari-web/app/templates/main/admin/stack_upgrade/versions.hbs
@@ -37,7 +37,9 @@
     </ul>
   </div>
   {{#isAccessible upgrade_ONLY_ADMIN}}
-    <button class="btn btn-primary pull-right" {{action goToVersions target="view"}}><i class="icon-external-link"></i>&nbsp;{{t admin.stackVersions.manageVersions}}</button>
+    <button class="btn btn-primary pull-right" {{action goToVersions target="view"}} id="manage-versions-link">
+      <i class="icon-external-link"></i>&nbsp;{{t admin.stackVersions.manageVersions}}
+    </button>
   {{/isAccessible}}
 </div>
 <div class="row-fluid">

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/alerts.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/alerts.hbs b/ambari-web/app/templates/main/alerts.hbs
index a5b168b..8e056c4 100644
--- a/ambari-web/app/templates/main/alerts.hbs
+++ b/ambari-web/app/templates/main/alerts.hbs
@@ -53,16 +53,16 @@
       {{#if view.pageContent}}
         {{#each alertDefinition in view.pageContent}}
           <tr>
-            <td class="first">
+            <td class="first alert-name">
               <span {{bindAttr title="alertDefinition.type"}} {{bindAttr class=":type-icon  alertDefinition.typeIconClass"}}></span>
               <a href="#" {{action "gotoAlertDetails" alertDefinition}}>{{alertDefinition.label}}</a>
             </td>
-            <td>{{{alertDefinition.status}}}</td>
-            <td>{{alertDefinition.serviceDisplayName}}</td>
-            <td>
+            <td class="alert-status">{{{alertDefinition.status}}}</td>
+            <td class="alert-service">{{alertDefinition.serviceDisplayName}}</td>
+            <td class="alert-time">
               <time class="timeago" {{bindAttr data-original-title="alertDefinition.lastTriggeredFormatted"}}>{{alertDefinition.lastTriggeredAgoFormatted}}</time>
             </td>
-            <td class="last toggle-state-button">
+            <td class="last toggle-state-button alert-state">
               {{#if alertDefinition.enabled}}
                 {{#isAccessible ADMIN}}
                   <a href="#" {{action "toggleState" alertDefinition target="controller"}} {{bindAttr class="alertDefinition.enabled:alert-definition-enable:alert-definition-disable"}}>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/alerts/manage_alert_notifications_popup.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/alerts/manage_alert_notifications_popup.hbs b/ambari-web/app/templates/main/alerts/manage_alert_notifications_popup.hbs
index ed18c63..b0fa709 100644
--- a/ambari-web/app/templates/main/alerts/manage_alert_notifications_popup.hbs
+++ b/ambari-web/app/templates/main/alerts/manage_alert_notifications_popup.hbs
@@ -63,13 +63,13 @@
             <div class="span12 pull-right">
               {{#if alertNotifications.length}}
                 {{#if selectedAlertNotification}}
-                  <div class="row-fluid">
+                  <div class="row-fluid notification-name">
                     <div class="span3 input-label">{{t common.name}}</div>
-                    <div class="span9">{{selectedAlertNotification.name}}</div>
+                    <div class="span9 input-value">{{selectedAlertNotification.name}}</div>
                   </div>
-                  <div class="row-fluid">
+                  <div class="row-fluid notification-groups">
                     <div class="span3 input-label">{{t common.groups}}</div>
-                    <div class="span9">
+                    <div class="span9 input-value">
                       {{#if selectedAlertNotification.global}}
                         {{t common.all}}
                       {{else}}
@@ -81,28 +81,28 @@
                       {{/if}}
                     </div>
                   </div>
-                  <div class="row-fluid">
+                  <div class="row-fluid notification-severity">
                     <div class="span3 input-label">{{t common.severity}}</div>
-                    <div class="span9">{{view.severities}}</div>
+                    <div class="span9 input-value">{{view.severities}}</div>
                   </div>
-                  <div class="row-fluid">
+                  <div class="row-fluid notification-method">
                     <div class="span3 input-label">{{t alerts.actions.manage_alert_notifications_popup.method}}</div>
-                    <div class="span9">{{selectedAlertNotification.type}}</div>
+                    <div class="span9 input-value">{{selectedAlertNotification.type}}</div>
                   </div>
                   {{#if view.showEmailDetails}}
-                    <div class="row-fluid">
+                    <div class="row-fluid notification-email">
                       <div class="span3 input-label">{{t alerts.actions.manage_alert_notifications_popup.email}}</div>
-                      <div class="span9">{{view.email}}</div>
+                      <div class="span9 input-value">{{view.email}}</div>
                     </div>
                   {{/if}}
                   {{#if view.showSNMPDetails}}
                   {{/if}}
-                  <div class="row-fluid">
+                  <div class="row-fluid notification-description">
                     <div class="span3 input-label">{{t common.description}}</div>
                     {{#if selectedAlertNotification.description}}
-                      <div class="span9 notification-description">{{selectedAlertNotification.description}}</div>
+                      <div class="span9 input-value">{{selectedAlertNotification.description}}</div>
                     {{else}}
-                      <div class="span9 notification-description">{{t alerts.actions.manage_alert_notifications_popup.noDescription}}</div>
+                      <div class="span9 input-value">{{t alerts.actions.manage_alert_notifications_popup.noDescription}}</div>
                     {{/if}}
                   </div>
                 {{/if}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/dashboard/config_history.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/dashboard/config_history.hbs b/ambari-web/app/templates/main/dashboard/config_history.hbs
index be50369..d20c514 100644
--- a/ambari-web/app/templates/main/dashboard/config_history.hbs
+++ b/ambari-web/app/templates/main/dashboard/config_history.hbs
@@ -28,11 +28,11 @@
     {{/view}}
 
     <tr class="filter-row config-history-filter-row">
-      <th class="first">{{view view.serviceFilterView}}</th>
-      <th>{{view view.configGroupFilterView}}</th>
-      <th>{{view view.modifiedFilterView}}</th>
-      <th>{{view view.authorFilterView}}</th>
-      <th>{{view view.notesFilterView}}</th>
+      <th class="first cg-service">{{view view.serviceFilterView}}</th>
+      <th class="cg-name">{{view view.configGroupFilterView}}</th>
+      <th class="cg-created">{{view view.modifiedFilterView}}</th>
+      <th class="cg-author">{{view view.authorFilterView}}</th>
+      <th class="cg-notes">{{view view.notesFilterView}}</th>
     </tr>
     </thead>
     <tbody class="services-menu">

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/dashboard/widgets/hbase_links.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/dashboard/widgets/hbase_links.hbs b/ambari-web/app/templates/main/dashboard/widgets/hbase_links.hbs
index dd78cc1..da9e2ce 100644
--- a/ambari-web/app/templates/main/dashboard/widgets/hbase_links.hbs
+++ b/ambari-web/app/templates/main/dashboard/widgets/hbase_links.hbs
@@ -27,7 +27,7 @@
         <div class="widget-content">
           <table>
             <!--hbase master server-->
-            <tr>
+            <tr class="hbase-master-link">
               <td>
                 {{#if view.activeMaster}}
                   <a href="#" {{action showDetails view.activeMaster.host}}>{{t dashboard.services.hbase.masterServer}}</a>
@@ -37,13 +37,13 @@
               </td>
             </tr>
             <!--region servers-->
-            <tr>
+            <tr class="hbase-regionserver-link">
               <td>
                 <a href="#" {{action filterHosts view.component}}>{{view.model.regionServersTotal}} {{t dashboard.services.hbase.regionServers}}</a>
               </td>
             </tr>
             <!--hbase master Web UI-->
-            <tr>
+            <tr class="hbase-web-ui-link">
               <td>
                 {{#if view.activeMaster}}
                   <a {{bindAttr href="view.hbaseMasterWebUrl"}}
@@ -96,4 +96,4 @@
       {{/if}}
     </li>
   </ul>
-</div>
\ No newline at end of file
+</div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/dashboard/widgets/hdfs_links.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/dashboard/widgets/hdfs_links.hbs b/ambari-web/app/templates/main/dashboard/widgets/hdfs_links.hbs
index 9be5ba4..847aa10 100644
--- a/ambari-web/app/templates/main/dashboard/widgets/hdfs_links.hbs
+++ b/ambari-web/app/templates/main/dashboard/widgets/hdfs_links.hbs
@@ -28,7 +28,7 @@
       {{#if view.isHAEnabled }}
         <table>
           <!--Active NameNode-->
-          <tr>
+          <tr class="active-namenode-link">
             {{#if view.isActiveNNValid}}
               <td><a href="#" {{action showDetails view.model.activeNameNode.host}}>{{t dashboard.widgets.HDFSLinks.activeNameNode}}</a></td>
             {{else}}
@@ -36,7 +36,7 @@
             {{/if}}
           </tr>
           <!--Standby NameNodes-->
-          <tr>
+          <tr class="standby-namenode-link">
             {{#if view.isStandbyNNValid}}
               {{#if view.isTwoStandbyNN}}
                 <!--Two Standby NameNodes-->
@@ -50,7 +50,7 @@
             {{/if}}
           </tr>
           <!--Data Nodes-->
-          <tr>
+          <tr class="datanode-link">
             <td>
               <a href="#" {{action filterHosts view.component}}>{{view.model.dataNodesTotal}} {{t dashboard.services.hdfs.datanodes}}</a>
             </td>
@@ -59,15 +59,15 @@
       {{else}}
         <table>
           <!--NameNode-->
-          <tr>
+          <tr class="namenode-link">
             <td><a href="#" {{action showDetails view.model.nameNode.host}}>{{t dashboard.services.hdfs.nanmenode}}</a></td>
           </tr>
           <!--SecondaryNameNode-->
-          <tr>
+          <tr class="secondary-namenode-link">
             <td><a href="#" {{action showDetails view.model.snameNode.host}}>{{t dashboard.services.hdfs.snanmenode}}</a></td>
           </tr>
           <!--Data Nodes-->
-          <tr>
+          <tr class="datanode-link">
             <td>
               <a href="#" {{action filterHosts view.component}}>{{view.model.dataNodesTotal}} {{t dashboard.services.hdfs.datanodes}}</a>
             </td>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/dashboard/widgets/yarn_links.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/dashboard/widgets/yarn_links.hbs b/ambari-web/app/templates/main/dashboard/widgets/yarn_links.hbs
index 5645b8c..739dab3 100644
--- a/ambari-web/app/templates/main/dashboard/widgets/yarn_links.hbs
+++ b/ambari-web/app/templates/main/dashboard/widgets/yarn_links.hbs
@@ -29,14 +29,14 @@
         <div class="widget-content">
           <table>
             <!-- Resource Manager-->
-            <tr>
+            <tr class="resourcemanager-link">
               <td>
                 <a href="#" {{action showDetails view.model.resourceManager.host}}> {{t dashboard.services.yarn.resourceManager}}</a>
               </td>
             </tr>
 
            <!-- Node Managers-->
-            <tr>
+            <tr class="nodemanager-link">
               <td>
                 <a href="#" {{action filterHosts view.component}}> {{view.model.nodeManagersTotal}} {{t dashboard.services.yarn.nodeManagers}}</a>
               </td>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/host/addHost/step4.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/host/addHost/step4.hbs b/ambari-web/app/templates/main/host/addHost/step4.hbs
index 0c39b48..8bad634 100644
--- a/ambari-web/app/templates/main/host/addHost/step4.hbs
+++ b/ambari-web/app/templates/main/host/addHost/step4.hbs
@@ -21,7 +21,7 @@
     {{t addHost.step4.title}}
   </div>
   <div class="pre-scrollable">
-      <table class="table">
+      <table class="table" id="host-configurations-table">
         <thead>
           <tr>
             <th>{{t common.service}}</th>
@@ -30,7 +30,7 @@
         </thead>
         <tbody>
           {{#each service in controller.content.configGroups}}
-            <tr>
+            <tr {{bindAttr id="service.serviceId"}}>
               <td>{{service.displayName}}</td>
               <td>
                 {{

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/host/details/host_component.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/host/details/host_component.hbs b/ambari-web/app/templates/main/host/details/host_component.hbs
index c9c6069..23cdb7c 100644
--- a/ambari-web/app/templates/main/host/details/host_component.hbs
+++ b/ambari-web/app/templates/main/host/details/host_component.hbs
@@ -16,7 +16,7 @@
 * limitations under the License.
 }}
 
-<div class="span7">
+<div class="span7 component-label">
   {{#if view.isUpgradeFailed}}
     {{#isAccessible ADMIN}}
       <a href="#" {{action "upgradeComponent" view.content target="controller"}} >

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/host/summary.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/host/summary.hbs b/ambari-web/app/templates/main/host/summary.hbs
index 85f43f0..aff0349 100644
--- a/ambari-web/app/templates/main/host/summary.hbs
+++ b/ambari-web/app/templates/main/host/summary.hbs
@@ -66,7 +66,7 @@
         {{/if}}
           {{!clients and add component button}}
           <div class="clients row-fluid">
-            <div class="span7 row">
+            <div class="span7 row client-list">
               {{#if view.clients.length}}
                   <div class="span3 align-right">{{t common.clients}}&nbsp;/</div>
                 <div class="span7">

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/service/info/configs.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/info/configs.hbs b/ambari-web/app/templates/main/service/info/configs.hbs
index 0ffad28..8f4eb0b 100644
--- a/ambari-web/app/templates/main/service/info/configs.hbs
+++ b/ambari-web/app/templates/main/service/info/configs.hbs
@@ -33,11 +33,11 @@
                 <span class="caret"></span>
               </button>
               <ul class="dropdown-menu">
-                <li>
+                <li class="restart-all-components">
                   <a href="#" {{action restartAllStaleConfigComponents target="controller"}}>{{t restart.service.all.affected}}</a>
                 </li>
                 {{#if view.rollingRestartSlaveComponentName}}
-                  <li>
+                  <li class="restart-slave-components">
                     <a href="#" {{action rollingRestartStaleConfigSlaveComponents view.rollingRestartSlaveComponentName target="controller"}}>{{view.rollingRestartActionName}}</a>
                   </li>
                 {{/if}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/service/info/summary.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/info/summary.hbs b/ambari-web/app/templates/main/service/info/summary.hbs
index af8078d..b295a3b 100644
--- a/ambari-web/app/templates/main/service/info/summary.hbs
+++ b/ambari-web/app/templates/main/service/info/summary.hbs
@@ -155,7 +155,7 @@
         <div {{bindAttr class=":accordion collapsedSection.toggleIndex"}}>
           <div class="accordion-group box">
             <div class="accordion-heading box-header">
-              <span class="pull-left accordion-toggle"
+              <span class="pull-left accordion-toggle metrics-header"
                     data-toggle="collapse" {{bindAttr dataParent="collapsedSection.toggleIndex" href="collapsedSection.toggleIndex"}}>
                 <i class="pull-left icon-caret-toggle"></i>
                 <span>{{collapsedSection.header}}</span>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/service/info/summary/client_components.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/info/summary/client_components.hbs b/ambari-web/app/templates/main/service/info/summary/client_components.hbs
index 7729ad2..42bdd44 100644
--- a/ambari-web/app/templates/main/service/info/summary/client_components.hbs
+++ b/ambari-web/app/templates/main/service/info/summary/client_components.hbs
@@ -23,7 +23,7 @@
         {{clientComponent.displayNamePluralized}}
       </a>
     </td>
-    <td>
+    <td class="summary-label">
       <span class="green-live">{{clientComponent.installedCount}}</span> {{clientComponent.displayNamePluralized}} {{t common.installed}}
     </td>
   </tr>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/service/info/summary/master_components.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/info/summary/master_components.hbs b/ambari-web/app/templates/main/service/info/summary/master_components.hbs
index 5d6c3a6..0f7553b 100644
--- a/ambari-web/app/templates/main/service/info/summary/master_components.hbs
+++ b/ambari-web/app/templates/main/service/info/summary/master_components.hbs
@@ -17,7 +17,7 @@
 }}
 <tr class="hidden"><td></td></tr>
 {{#each comp in view.mastersComp}}
-  <tr {{bindAttr class="comp.isSubComponent:component-small"}}>
+  <tr {{bindAttr class="comp.isSubComponent:component-small :component comp.componentName"}}>
     <td class="summary-label">
       <a href="#" {{action showDetails comp.host}} title="{{unbound comp.host.publicHostName}}" rel="UsageTooltip">
         {{#if comp.displayNameAdvanced}}
@@ -27,7 +27,7 @@
         {{/if}}
       </a>
     </td>
-    <td>
+    <td class="summary-value">
       <span rel='SummaryComponentHealthTooltip' {{bindAttr class="comp.statusClass comp.statusIconClass" data-original-title="comp.passiveTooltip"}}></span>
       {{comp.componentTextStatus}}
     </td>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/service/info/summary/slave_components.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/info/summary/slave_components.hbs b/ambari-web/app/templates/main/service/info/summary/slave_components.hbs
index e864ede..d37c783 100644
--- a/ambari-web/app/templates/main/service/info/summary/slave_components.hbs
+++ b/ambari-web/app/templates/main/service/info/summary/slave_components.hbs
@@ -18,10 +18,10 @@
 <tr class="hidden"><td></td></tr>
 
 {{#each slaveComponent in view.slavesObj}}
-  <tr>
+  <tr {{bindAttr class=":component slaveComponent.componentName"}}>
     <td class="summary-label"><a href="#" {{action filterHosts slaveComponent}}>{{slaveComponent.displayNamePluralized}}</a>
     </td>
-    <td>
+    <td class="summary-value">
       <span>
         {{#view App.ComponentLiveTextView liveComponentsBinding="slaveComponent.startedCount" totalComponentsBinding="slaveComponent.totalCount"}}
           {{view.liveComponents}}/{{view.totalComponents}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/service/item.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/item.hbs b/ambari-web/app/templates/main/service/item.hbs
index d7bff8c..b13b1c2 100644
--- a/ambari-web/app/templates/main/service/item.hbs
+++ b/ambari-web/app/templates/main/service/item.hbs
@@ -21,7 +21,7 @@
     {{#if view.service.quickLinks.length}}
       {{#view App.QuickViewLinks contentBinding="view.service"}}
         <ul class="nav nav-pills move">
-          <li class="dropdown">
+          <li class="dropdown quick-links-dropdown">
             <a class="dropdown-toggle" id="quick-links-dropdown-btn" data-toggle="dropdown" href="#">{{t common.quickLinks}}<b class="caret"></b></a>
             <ul class="dropdown-menu">
               {{#if view.isLoaded}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/service/manage_configuration_groups_popup.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/manage_configuration_groups_popup.hbs b/ambari-web/app/templates/main/service/manage_configuration_groups_popup.hbs
index c99622f..06e09ad 100644
--- a/ambari-web/app/templates/main/service/manage_configuration_groups_popup.hbs
+++ b/ambari-web/app/templates/main/service/manage_configuration_groups_popup.hbs
@@ -20,7 +20,7 @@
     <div class="row-fluid  manage-configuration-group-content">
         <div class="span12">
             <div class="row-fluid">
-                <div class="span4">
+                <div class="span4 group-list">
                     <span>&nbsp;</span>
                   {{view Em.Select
                   contentBinding="configGroups"
@@ -49,7 +49,7 @@
                         </div>
                     </div>
                 </div>
-                <div class="span8">
+                <div class="span8 host-list">
                     <span>&nbsp;</span>
                     <div class="row-fluid">
                         <div class="span12 pull-right">

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/service/reassign.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/reassign.hbs b/ambari-web/app/templates/main/service/reassign.hbs
index cdebce2..510fbda 100644
--- a/ambari-web/app/templates/main/service/reassign.hbs
+++ b/ambari-web/app/templates/main/service/reassign.hbs
@@ -25,13 +25,13 @@
           <div class="well">
             <ul class="nav nav-pills nav-stacked">
               <li class="nav-header">{{t services.reassign.header}}</li>
-              <li {{bindAttr class="isStep1:active view.isStep1Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep1 target="controller"}}>{{t services.reassign.step1.header}}</a></li>
-              <li {{bindAttr class="isStep2:active view.isStep2Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep2 target="controller"}}>{{t services.reassign.step2.header}}</a></li>
-              <li {{bindAttr class="isStep3:active view.isStep3Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep3 target="controller"}}>{{t services.reassign.step3.header}}</a></li>
-              <li {{bindAttr class="isStep4:active view.isStep4Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep4 target="controller"}}>{{t services.reassign.step4.header}}</a></li>
+              <li {{bindAttr class="isStep1:active view.isStep1Disabled:disabled :step1-link"}}><a href="javascript:void(null);"  {{action gotoStep1 target="controller"}}>{{t services.reassign.step1.header}}</a></li>
+              <li {{bindAttr class="isStep2:active view.isStep2Disabled:disabled :step2-link"}}><a href="javascript:void(null);"  {{action gotoStep2 target="controller"}}>{{t services.reassign.step2.header}}</a></li>
+              <li {{bindAttr class="isStep3:active view.isStep3Disabled:disabled :step3-link"}}><a href="javascript:void(null);"  {{action gotoStep3 target="controller"}}>{{t services.reassign.step3.header}}</a></li>
+              <li {{bindAttr class="isStep4:active view.isStep4Disabled:disabled :step4-link"}}><a href="javascript:void(null);"  {{action gotoStep4 target="controller"}}>{{t services.reassign.step4.header}}</a></li>
               {{#if controller.content.hasManualSteps}}
-              <li {{bindAttr class="isStep5:active view.isStep5Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep3 target="controller"}}>{{t services.reassign.step5.header}}</a></li>
-              <li {{bindAttr class="isStep6:active view.isStep6Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep4 target="controller"}}>{{t services.reassign.step6.header}}</a></li>
+              <li {{bindAttr class="isStep5:active view.isStep5Disabled:disabled :step5-link"}}><a href="javascript:void(null);"  {{action gotoStep3 target="controller"}}>{{t services.reassign.step5.header}}</a></li>
+              <li {{bindAttr class="isStep6:active view.isStep6Disabled:disabled :step6-link"}}><a href="javascript:void(null);"  {{action gotoStep4 target="controller"}}>{{t services.reassign.step6.header}}</a></li>
               {{/if}}
             </ul>
           </div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/service/services/flume.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/services/flume.hbs b/ambari-web/app/templates/main/service/services/flume.hbs
index 40ea738..2905492 100644
--- a/ambari-web/app/templates/main/service/services/flume.hbs
+++ b/ambari-web/app/templates/main/service/services/flume.hbs
@@ -44,22 +44,22 @@
                   </td>
                   <td class="agent-status">
                     <div class="wrapp-flume-status">
-                      <div class="pull-left"><span {{bindAttr class="host.firtstAgent.healthClass"}}></span> {{host.firtstAgent.name}}</div>
-                      <div class="btn-wrapper">
-                        <div class="btn-group display-inline-block flume-agents-actions">
+                      <div class="pull-left flume-agents-status"><span {{bindAttr class="host.firtstAgent.healthClass"}}></span> {{host.firtstAgent.name}}</div>
+                      <div class="btn-wrapper flume-agents-actions">
+                        <div class="btn-group display-inline-block">
                             <a class="btn dropdown-toggle" data-toggle="dropdown" href="javascript:void(null)">{{host.firtstAgent.displayStatus}}
                                 <span class="caret"></span>
                             </a>
                             <ul class="pull-left dropdown-menu">
                                 <li {{bindAttr class="host.firtstAgent.isStartAgentDisabled:disabled"}}>
                                     <a href="javascript:void(null)"
-                                      {{bindAttr class="host.firtstAgent.isStartAgentDisabled:disabled"}}
+                                      {{bindAttr class="host.firtstAgent.isStartAgentDisabled:disabled :start-agent"}}
                                       {{action startFlumeAgent host.firtstAgent target="controller"}}>
                                       {{t services.service.summary.flume.startAgent}}</a>
                                 </li>
                                 <li {{bindAttr class="host.firtstAgent.isStopAgentDisabled:disabled"}}>
                                     <a href="javascript:void(null)"
-                                      {{bindAttr class="host.firtstAgent.isStopAgentDisabled:disabled"}}
+                                      {{bindAttr class="host.firtstAgent.isStopAgentDisabled:disabled :stop-agent"}}
                                       {{action stopFlumeAgent host.firtstAgent target="controller"}}>
                                       {{t services.service.summary.flume.stopAgent}}</a>
                                 </li>
@@ -82,14 +82,14 @@
                   {{#view view.agentView contentBinding="host"}}
                     <td class="agent-status">
                         <div class="wrapp-flume-status">
-                          <div class="pull-left"><span {{bindAttr class="agent.healthClass"}}></span> {{agent.name}}</div>
-                          <div class="btn-wrapper">
-                            <div class="btn-group display-inline-block flume-agents-actions">
+                          <div class="pull-left flume-agents-status"><span {{bindAttr class="agent.healthClass"}}></span> {{agent.name}}</div>
+                          <div class="btn-wrapper flume-agents-actions">
+                            <div class="btn-group display-inline-block">
                                 <a {{bindAttr class=":btn :dropdown-toggle"}} data-toggle="dropdown" href="javascript:void(null)">{{agent.displayStatus}}
                                     <span class="caret"></span>
                                 </a>
                                 <ul class="pull-left dropdown-menu">
-                                    <li {{bindAttr class="agent.isStartAgentDisabled:disabled"}}>
+                                    <li {{bindAttr class="agent.isStartAgentDisabled:disabled :start-agent"}}>
                                         <a href="javascript:void(null)"
                                           {{bindAttr class="agent.isStartAgentDisabled:disabled"}}
                                           {{action startFlumeAgent agent target="controller"}}>
@@ -97,7 +97,7 @@
                                     </li>
                                     <li {{bindAttr class="agent.isStopAgentDisabled:disabled"}}>
                                         <a href="javascript:void(null)"
-                                          {{bindAttr class="agent.isStopAgentDisabled:disabled"}}
+                                          {{bindAttr class="agent.isStopAgentDisabled:disabled :stop-agent"}}
                                           {{action stopFlumeAgent agent target="controller"}}>
                                           {{t services.service.summary.flume.stopAgent}}</a>
                                     </li>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/service/services/hbase.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/services/hbase.hbs b/ambari-web/app/templates/main/service/services/hbase.hbs
index dccc125..68e0224 100644
--- a/ambari-web/app/templates/main/service/services/hbase.hbs
+++ b/ambari-web/app/templates/main/service/services/hbase.hbs
@@ -19,11 +19,11 @@
 <!-- HBase Master Server -->
 {{view view.dashboardMasterComponentView}}
 <!-- RegionServers -->
-<tr>
+<tr {{bindAttr class=":component view.regionServerComponent.componentName"}}>
   <td class="summary-label"><a
           href="#" {{action filterHosts view.regionServerComponent}}>{{t dashboard.services.hbase.regionServers}}</a>
   </td>
-  <td>
+  <td class="summary-value">
     {{#if App.router.clusterController.isServiceContentFullyLoaded}}
       <span>
         {{#view App.ComponentLiveTextView liveComponentsBinding="view.service.regionServersStarted" totalComponentsBinding="view.service.regionServersTotal"}}
@@ -38,11 +38,11 @@
 </tr>
 <!-- PhoenixServers -->
 {{#if view.showPhoenixInfo}}
-  <tr>
+  <tr {{bindAttr class=":component view.phoenixServerComponent.componentName"}}>
     <td class="summary-label"><a
             href="#" {{action filterHosts view.phoenixServerComponent}}>{{t dashboard.services.hbase.phoenixServers}}</a>
     </td>
-    <td>
+    <td class="summary-value">
       <span>
         {{#view App.ComponentLiveTextView liveComponentsBinding="view.service.phoenixServersStarted" totalComponentsBinding="view.service.phoenixServersTotal"}}
           {{#if App.router.clusterController.isServiceContentFullyLoaded}}
@@ -55,9 +55,9 @@
   </tr>
 {{/if}}
 <!-- Regions in Transition -->
-<tr>
+<tr class="regions-in-trnasition">
   <td class="summary-label">{{t dashboard.services.hbase.regions.transition}}</td>
-  <td>{{view.service.regionsInTransition}}</td>
+  <td class="summary-value">{{view.service.regionsInTransition}}</td>
 </tr>
 
 <!-- Divider-- make the remaining summary info on 2nd table-->
@@ -69,23 +69,23 @@
     <tbody>
 
     <!-- HBase Master Started Time -->
-    <tr>
+    <tr class="started-time">
       <td class="summary-label">{{t dashboard.services.hbase.masterStarted}}</td>
-      <td>{{view.masterStartedTime}}</td>
+      <td class="summary-value">{{view.masterStartedTime}}</td>
     </tr>
     <!-- HBase Master Activated Time -->
-    <tr>
+    <tr class="activated-time">
       <td class="summary-label">{{t dashboard.services.hbase.masterActivated}}</td>
-      <td>{{view.masterStartedTime}}</td>
+      <td class="summary-value">{{view.masterStartedTime}}</td>
     </tr>
     <!-- Average Load -->
-    <tr>
+    <tr class="average-load">
       <td class="summary-label">{{t dashboard.services.hbase.averageLoad}}</td>
-      <td>{{view.averageLoad}}</td>
+      <td class="summary-value">{{view.averageLoad}}</td>
     </tr>
     <!-- Master Server Heap -->
-    <tr>
+    <tr class="server-heap">
       <td class="summary-label">{{t dashboard.services.hbase.masterServerHeap}}</td>
-      <td>{{view.masterServerHeapSummary}}</td>
+      <td class="summary-value">{{view.masterServerHeapSummary}}</td>
     </tr>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/service/services/hdfs.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/services/hdfs.hbs b/ambari-web/app/templates/main/service/services/hdfs.hbs
index 04a098b..4f1f820 100644
--- a/ambari-web/app/templates/main/service/services/hdfs.hbs
+++ b/ambari-web/app/templates/main/service/services/hdfs.hbs
@@ -18,10 +18,11 @@
 
 {{view view.dashboardMasterComponentView}}
 <!-- Data Nodes -->
-<tr>
+<tr {{bindAttr class=":component view.dataNodeComponent.componentName"}}>
   <td class="summary-label"><a
-          href="#" {{action filterHosts view.dataNodeComponent}}>{{t dashboard.services.hdfs.datanodes}}</a></td>
-  <td>
+          href="#" {{action filterHosts view.dataNodeComponent}}>{{t dashboard.services.hdfs.datanodes}}</a>
+  </td>
+  <td class="summary-value">
     {{#if App.router.clusterController.isServiceContentFullyLoaded}}
       <span>
         {{#view App.ComponentLiveTextView liveComponentsBinding="view.service.dataNodesStarted" totalComponentsBinding="view.service.dataNodesTotal"}}
@@ -35,12 +36,12 @@
   </td>
 </tr>
 <!-- Data Node Counts -->
-<tr>
+<tr class="datanode-count">
   <td class="summary-label">{{t dashboard.services.hdfs.datanodecounts}}</td>
   {{#if view.service.metricsNotAvailable}}
-    <td>{{t services.service.summary.notAvailable}}</td>
+    <td class="summary-value">{{t services.service.summary.notAvailable}}</td>
   {{else}}
-    <td>
+    <td class="summary-value">
       <span {{translateAttr data-original-title="dashboard.services.hdfs.datanode.status.tooltip.live" }}
               rel="tooltip">{{view.service.liveDataNodes.length}} {{t dashboard.services.hdfs.nodes.live}} </span> /
       <span {{translateAttr data-original-title="dashboard.services.hdfs.datanode.status.tooltip.dead" }}
@@ -52,11 +53,11 @@
 </tr>
 <!-- JournalNodes -->
 {{#if view.showJournalNodes}}
-  <tr>
-    <td class="summary-label"><a
-            href="#" {{action filterHosts view.journalNodeComponent}}>{{t dashboard.services.hdfs.journalnodes}}</a>
+  <tr {{bindAttr class=":component view.journalNodeComponent.componentName"}}>
+    <td class="summary-label">
+      <a href="#" {{action filterHosts view.journalNodeComponent}}>{{t dashboard.services.hdfs.journalnodes}}</a>
     </td>
-    <td>
+    <td class="summary-value">
       {{#if App.router.clusterController.isComponentsStateLoaded}}
         <span>
           {{#view App.ComponentLiveTextView liveComponentsBinding="view.journalNodesLive" totalComponentsBinding="view.journalNodesTotal"}}
@@ -72,10 +73,11 @@
 {{/if}}
 <!-- NFS Gateway -->
 {{#if view.isNfsInStack}}
-  <tr>
-    <td class="summary-label"><a
-            href="#" {{action filterHosts view.nfsGatewayComponent}}>{{t dashboard.services.hdfs.nfsgateways}}</a></td>
-    <td>
+  <tr {{bindAttr class=":component view.nfsGatewayComponent.componentName"}}>
+    <td class="summary-label">
+      <a href="#" {{action filterHosts view.nfsGatewayComponent}}>{{t dashboard.services.hdfs.nfsgateways}}</a>
+    </td>
+    <td class="summary-value">
       {{#if App.router.clusterController.isServiceContentFullyLoaded}}
         <span>
           {{#view App.ComponentLiveTextView liveComponentsBinding="view.service.nfsGatewaysStarted" totalComponentsBinding="view.service.nfsGatewaysTotal"}}
@@ -90,23 +92,23 @@
   </tr>
 {{/if}}
 <!-- NameNode Uptime -->
-<tr>
+<tr class="namenode-uptime">
   <td class="summary-label">{{t dashboard.services.hdfs.nodes.uptime}}</td>
-  <td>{{view.nodeUptime}}</td>
+  <td class="summary-value">{{view.nodeUptime}}</td>
 </tr>
 <!-- NameNode Heap -->
-<tr>
+<tr class="namenode-heap">
   <td class="summary-label">{{t dashboard.services.hdfs.nodes.heap}}</td>
-  <td>{{view.nodeHeap}}</td>
+  <td class="summary-value">{{view.nodeHeap}}</td>
 </tr>
 <!-- HDFS Capacity (Disk Usage)-->
-<tr>
+<tr class="dfs-usage">
   <td class="summary-label">{{t dashboard.services.hdfs.capacity.dfsUsed}}</td>
-  <td>{{view.dfsUsedDisk}}</td>
+  <td class="summary-value">{{view.dfsUsedDisk}}</td>
 </tr>
-<tr>
+<tr class="non-dfs-used">
   <td class="summary-label">{{t dashboard.services.hdfs.capacity.nonDfsUsed}}</td>
-  <td>{{view.nonDfsUsedDisk}}</td>
+  <td class="summary-value">{{view.nonDfsUsedDisk}}</td>
 </tr>
 
 <!-- Divider-- make the remaining summary info on 2nd table-->
@@ -117,38 +119,38 @@
   <table class="summary-info table no-borders table-condensed">
     <tbody>
 
-    <tr>
+    <tr class="capacity-remaining">
       <td class="summary-label">{{t dashboard.services.hdfs.capacity.remaining}}</td>
       <td>{{view.remainingDisk}}</td>
     </tr>
     <!-- Blocks Total -->
-    <tr>
+    <tr class="blocks-total">
       <td class="summary-label">{{t services.service.summary.blocksTotal}}</td>
       <td>{{view.dfsTotalBlocks}}</td>
     </tr>
     <!-- Block Errors -->
-    <tr>
+    <tr class="block-errors">
       <td class="summary-label">{{t services.service.summary.blockErrors}}</td>
       <td>
         {{view.blockErrorsMessage}}
       </td>
     </tr>
     <!-- Total Files And Directories -->
-    <tr>
+    <tr class="total-files-dirs">
       <td class="summary-label">{{t dashboard.services.hdfs.totalFilesAndDirs}}</td>
       <td>{{view.dfsTotalFiles}}</td>
     </tr>
     <!-- Upgrade Status -->
-    <tr>
+    <tr class="upgrade-status">
       <td class="summary-label">{{t services.service.summary.pendingUpgradeStatus}}</td>
       <td>
         <span {{bindAttr class="view.isUpgradeStatusWarning:upgrade-status-warning"}}>{{view.upgradeStatus}}</span>
       </td>
     </tr>
     <!-- Safe Mode Status -->
-    <tr>
+    <tr class="safe-mode-status">
       <td class="summary-label">{{t services.service.summary.safeModeStatus}}</td>
       <td>
         {{view.safeModeStatus}}
       </td>
-    </tr>
\ No newline at end of file
+    </tr>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/service/services/ranger.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/services/ranger.hbs b/ambari-web/app/templates/main/service/services/ranger.hbs
index 2617002..bdd8269 100644
--- a/ambari-web/app/templates/main/service/services/ranger.hbs
+++ b/ambari-web/app/templates/main/service/services/ranger.hbs
@@ -20,9 +20,9 @@
 
 {{#each item in controller.rangerPlugins}}
   {{#if item.isDisplayed}}
-    <tr>
+    <tr class="ranger-plugin">
       <td class="summary-label">{{item.pluginTitle}}</td>
-      <td>{{item.status}}</td>
+      <td class="summary-value">{{item.status}}</td>
     </tr>
   {{/if}}
 {{/each}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/service/services/storm.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/services/storm.hbs b/ambari-web/app/templates/main/service/services/storm.hbs
index e54b3f5..2c552fc 100644
--- a/ambari-web/app/templates/main/service/services/storm.hbs
+++ b/ambari-web/app/templates/main/service/services/storm.hbs
@@ -17,13 +17,13 @@
 }}
 
 {{view App.SummaryMasterComponentsView mastersCompBinding="view.parentView.mastersObj"}}
-<tr>
+<tr class="component SUPERVISOR">
   <td class="summary-label">
     <a href="#" {{action filterHosts view.filterComponent}}>
       {{t dashboard.services.storm.supervisors}}
     </a>
   </td>
-  <td>
+  <td class="summary-value">
     {{#if App.router.clusterController.isServiceContentFullyLoaded}}
       <span>
         {{#view App.ComponentLiveTextView liveComponentsBinding="view.superVisorsLive" totalComponentsBinding="view.superVisorsTotal"}}
@@ -44,44 +44,44 @@
   <table class="summary-info table no-borders table-condensed">
     <tbody>
 
-    <tr>
+    <tr class="storm-slots">
       <td class="summary-label">
         {{t services.service.summary.storm.freeslots}}
       </td>
-      <td>
+      <td class="summary-value">
         {{formatNull view.service.freeSlots}} / {{formatNull view.service.totalSlots}}
         ({{formatNull view.freeSlotsPercentage empty="0"}}% {{t common.free}})
       </td>
     </tr>
-    <tr>
+    <tr class="storm-tasks">
       <td class="summary-label">
         {{t services.service.summary.storm.tasks}}
       </td>
-      <td>
+      <td class="summary-value">
         {{formatNull view.service.totalTasks}}
       </td>
     </tr>
-    <tr>
+    <tr class="storm-executors">
       <td class="summary-label">
         {{t services.service.summary.storm.executors}}
       </td>
-      <td>
+      <td class="summary-value">
         {{formatNull view.service.totalExecutors}}
       </td>
     </tr>
-    <tr>
+    <tr class="storm-topologies">
       <td class="summary-label">
         {{t services.service.summary.storm.topologies}}
       </td>
-      <td>
+      <td class="summary-value">
         {{formatNull view.service.topologies}}
       </td>
     </tr>
-    <tr>
+    <tr class="nimbus-uptime">
       <td class="summary-label">
         {{t services.service.summary.storm.nimbus.uptime}}
       </td>
-      <td>
+      <td class="summary-value">
         {{view.nimbusUptimeFormatted}}
       </td>
     </tr>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/main/service/services/yarn.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/services/yarn.hbs b/ambari-web/app/templates/main/service/services/yarn.hbs
index cd61ee7..401b4a7 100644
--- a/ambari-web/app/templates/main/service/services/yarn.hbs
+++ b/ambari-web/app/templates/main/service/services/yarn.hbs
@@ -18,10 +18,11 @@
 {{view view.dashboardMasterComponentView}}
 
 <!-- NodeManagers -->
-<tr>
-  <td class="summary-label"><a
-          href="#" {{action filterHosts view.nodeManagerComponent}}>{{t dashboard.services.yarn.nodeManagers}}</a></td>
-  <td>
+<tr {{bindAttr class=":component view.nodeManagerComponent.componentName"}}>
+  <td class="summary-label">
+    <a href="#" {{action filterHosts view.nodeManagerComponent}}>{{t dashboard.services.yarn.nodeManagers}}</a>
+  </td>
+  <td class="summary-value">
     {{#if App.router.clusterController.isServiceContentFullyLoaded}}
       <span>
         {{#view App.ComponentLiveTextView liveComponentsBinding="view.service.nodeManagersStarted" totalComponentsBinding="view.service.nodeManagersTotal"}}
@@ -34,24 +35,24 @@
   </td>
 </tr>
 <!-- NodeManagers status -->
-<tr>
+<tr class="nodemanager-status">
   <td class="summary-label">{{t dashboard.services.yarn.nodeManagers.status}}</td>
-  <td>{{view.nodeManagersStatus}}</td>
+  <td class="summary-value">{{view.nodeManagersStatus}}</td>
 </tr>
 <!-- YARN Clients -->
-<tr>
+<tr {{bindAttr class=":component view.yarnClientComponent.componentName"}}>
   <td class="summary-label"><a {{action filterHosts view.yarnClientComponent}}
           href="javascript:void(null)">{{pluralize view.service.installedClients singular="t:dashboard.services.yarn.client" plural="t:dashboard.services.yarn.clients"}}</a>
   </td>
-  <td>
+  <td class="summary-value">
     <span class="green-live">{{view.service.installedClients}} </span>
     {{pluralize view.service.installedClients singular="t:dashboard.services.yarn.client" plural="t:dashboard.services.yarn.clients"}} {{t common.installed}}
   </td>
 </tr>
 <!-- ResourceManager Uptime -->
-<tr>
+<tr class="resourcemanager-uptime">
   <td class="summary-label">{{t dashboard.services.yarn.resourceManager.uptime}}</td>
-  <td>{{view.nodeUptime}}</td>
+  <td class="summary-value">{{view.nodeUptime}}</td>
 </tr>
 
 <!-- Divider-- make the remaining summary info on 2nd table-->
@@ -63,29 +64,31 @@
     <tbody>
 
     <!-- ResourceManager Heap -->
-    <tr>
+    <tr class="resourcemanager-heap">
       <td class="summary-label">{{t dashboard.services.resourceManager.nodes.heap}}</td>
-      <td>{{view.nodeHeap}}</td>
+      <td class="summary-value">{{view.nodeHeap}}</td>
     </tr>
     <!-- Containers -->
-    <tr>
+    <tr class="yarn-containers">
       <td class="summary-label">{{t dashboard.services.yarn.containers}}</td>
-      <td>{{view.containers}}</td>
+      <td class="summary-value">{{view.containers}}</td>
     </tr>
     <!-- Applications -->
-    <tr>
+    <tr class="yarn-applications">
       <td class="summary-label">{{t dashboard.services.yarn.apps}}</td>
-      <td>{{view.apps}}</td>
+      <td class="summary-value">{{view.apps}}</td>
     </tr>
     <!-- Memory -->
-    <tr>
+    <tr class="yarn-memory">
       <td class="summary-label">{{t dashboard.services.yarn.memory}}</td>
-      <td>{{view.memory}}</td>
+      <td class="summary-value">{{view.memory}}</td>
     </tr>
     <!-- Queues -->
-    <tr>
+    <tr class="yarn-queues">
       <td class="summary-label">{{t dashboard.services.yarn.queues}}</td>
-      <td><span rel="queue-tooltip"
-                class="text-tooltip" {{bindAttr data-original-title="view.service.queueFormatted" }}>{{view.queues}}</span>
+      <td class="summary-value">
+        <span rel="queue-tooltip" class="text-tooltip" {{bindAttr data-original-title="view.service.queueFormatted" }}>
+          {{view.queues}}
+        </span>
       </td>
     </tr>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/wizard/step4.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/wizard/step4.hbs b/ambari-web/app/templates/wizard/step4.hbs
index 759e9b9..cc4c68f 100644
--- a/ambari-web/app/templates/wizard/step4.hbs
+++ b/ambari-web/app/templates/wizard/step4.hbs
@@ -25,17 +25,17 @@
   <table class="table table-striped">
     <thead>
     <tr>
-      <th class="span3">
+      <th class="span3" id="service-name">
           {{view Ember.Checkbox disabledBinding="isInstalled" checkedBinding="isAllChecked"}}{{t common.service}}
       </th>
-      <th>{{t common.version}}</th>
-      <th>{{t common.description}}</th>
+      <th id="service-version">{{t common.version}}</th>
+      <th id="service-description">{{t common.description}}</th>
     </tr>
     </thead>
     <tbody>
     {{#each controller}}
       {{#unless isHiddenOnSelectServicePage}}
-        <tr {{bindAttr class="isSelected:success:"}}>
+        <tr {{bindAttr class="isSelected:success isSelected:service-selected"}}>
           <td><label class="checkbox">{{view Ember.Checkbox classBinding="serviceName"
           disabledBinding="isInstalled"
           checkedBinding="isSelected"}}{{displayNameOnSelectServicePage}}</label>
@@ -54,4 +54,4 @@
     {{/unless}}
     <a class="btn btn-success pull-right" {{bindAttr disabled="isSubmitDisabled"}} {{action submit target="controller"}}> {{t common.next}} &rarr;</a>
   </div>
-</div>
\ No newline at end of file
+</div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/wizard/step6.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/wizard/step6.hbs b/ambari-web/app/templates/wizard/step6.hbs
index 29ed76c..36d93f9 100644
--- a/ambari-web/app/templates/wizard/step6.hbs
+++ b/ambari-web/app/templates/wizard/step6.hbs
@@ -32,13 +32,13 @@
     <table class="table" id="component_assign_table">
       <thead>
       <tr>
-        <th>{{t common.host}}</th>
+        <th class="host-column">{{t common.host}}</th>
         {{#each header in controller.headers}}
 
-          <th>
-            <a href="#" {{bindAttr class="header.allChecked:selected:deselected header.isDisabled:remove-link"}}
+          <th {{bindAttr class="header.name"}}>
+            <a href="#" {{bindAttr class="header.allChecked:selected:deselected header.isDisabled:remove-link" id="header.allId"}}
               {{action "selectAllNodes" header target="controller"}}>{{t all}}</a> &nbsp;|&nbsp; <a
-                  href="#" {{bindAttr class="header.noChecked:selected:deselected header.isDisabled:remove-link"}}
+                  href="#" {{bindAttr class="header.noChecked:selected:deselected header.isDisabled:remove-link" id="header.noneId"}}
             {{action "deselectAllNodes" header target="controller"}}>{{t none}}</a>
           </th>
 
@@ -55,7 +55,7 @@
               {{/if}}
             {{/view}}
             {{#each checkbox in host.checkboxes}}
-              <td {{bindAttr class="checkbox.hasErrorMessage:error checkbox.hasWarnMessage:warning"}}>
+              <td {{bindAttr class="checkbox.hasErrorMessage:error checkbox.hasWarnMessage:warning checkbox.component"}}>
                 <label class="checkbox">
                   <input {{bindAttr checked = "checkbox.checked" disabled="checkbox.isDisabled"}} {{action "checkboxClick" checkbox target="view" }}
                           type="checkbox"/>{{checkbox.title}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/templates/wizard/step9.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/wizard/step9.hbs b/ambari-web/app/templates/wizard/step9.hbs
index 671efaa..e65d4e1 100644
--- a/ambari-web/app/templates/wizard/step9.hbs
+++ b/ambari-web/app/templates/wizard/step9.hbs
@@ -90,7 +90,7 @@
                 </div>
                 <div class="progress-percentage pull-left">{{host.progress}}%</div>
               </td>
-              <td>
+              <td class="host-message">
                 <a {{bindAttr class="view.isFailed:text-error view.isSuccess:text-success view.isWarning:text-warning"}}
                     href="javascript:void(null)"
                     data-toggle="modal" {{action hostLogPopup target="view"}}>{{host.message}}</a>
@@ -141,4 +141,4 @@
     </div>
   </div>
 
-</div>
\ No newline at end of file
+</div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/utils/ajax/ajax.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/ajax/ajax.js b/ambari-web/app/utils/ajax/ajax.js
index 52f3aca..7fc1a23 100644
--- a/ambari-web/app/utils/ajax/ajax.js
+++ b/ambari-web/app/utils/ajax/ajax.js
@@ -2872,6 +2872,7 @@ var ajax = Em.Object.extend({
 
     if (showStatus.contains(jqXHR.status) && !this.get('modalPopup')) {
       this.set('modalPopup', App.ModalPopup.show({
+        elementId: 'default-error-modal',
         header: Em.I18n.t('common.error'),
         secondary: false,
         onPrimary: function () {

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/utils/hosts.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/hosts.js b/ambari-web/app/utils/hosts.js
index 20acad5..90b746c 100644
--- a/ambari-web/app/utils/hosts.js
+++ b/ambari-web/app/utils/hosts.js
@@ -53,6 +53,8 @@ module.exports = {
 
       classNames: [ 'sixty-percent-width-modal' ],
 
+      elementId: 'host-selection-dialog',
+
       header: popupDescription.header,
 
       dialogMessage: popupDescription.dialogMessage,

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/views/common/assign_master_components_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/assign_master_components_view.js b/ambari-web/app/views/common/assign_master_components_view.js
index d96eca2..f2c0ab0 100644
--- a/ambari-web/app/views/common/assign_master_components_view.js
+++ b/ambari-web/app/views/common/assign_master_components_view.js
@@ -150,6 +150,14 @@ App.SelectHostView = Em.Select.extend(App.SelectHost, {
 App.AddControlView = Em.View.extend({
 
   /**
+   * DOM node class attribute
+   * @type {string}
+   */
+  uniqueId: function() {
+    return this.get('componentName') + '-add';
+  }.property('componentName'),
+
+  /**
    * Current component name
    * @type {string}
    */
@@ -159,6 +167,8 @@ App.AddControlView = Em.View.extend({
 
   classNames: ["badge", "badge-important"],
 
+  classNameBindings: ['uniqueId'],
+
   template: Em.Handlebars.compile('+'),
 
   /**
@@ -172,6 +182,15 @@ App.AddControlView = Em.View.extend({
 });
 
 App.RemoveControlView = Em.View.extend({
+  /**
+   * DOM node class attribute
+   * @type {string}
+   */
+  uniqueId: function() {
+    return this.get('componentName') + '-' + this.get('serviceComponentId') + '-remove';
+  }.property('componentName', 'serviceComponentId'),
+
+  classNameBindings: ['uniqueId'],
 
   /**
    * Index for multiple component

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/views/common/configs/compare_property_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/configs/compare_property_view.js b/ambari-web/app/views/common/configs/compare_property_view.js
index 900d374..b8b022d 100644
--- a/ambari-web/app/views/common/configs/compare_property_view.js
+++ b/ambari-web/app/views/common/configs/compare_property_view.js
@@ -19,6 +19,7 @@
 var App = require('app');
 
 App.ServiceConfigView.SCPComparisonRowsView = Ember.View.extend({
+  classNames: ['compare-value'],
   templateName: require('templates/common/configs/compare_property'),
   serviceConfigProperty: null
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/views/common/configs/overriddenProperty_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/configs/overriddenProperty_view.js b/ambari-web/app/views/common/configs/overriddenProperty_view.js
index 0e8c8e7..2dfe7d7 100644
--- a/ambari-web/app/views/common/configs/overriddenProperty_view.js
+++ b/ambari-web/app/views/common/configs/overriddenProperty_view.js
@@ -19,6 +19,7 @@
 var App = require('app');
 
 App.ServiceConfigView.SCPOverriddenRowsView = Ember.View.extend({
+  classNames: ['overriden-value'],
   templateName: require('templates/common/configs/overriddenProperty'),
   serviceConfigProperty: null, // is passed dynamically at runtime where ever
   // we are declaring this from configs.hbs ( we are initializing this from UI )

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/views/common/modal_popups/dependent_configs_list_popup.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/modal_popups/dependent_configs_list_popup.js b/ambari-web/app/views/common/modal_popups/dependent_configs_list_popup.js
index 6442838..684598e 100644
--- a/ambari-web/app/views/common/modal_popups/dependent_configs_list_popup.js
+++ b/ambari-web/app/views/common/modal_popups/dependent_configs_list_popup.js
@@ -32,6 +32,7 @@ App.showDependentConfigsPopup = function (configs, primary, secondary) {
     header: Em.I18n.t('popup.dependent.configs.header'),
     classNames: ['sixty-percent-width-modal','modal-full-width'],
     configs: configs,
+    secondaryClass: 'cancel-button',
     bodyClass: Em.View.extend({
       templateName: require('templates/common/modal_popups/dependent_configs_list'),
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/d259917d/ambari-web/app/views/wizard/step3/hostWarningPopupBody_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/wizard/step3/hostWarningPopupBody_view.js b/ambari-web/app/views/wizard/step3/hostWarningPopupBody_view.js
index b9d43a5..9c6454e 100644
--- a/ambari-web/app/views/wizard/step3/hostWarningPopupBody_view.js
+++ b/ambari-web/app/views/wizard/step3/hostWarningPopupBody_view.js
@@ -33,6 +33,7 @@ App.WizardStep3HostWarningPopupBody = Em.View.extend({
    * @type {Ember.Select}
    */
   hostSelectView: Em.Select.extend({
+    elementId: 'hosts-check-select',
 
     selectionBinding: "parentView.category",
 
@@ -479,4 +480,4 @@ App.WizardStep3HostWarningPopupBody = Em.View.extend({
     newWindow.focus();
   }
 
-});
\ No newline at end of file
+});


[37/50] [abbrv] ambari git commit: AMBARI-13534. Derived properties when Ranger plugin is enabled should be recommended by stack advisor. (jaimin)

Posted by nc...@apache.org.
AMBARI-13534. Derived properties when Ranger plugin is enabled should be recommended by stack advisor. (jaimin)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3864bc16
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3864bc16
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3864bc16

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 3864bc161ecde5845e6cf105412189a7b1ba3574
Parents: b2f306d
Author: Jaimin Jetly <ja...@hortonworks.com>
Authored: Thu Oct 22 17:29:48 2015 -0700
Committer: Jaimin Jetly <ja...@hortonworks.com>
Committed: Thu Oct 22 17:30:52 2015 -0700

----------------------------------------------------------------------
 .../0.8.1.2.2/configuration/kafka-log4j.xml     |   6 +
 .../KNOX/0.5.0.2.2/configuration/topology.xml   |   6 +
 .../services/HBASE/configuration/hbase-site.xml |  77 +++++++
 .../services/STORM/configuration/storm-site.xml |  12 ++
 .../stacks/HDP/2.2/services/stack_advisor.py    | 104 +++++++++-
 .../services/HDFS/configuration/hdfs-site.xml   |  15 ++
 .../KAFKA/configuration/kafka-broker.xml        |  13 ++
 .../services/YARN/configuration/yarn-site.xml   |  24 +++
 .../stacks/HDP/2.3/services/stack_advisor.py    |  95 ++++++++-
 .../stacks/2.2/common/test_stack_advisor.py     | 128 +++++++++++-
 .../stacks/2.3/common/test_stack_advisor.py     | 204 +++++++++++++++++++
 .../configs/modification_handlers/hbase.js      | 107 ----------
 .../utils/configs/modification_handlers/hdfs.js |  55 -----
 .../configs/modification_handlers/kafka.js      |  71 -------
 .../utils/configs/modification_handlers/knox.js |  67 ------
 .../configs/modification_handlers/storm.js      |  70 -------
 .../utils/configs/modification_handlers/yarn.js |  71 -------
 17 files changed, 659 insertions(+), 466 deletions(-)
----------------------------------------------------------------------

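The stack_advisor.py changes later in this message rely on the advisor's putProperty helper, which returns a setter bound to one configuration type. The following is a rough, self-contained sketch of that pattern, not the actual Ambari implementation; the real helper also receives a services argument (and config-group handling) that we omit here.

# Minimal sketch of the putProperty closure pattern used below, e.g.
#   putHbaseRangerPluginProperty = self.putProperty(configurations, "ranger-hbase-plugin-properties", services)
#   putHbaseRangerPluginProperty("ranger-hbase-plugin-enabled", rangerEnvHbasePluginProperty)
# put_property here is a simplified stand-in for self.putProperty.
def put_property(configurations, config_type):
    configurations.setdefault(config_type, {}).setdefault('properties', {})
    def setter(name, value):
        configurations[config_type]['properties'][name] = value
    return setter

configurations = {}
put_ranger = put_property(configurations, 'ranger-hbase-plugin-properties')
put_ranger('ranger-hbase-plugin-enabled', 'Yes')
assert configurations['ranger-hbase-plugin-properties']['properties'] \
       ['ranger-hbase-plugin-enabled'] == 'Yes'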

http://git-wip-us.apache.org/repos/asf/ambari/blob/3864bc16/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/configuration/kafka-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/configuration/kafka-log4j.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/configuration/kafka-log4j.xml
index e18732d..e8e785f 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/configuration/kafka-log4j.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/configuration/kafka-log4j.xml
@@ -114,6 +114,12 @@ log4j.additivity.state.change.logger=false
     <value-attributes>
       <show-property-name>false</show-property-name>
     </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-kafka-plugin-properties</type>
+        <name>ranger-kafka-plugin-enabled</name>
+      </property>
+    </depends-on>
   </property>
 
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3864bc16/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/topology.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/topology.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/topology.xml
index eae1f34..636de97 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/topology.xml
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/topology.xml
@@ -122,5 +122,11 @@
        <empty-value-valid>true</empty-value-valid>
        <show-property-name>false</show-property-name>
     </value-attributes>
+        <depends-on>
+            <property>
+                <type>ranger-knox-plugin-properties</type>
+                <name>ranger-knox-plugin-enabled</name>
+            </property>
+        </depends-on>
     </property>
 </configuration>

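The <depends-on> blocks added to kafka-log4j.xml and topology.xml declare which property edits should trigger a fresh stack-advisor recommendation for the property carrying the block. A hedged illustration of that relationship as data follows; the dependent property names ('content') are our assumption, since the hunks only show the tail of each property definition.

# Illustrative only: <depends-on> read as a reverse dependency map. The trigger
# (type, name) pairs come from this diff; the dependent names are assumed.
DEPENDENTS = {
    ('ranger-kafka-plugin-properties', 'ranger-kafka-plugin-enabled'): [
        ('kafka-log4j', 'content'),   # assumed dependent property
    ],
    ('ranger-knox-plugin-properties', 'ranger-knox-plugin-enabled'): [
        ('topology', 'content'),      # assumed dependent property
    ],
}

def needs_recommendation(changed):
    """Return the dependent (type, name) pairs for a set of changed properties."""
    out = []
    for key in changed:
        out.extend(DEPENDENTS.get(key, []))
    return out

print(needs_recommendation({('ranger-kafka-plugin-properties',
                             'ranger-kafka-plugin-enabled')}))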
http://git-wip-us.apache.org/repos/asf/ambari/blob/3864bc16/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-site.xml
index cdb0391..3c9b390 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-site.xml
@@ -230,4 +230,81 @@
       <increment-step>0.01</increment-step>
     </value-attributes>
   </property>
+  <property>
+    <name>hbase.coprocessor.master.classes</name>
+    <value></value>
+    <description>A comma-separated list of
+      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
+      loaded by default on the active HMaster process. For any implemented
+      coprocessor methods, the listed classes will be called in order. After
+      implementing your own MasterObserver, just put it in HBase's classpath
+      and add the fully qualified class name here.
+    </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hbase-site</type>
+        <name>hbase.security.authorization</name>
+      </property>
+      <property>
+        <type>ranger-hbase-plugin-properties</type>
+        <name>ranger-hbase-plugin-enabled</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>hbase.coprocessor.region.classes</name>
+    <value>org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint</value>
+    <description>A comma-separated list of Coprocessors that are loaded by
+      default on all tables. For any override coprocessor method, these classes
+      will be called in order. After implementing your own Coprocessor, just put
+      it in HBase's classpath and add the fully qualified class name here.
+      A coprocessor can also be loaded on demand by setting HTableDescriptor.
+    </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hbase-site</type>
+        <name>hbase.security.authorization</name>
+      </property>
+      <property>
+        <type>hbase-site</type>
+        <name>hbase.security.authentication</name>
+      </property>
+      <property>
+        <type>ranger-hbase-plugin-properties</type>
+        <name>ranger-hbase-plugin-enabled</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>hbase.security.authorization</name>
+    <value>false</value>
+    <description> Set Authorization Method.</description>
+    <display-name>Enable Authorization</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Native</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-hbase-plugin-properties</type>
+        <name>ranger-hbase-plugin-enabled</name>
+      </property>
+    </depends-on>
+  </property>
 </configuration>

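When the Ranger HBase plugin is enabled, the stack_advisor.py hunks further below rewrite the coprocessor lists referenced by the hbase-site properties above: split the comma-separated value, drop empty elements and duplicates, and swap the non-Ranger AccessController for the Ranger coprocessor. A simplified standalone sketch of that list surgery; the function name is ours, and the final append of the Ranger class is inferred, since the hunk is truncated in this message.

# Class names taken from the diff; the dedup here preserves order, whereas the
# actual code joins a set().
RANGER_CLASS = 'com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor'
NON_RANGER_CLASS = 'org.apache.hadoop.hbase.security.access.AccessController'

def rewrite_coprocessors(value, ranger_enabled):
    classes = [c for c in value.split(',') if c]   # drop empty string elements
    seen, unique = set(), []
    for c in classes:                              # order-preserving dedup
        if c not in seen:
            seen.add(c)
            unique.append(c)
    if ranger_enabled:
        unique = [c for c in unique if c != NON_RANGER_CLASS]
        if RANGER_CLASS not in unique:             # inferred append step
            unique.append(RANGER_CLASS)
    return ','.join(unique)

print(rewrite_coprocessors(
    'org.apache.hadoop.hbase.security.access.AccessController,,a.b.MyObserver',
    True))
# -> 'a.b.MyObserver,com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor'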
http://git-wip-us.apache.org/repos/asf/ambari/blob/3864bc16/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-site.xml
index 00a1391..29dc700 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-site.xml
@@ -109,4 +109,16 @@
     <value>{{log_dir}}</value>
     <description>Log directory for Storm.</description>
   </property>
+
+  <property>
+    <name>nimbus.authorizer</name>
+    <value>backtype.storm.security.auth.authorizer.SimpleACLAuthorizer</value>
+    <description>Authorizer plugin class used by Nimbus.</description>
+    <depends-on>
+      <property>
+        <type>ranger-storm-plugin-properties</type>
+        <name>ranger-storm-plugin-enabled</name>
+      </property>
+    </depends-on>
+  </property>
 </configuration>

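The stack_advisor.py diff that follows repeatedly applies one lookup rule: a value freshly recommended in configurations wins over the currently deployed value in services['configurations']. A minimal sketch of that precedence, with resolve() as our name for what Ambari writes inline as an if/elif chain:

# Sketch only; mirrors the dict layout visible in the diff below.
def resolve(configurations, services, config_type, name, default=''):
    for source in (configurations, services.get('configurations', {})):
        props = source.get(config_type, {}).get('properties', {})
        if name in props:
            return props[name]
    return default

services = {'configurations': {'ranger-hbase-plugin-properties':
            {'properties': {'ranger-hbase-plugin-enabled': 'No'}}}}
configurations = {'ranger-hbase-plugin-properties':
                  {'properties': {'ranger-hbase-plugin-enabled': 'Yes'}}}
assert resolve(configurations, services, 'ranger-hbase-plugin-properties',
               'ranger-hbase-plugin-enabled') == 'Yes'   # recommended value wins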
http://git-wip-us.apache.org/repos/asf/ambari/blob/3864bc16/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index cf9c91e..707a641 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -23,6 +23,7 @@ from urlparse import urlparse
 import os
 import fnmatch
 import socket
+import re
 
 class HDP22StackAdvisor(HDP21StackAdvisor):
 
@@ -569,13 +570,20 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
               ('hbase-site' in services['configurations'] and 'phoenix.functions.allowUserDefinedFunctions' in services['configurations']["hbase-site"]["properties"]):
         putHbaseSitePropertyAttributes('phoenix.functions.allowUserDefinedFunctions', 'delete', 'true')
 
-    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-    if 'ranger-hbase-plugin-properties' in services['configurations'] and ('ranger-hbase-plugin-enabled' in services['configurations']['ranger-hbase-plugin-properties']['properties']):
+    if "ranger-env" in services["configurations"] and "ranger-hbase-plugin-properties" in services["configurations"] and \
+        "ranger-hbase-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
+      putHbaseRangerPluginProperty = self.putProperty(configurations, "ranger-hbase-plugin-properties", services)
+      rangerEnvHbasePluginProperty = services["configurations"]["ranger-env"]["properties"]["ranger-hbase-plugin-enabled"]
+      putHbaseRangerPluginProperty("ranger-hbase-plugin-enabled", rangerEnvHbasePluginProperty)
+
+    rangerPluginEnabled = ''
+    if 'ranger-hbase-plugin-properties' in configurations and 'ranger-hbase-plugin-enabled' in  configurations['ranger-hbase-plugin-properties']['properties']:
+      rangerPluginEnabled = configurations['ranger-hbase-plugin-properties']['properties']['ranger-hbase-plugin-enabled']
+    elif 'ranger-hbase-plugin-properties' in services['configurations'] and 'ranger-hbase-plugin-enabled' in services['configurations']['ranger-hbase-plugin-properties']['properties']:
       rangerPluginEnabled = services['configurations']['ranger-hbase-plugin-properties']['properties']['ranger-hbase-plugin-enabled']
-      if ("RANGER" in servicesList) and (rangerPluginEnabled.lower() == "Yes".lower()):
-        putHbaseSiteProperty("hbase.security.authorization", 'true')
-        putHbaseSiteProperty("hbase.coprocessor.master.classes", 'com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor')
-        putHbaseSiteProperty("hbase.coprocessor.region.classes", 'com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor')
+
+    if rangerPluginEnabled and rangerPluginEnabled.lower() == 'Yes'.lower():
+      putHbaseSiteProperty('hbase.security.authorization','true')
 
     # Recommend configs for bucket cache
     threshold = 23 # 2 Gb is reserved for other offheap memory
@@ -670,11 +678,38 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
     [uniqueCoprocessorRegionClassList.append(i) for i in coprocessorRegionClassList if not uniqueCoprocessorRegionClassList.count(i)]
     putHbaseSiteProperty('hbase.coprocessor.region.classes', ','.join(set(uniqueCoprocessorRegionClassList)))
 
-    if "ranger-env" in services["configurations"] and "ranger-hbase-plugin-properties" in services["configurations"] and \
-        "ranger-hbase-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
-      putHbaseRangerPluginProperty = self.putProperty(configurations, "ranger-hbase-plugin-properties", services)
-      rangerEnvHbasePluginProperty = services["configurations"]["ranger-env"]["properties"]["ranger-hbase-plugin-enabled"]
-      putHbaseRangerPluginProperty("ranger-hbase-plugin-enabled", rangerEnvHbasePluginProperty)
+    stackVersion = services["Versions"]["stack_version"]
+
+    if stackVersion == '2.2':
+      rangerClass = 'com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor'
+    else:
+      rangerClass = 'org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor'
+
+    nonRangerClass = 'org.apache.hadoop.hbase.security.access.AccessController'
+    hbaseClassConfigs = ['hbase.coprocessor.master.classes', 'hbase.coprocessor.region.classes']
+
+    for item in range(len(hbaseClassConfigs)):
+      if hbaseClassConfigs[item] in services['configurations']['hbase-site']['properties']:
+        if 'hbase-site' in configurations and hbaseClassConfigs[item] in configurations['hbase-site']['properties']:
+          coprocessorConfig = configurations['hbase-site']['properties'][hbaseClassConfigs[item]]
+        else:
+          coprocessorConfig = services['configurations']['hbase-site']['properties'][hbaseClassConfigs[item]]
+        coprocessorClasses = coprocessorConfig.split(",")
+        coprocessorClasses = filter(None, coprocessorClasses) # Removes empty string elements from array
+        if rangerPluginEnabled and rangerPluginEnabled.lower() == 'Yes'.lower():
+          if nonRangerClass in coprocessorClasses:
+            coprocessorClasses.remove(nonRangerClass)
+          if not rangerClass in coprocessorClasses:
+            coprocessorClasses.append(rangerClass)
+          putHbaseSiteProperty(hbaseClassConfigs[item], ','.join(coprocessorClasses))
+        elif rangerPluginEnabled and rangerPluginEnabled.lower() == 'No'.lower():
+          if rangerClass in coprocessorClasses:
+            coprocessorClasses.remove(rangerClass)
+            if not nonRangerClass in coprocessorClasses:
+              coprocessorClasses.append(nonRangerClass)
+            putHbaseSiteProperty(hbaseClassConfigs[item], ','.join(coprocessorClasses))
+      elif rangerPluginEnabled and rangerPluginEnabled.lower() == 'Yes'.lower():
+        putHbaseSiteProperty(hbaseClassConfigs[item], rangerClass)
 
 
   def recommendTezConfigurations(self, configurations, clusterData, services, hosts):
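The loop over hbaseClassConfigs above keeps hbase.coprocessor.master.classes and hbase.coprocessor.region.classes consistent with the plugin flag: enabling Ranger removes the stock AccessController and appends the stack-appropriate Ranger coprocessor, while disabling only swaps back if the Ranger class was actually present. The same rule as a small pure function (an illustrative sketch with a hypothetical name, not the advisor API):

    # Sketch: toggle the Ranger coprocessor in a comma-separated class list.
    def toggle_ranger_coprocessor(classes_csv, ranger_enabled, stack_version='2.2'):
        ranger_cls = ('com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor'
                      if stack_version == '2.2' else
                      'org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor')
        non_ranger_cls = 'org.apache.hadoop.hbase.security.access.AccessController'
        classes = [c for c in classes_csv.split(',') if c]  # drop empty entries
        if ranger_enabled:
            if non_ranger_cls in classes:
                classes.remove(non_ranger_cls)
            if ranger_cls not in classes:
                classes.append(ranger_cls)
        elif ranger_cls in classes:
            # only swap back if the Ranger class was in place, as the loop does
            classes.remove(ranger_cls)
            if non_ranger_cls not in classes:
                classes.append(non_ranger_cls)
        return ','.join(classes)

    print(toggle_ranger_coprocessor(
        'org.apache.hadoop.hbase.security.access.AccessController', True))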
@@ -732,12 +767,36 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
 
 
   def recommendStormConfigurations(self, configurations, clusterData, services, hosts):
+    putStormSiteProperty = self.putProperty(configurations, "storm-site", services)
+    putStormSiteAttributes = self.putPropertyAttribute(configurations, "storm-site")
+    core_site = services["configurations"]["core-site"]["properties"]
+    stackVersion = services["Versions"]["stack_version"]
     if "ranger-env" in services["configurations"] and "ranger-storm-plugin-properties" in services["configurations"] and \
         "ranger-storm-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
       putStormRangerPluginProperty = self.putProperty(configurations, "ranger-storm-plugin-properties", services)
       rangerEnvStormPluginProperty = services["configurations"]["ranger-env"]["properties"]["ranger-storm-plugin-enabled"]
       putStormRangerPluginProperty("ranger-storm-plugin-enabled", rangerEnvStormPluginProperty)
 
+    rangerPluginEnabled = ''
+    if 'ranger-storm-plugin-properties' in configurations and 'ranger-storm-plugin-enabled' in  configurations['ranger-storm-plugin-properties']['properties']:
+      rangerPluginEnabled = configurations['ranger-storm-plugin-properties']['properties']['ranger-storm-plugin-enabled']
+    elif 'ranger-storm-plugin-properties' in services['configurations'] and 'ranger-storm-plugin-enabled' in services['configurations']['ranger-storm-plugin-properties']['properties']:
+      rangerPluginEnabled = services['configurations']['ranger-storm-plugin-properties']['properties']['ranger-storm-plugin-enabled']
+
+    nonRangerClass = 'backtype.storm.security.auth.authorizer.SimpleACLAuthorizer'
+    if stackVersion == '2.2':
+      rangerClass = 'com.xasecure.authorization.storm.authorizer.XaSecureStormAuthorizer'
+    else:
+      rangerClass = 'org.apache.ranger.authorization.storm.authorizer.RangerStormAuthorizer'
+    # Cluster is kerberized
+    if ('hadoop.security.authentication' in core_site and core_site['hadoop.security.authentication'] == 'kerberos'):
+      if rangerPluginEnabled and (rangerPluginEnabled.lower() == 'Yes'.lower()):
+        putStormSiteProperty('nimbus.authorizer',rangerClass)
+      elif (services["configurations"]["storm-site"]["properties"]["nimbus.authorizer"] == rangerClass):
+        putStormSiteProperty('nimbus.authorizer', nonRangerClass)
+    else:
+      putStormSiteAttributes('nimbus.authorizer', 'delete', 'true')
+
   def recommendKnoxConfigurations(self, configurations, clusterData, services, hosts):
     if "ranger-env" in services["configurations"] and "ranger-knox-plugin-properties" in services["configurations"] and \
         "ranger-knox-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
@@ -745,6 +804,29 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
       rangerEnvKnoxPluginProperty = services["configurations"]["ranger-env"]["properties"]["ranger-knox-plugin-enabled"]
       putKnoxRangerPluginProperty("ranger-knox-plugin-enabled", rangerEnvKnoxPluginProperty)
 
+    if 'topology' in services["configurations"] and 'content' in services["configurations"]["topology"]["properties"]:
+      putKnoxTopologyContent = self.putProperty(configurations, "topology", services)
+      rangerPluginEnabled = ''
+      if 'ranger-knox-plugin-properties' in configurations and 'ranger-knox-plugin-enabled' in  configurations['ranger-knox-plugin-properties']['properties']:
+        rangerPluginEnabled = configurations['ranger-knox-plugin-properties']['properties']['ranger-knox-plugin-enabled']
+      elif 'ranger-knox-plugin-properties' in services['configurations'] and 'ranger-knox-plugin-enabled' in services['configurations']['ranger-knox-plugin-properties']['properties']:
+        rangerPluginEnabled = services['configurations']['ranger-knox-plugin-properties']['properties']['ranger-knox-plugin-enabled']
+      topologyContent = services["configurations"]["topology"]["properties"]["content"]
+      authPattern = "<provider>\s*<role>\s*authorization\s*</role>[\s\S]*?</provider>"
+      authXml = re.search(authPattern, topologyContent)
+
+      if authXml:
+        authNamePattern = "<name>\s*(.*?)\s*</name>"
+        authName = re.search(authNamePattern, authXml.group(0))
+        newAuthName = ''
+        if authName and authName.group(1) == 'AclsAuthz' and rangerPluginEnabled and rangerPluginEnabled.lower() == "Yes".lower():
+          newAuthName = authName.group(0).replace('AclsAuthz', 'XASecurePDPKnox')
+        elif authName and ((not rangerPluginEnabled) or rangerPluginEnabled.lower() != "Yes".lower()) and authName.group(1) == 'XASecurePDPKnox':
+          newAuthName = authName.group(0).replace('XASecurePDPKnox', 'AclsAuthz')
+        if newAuthName:
+          newAuthxml = authXml.group(0).replace(authName.group(0), newAuthName)
+          newTopologyXmlContent = topologyContent.replace(authXml.group(0), newAuthxml)
+          putKnoxTopologyContent('content', newTopologyXmlContent)
 
 
   def getServiceConfigurationValidators(self):
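The Knox branch above edits the topology payload with regular expressions rather than an XML parser: it locates the <provider> block whose role is authorization, reads its <name>, and swaps AclsAuthz and XASecurePDPKnox according to the plugin flag. A standalone sketch of the same substitution, with explicit None guards since re.search returns None when a pattern is absent (illustrative only, not the advisor code):

    import re

    AUTH_PROVIDER = re.compile(
        r'<provider>\s*<role>\s*authorization\s*</role>[\s\S]*?</provider>')
    AUTH_NAME = re.compile(r'<name>\s*(.*?)\s*</name>')

    def swap_knox_authorizer(topology_xml, ranger_enabled):
        block = AUTH_PROVIDER.search(topology_xml)
        if block is None:
            return topology_xml            # no authorization provider declared
        name = AUTH_NAME.search(block.group(0))
        old, new = (('AclsAuthz', 'XASecurePDPKnox') if ranger_enabled
                    else ('XASecurePDPKnox', 'AclsAuthz'))
        if name is None or name.group(1) != old:
            return topology_xml            # nothing to rewrite
        new_block = block.group(0).replace(name.group(0),
                                           name.group(0).replace(old, new))
        return topology_xml.replace(block.group(0), new_block)

    xml = ('<topology><provider><role>authorization</role>'
           '<name>AclsAuthz</name></provider></topology>')
    print(swap_knox_authorizer(xml, True))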

http://git-wip-us.apache.org/repos/asf/ambari/blob/3864bc16/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hdfs-site.xml
index df2f3fe..c856ad3 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hdfs-site.xml
@@ -54,4 +54,19 @@
     </description>
   </property>
 
+  <property>
+    <name>dfs.namenode.inode.attributes.provider.class</name>
+    <value>org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer</value>
+    <description>INode attribute provider class; set to the Ranger authorizer when the Ranger HDFS plugin is enabled.</description>
+    <depends-on>
+      <property>
+        <type>ranger-hdfs-plugin-properties</type>
+        <name>ranger-hdfs-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3864bc16/ambari-server/src/main/resources/stacks/HDP/2.3/services/KAFKA/configuration/kafka-broker.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/KAFKA/configuration/kafka-broker.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/KAFKA/configuration/kafka-broker.xml
index 6b69653..896db6f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/KAFKA/configuration/kafka-broker.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/KAFKA/configuration/kafka-broker.xml
@@ -138,4 +138,17 @@
       These metrics would be included even if the exclude prefix omits them.
     </description>
   </property>
+  <property>
+    <name>authorizer.class.name</name>
+    <value>kafka.security.auth.SimpleAclAuthorizer</value>
+    <description>
+      Kafka authorizer class
+    </description>
+    <depends-on>
+      <property>
+        <type>ranger-kafka-plugin-properties</type>
+        <name>ranger-kafka-plugin-enabled</name>
+      </property>
+    </depends-on>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3864bc16/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/yarn-site.xml
index 12a8a21..7b91d59 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/yarn-site.xml
@@ -32,6 +32,30 @@
   </property>
 
   <property>
+    <name>yarn.acl.enable</name>
+    <value>false</value>
+    <description> Are acls enabled. </description>
+    <depends-on>
+      <property>
+        <type>ranger-yarn-plugin-properties</type>
+        <name>ranger-yarn-plugin-enabled</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>yarn.authorization-provider</name>
+    <value>org.apache.ranger.authorization.yarn.authorizer.RangerYarnAuthorizer</value>
+    <description> Yarn authorization provider class. </description>
+    <depends-on>
+      <property>
+        <type>ranger-yarn-plugin-properties</type>
+        <name>ranger-yarn-plugin-enabled</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
     <name>yarn.admin.acl</name>
     <value>yarn</value>
     <description> ACL of who can be admin of the YARN cluster. </description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3864bc16/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
index 7a6662c..464f9cc 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
@@ -238,14 +238,27 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
     super(HDP23StackAdvisor, self).recommendHDFSConfigurations(configurations, clusterData, services, hosts)
 
     putHdfsSiteProperty = self.putProperty(configurations, "hdfs-site", services)
-    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    putHdfsSitePropertyAttribute = self.putPropertyAttribute(configurations, "hdfs-site")
+
     if ('ranger-hdfs-plugin-properties' in services['configurations']) and ('ranger-hdfs-plugin-enabled' in services['configurations']['ranger-hdfs-plugin-properties']['properties']):
-      rangerPluginEnabled = services['configurations']['ranger-hdfs-plugin-properties']['properties']['ranger-hdfs-plugin-enabled']
-      if ("RANGER" in servicesList) and (rangerPluginEnabled.lower() == 'Yes'.lower()):
+      rangerPluginEnabled = ''
+      if 'ranger-hdfs-plugin-properties' in configurations and 'ranger-hdfs-plugin-enabled' in  configurations['ranger-hdfs-plugin-properties']['properties']:
+        rangerPluginEnabled = configurations['ranger-hdfs-plugin-properties']['properties']['ranger-hdfs-plugin-enabled']
+      elif 'ranger-hdfs-plugin-properties' in services['configurations'] and 'ranger-hdfs-plugin-enabled' in services['configurations']['ranger-hdfs-plugin-properties']['properties']:
+        rangerPluginEnabled = services['configurations']['ranger-hdfs-plugin-properties']['properties']['ranger-hdfs-plugin-enabled']
+
+      if rangerPluginEnabled and (rangerPluginEnabled.lower() == 'Yes'.lower()):
         putHdfsSiteProperty("dfs.namenode.inode.attributes.provider.class",'org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer')
+      else:
+        putHdfsSitePropertyAttribute('dfs.namenode.inode.attributes.provider.class', 'delete', 'true')
+    else:
+      putHdfsSitePropertyAttribute('dfs.namenode.inode.attributes.provider.class', 'delete', 'true')
 
   def recommendKAFKAConfigurations(self, configurations, clusterData, services, hosts):
+    core_site = services["configurations"]["core-site"]["properties"]
     putKafkaBrokerProperty = self.putProperty(configurations, "kafka-broker", services)
+    putKafkaLog4jProperty = self.putProperty(configurations, "kafka-log4j", services)
+    putKafkaBrokerAttributes = self.putPropertyAttribute(configurations, "kafka-broker")
 
     if "ranger-env" in services["configurations"] and "ranger-kafka-plugin-properties" in services["configurations"] and \
         "ranger-kafka-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
@@ -253,11 +266,68 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
       rangerEnvKafkaPluginProperty = services["configurations"]["ranger-env"]["properties"]["ranger-kafka-plugin-enabled"]
       putKafkaRangerPluginProperty("ranger-kafka-plugin-enabled", rangerEnvKafkaPluginProperty)
 
-    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
     if 'ranger-kafka-plugin-properties' in services['configurations'] and ('ranger-kafka-plugin-enabled' in services['configurations']['ranger-kafka-plugin-properties']['properties']):
-      rangerPluginEnabled = services['configurations']['ranger-kafka-plugin-properties']['properties']['ranger-kafka-plugin-enabled']
-      if ("RANGER" in servicesList) and (rangerPluginEnabled.lower() == "Yes".lower()):
+      kafkaLog4jRangerLines = [{
+        "name": "log4j.appender.rangerAppender",
+        "value": "org.apache.log4j.DailyRollingFileAppender"
+        },
+        {
+          "name": "log4j.appender.rangerAppender.DatePattern",
+          "value": "'.'yyyy-MM-dd-HH"
+        },
+        {
+          "name": "log4j.appender.rangerAppender.File",
+          "value": "${kafka.logs.dir}/ranger_kafka.log"
+        },
+        {
+          "name": "log4j.appender.rangerAppender.layout",
+          "value": "org.apache.log4j.PatternLayout"
+        },
+        {
+          "name": "log4j.appender.rangerAppender.layout.ConversionPattern",
+          "value": "%d{ISO8601} %p [%t] %C{6} (%F:%L) - %m%n"
+        },
+        {
+          "name": "log4j.logger.org.apache.ranger",
+          "value": "INFO, rangerAppender"
+        }]
+
+      rangerPluginEnabled=''
+      if 'ranger-kafka-plugin-properties' in configurations and 'ranger-kafka-plugin-enabled' in  configurations['ranger-kafka-plugin-properties']['properties']:
+        rangerPluginEnabled = configurations['ranger-kafka-plugin-properties']['properties']['ranger-kafka-plugin-enabled']
+      elif 'ranger-kafka-plugin-properties' in services['configurations'] and 'ranger-kafka-plugin-enabled' in services['configurations']['ranger-kafka-plugin-properties']['properties']:
+        rangerPluginEnabled = services['configurations']['ranger-kafka-plugin-properties']['properties']['ranger-kafka-plugin-enabled']
+
+      if  rangerPluginEnabled and rangerPluginEnabled.lower() == "Yes".lower():
+        # recommend authorizer.class.name
         putKafkaBrokerProperty("authorizer.class.name", 'org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer')
+        # change kafka-log4j when ranger plugin is installed
+
+        if 'kafka-log4j' in services['configurations'] and 'content' in services['configurations']['kafka-log4j']['properties']:
+          kafkaLog4jContent = services['configurations']['kafka-log4j']['properties']['content']
+          for item in range(len(kafkaLog4jRangerLines)):
+            if kafkaLog4jRangerLines[item]["name"] not in kafkaLog4jContent:
+              kafkaLog4jContent+= '\n' + kafkaLog4jRangerLines[item]["name"] + '=' + kafkaLog4jRangerLines[item]["value"]
+          putKafkaLog4jProperty("content",kafkaLog4jContent)
+
+
+      else:
+        # Ranger plugin is disabled: revert the Ranger authorizer on a
+        # kerberized cluster, otherwise withdraw the recommendation
+        if 'hadoop.security.authentication' in core_site and core_site['hadoop.security.authentication'] == 'kerberos' and \
+          services['configurations']['kafka-broker']['properties']['authorizer.class.name'] == 'org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer':
+          putKafkaBrokerProperty("authorizer.class.name", 'kafka.security.auth.SimpleAclAuthorizer')
+        else:
+          putKafkaBrokerAttributes('authorizer.class.name', 'delete', 'true')
+    # Cluster without Ranger is not kerberized
+    elif ('hadoop.security.authentication' not in core_site or core_site['hadoop.security.authentication'] != 'kerberos'):
+      putKafkaBrokerAttributes('authorizer.class.name', 'delete', 'true')
+
 
   def recommendRangerConfigurations(self, configurations, clusterData, services, hosts):
     super(HDP23StackAdvisor, self).recommendRangerConfigurations(configurations, clusterData, services, hosts)
@@ -370,11 +440,24 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
 
   def recommendYARNConfigurations(self, configurations, clusterData, services, hosts):
     super(HDP23StackAdvisor, self).recommendYARNConfigurations(configurations, clusterData, services, hosts)
+    putYarnSiteProperty = self.putProperty(configurations, "yarn-site", services)
+    putYarnSitePropertyAttributes = self.putPropertyAttribute(configurations, "yarn-site")
     if "ranger-env" in services["configurations"] and "ranger-yarn-plugin-properties" in services["configurations"] and \
         "ranger-yarn-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
       putYarnRangerPluginProperty = self.putProperty(configurations, "ranger-yarn-plugin-properties", services)
       rangerEnvYarnPluginProperty = services["configurations"]["ranger-env"]["properties"]["ranger-yarn-plugin-enabled"]
       putYarnRangerPluginProperty("ranger-yarn-plugin-enabled", rangerEnvYarnPluginProperty)
+    rangerPluginEnabled = ''
+    if 'ranger-yarn-plugin-properties' in configurations and 'ranger-yarn-plugin-enabled' in  configurations['ranger-yarn-plugin-properties']['properties']:
+      rangerPluginEnabled = configurations['ranger-yarn-plugin-properties']['properties']['ranger-yarn-plugin-enabled']
+    elif 'ranger-yarn-plugin-properties' in services['configurations'] and 'ranger-yarn-plugin-enabled' in services['configurations']['ranger-yarn-plugin-properties']['properties']:
+      rangerPluginEnabled = services['configurations']['ranger-yarn-plugin-properties']['properties']['ranger-yarn-plugin-enabled']
+
+    if rangerPluginEnabled and (rangerPluginEnabled.lower() == 'Yes'.lower()):
+      putYarnSiteProperty('yarn.acl.enable','true')
+      putYarnSiteProperty('yarn.authorization-provider','org.apache.ranger.authorization.yarn.authorizer.RangerYarnAuthorizer')
+    else:
+      putYarnSitePropertyAttributes('yarn.authorization-provider', 'delete', 'true')
 
   def getServiceConfigurationValidators(self):
       parentValidators = super(HDP23StackAdvisor, self).getServiceConfigurationValidators()
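Two patterns recur through these recommend* methods. The plugin flag is resolved from configurations first (values produced earlier in the same advisor pass) and only then from services['configurations'] (values currently saved on the cluster), and a recommendation is withdrawn not by writing a value but by setting a {'delete': 'true'} property attribute. A condensed sketch of both, assuming the nested dict layout the tests below exercise (the helper names are hypothetical, not the StackAdvisor API):

    def resolve_property(configurations, services, config_type, name, default=''):
        # Prefer a value recommended earlier in this pass over the saved one.
        for source in (configurations, services.get('configurations', {})):
            props = source.get(config_type, {}).get('properties', {})
            if name in props:
                return props[name]
        return default

    def delete_property(configurations, config_type, name):
        # Withdraw a recommendation via a property attribute.
        attrs = configurations.setdefault(config_type, {}) \
                              .setdefault('property_attributes', {})
        attrs[name] = {'delete': 'true'}

    configurations = {}
    services = {'configurations': {'ranger-yarn-plugin-properties': {
        'properties': {'ranger-yarn-plugin-enabled': 'No'}}}}
    if resolve_property(configurations, services, 'ranger-yarn-plugin-properties',
                        'ranger-yarn-plugin-enabled').lower() != 'yes':
        delete_property(configurations, 'yarn-site', 'yarn.authorization-provider')
    print(configurations)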

http://git-wip-us.apache.org/repos/asf/ambari/blob/3864bc16/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
index 2ce1cee..7abdcd0 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
@@ -2271,6 +2271,9 @@ class TestHDP22StackAdvisor(TestCase):
     services = {
       "services" : [
       ],
+      "Versions": {
+        "stack_version": "2.2"
+      },
       "configurations": {
         "hbase-env": {
           "properties": {
@@ -2285,7 +2288,13 @@ class TestHDP22StackAdvisor(TestCase):
             "hbase.bucketcache.ioengine": "",
             "hbase.bucketcache.size": "",
             "hbase.bucketcache.percentage.in.combinedcache": "",
-            "hbase.coprocessor.regionserver.classes": ""
+            "hbase.coprocessor.regionserver.classes": "",
+            "hbase.coprocessor.region.classes": ""
+          }
+        },
+        "ranger-hbase-plugin-properties": {
+          "properties": {
+            "ranger-hbase-plugin-enabled" : "No"
           }
         }
       }
@@ -2331,7 +2340,7 @@ class TestHDP22StackAdvisor(TestCase):
 
     # Test when phoenix_sql_enabled = true
     self.stackAdvisor.recommendHBASEConfigurations(configurations, clusterData, services, None)
-    self.assertEquals(configurations, expected)
+    self.assertEquals(configurations, expected, "Test when Phoenix sql is enabled")
 
     # Test when phoenix_sql_enabled = false
     services['configurations']['hbase-env']['properties']['phoenix_sql_enabled'] = 'false'
@@ -2340,7 +2349,7 @@ class TestHDP22StackAdvisor(TestCase):
     expected['hbase-site']['property_attributes']['hbase.coprocessor.regionserver.classes'] = {'delete': 'true'}
     expected['hbase-site']['property_attributes']['phoenix.functions.allowUserDefinedFunctions'] = {'delete': 'true'}
     self.stackAdvisor.recommendHBASEConfigurations(configurations, clusterData, services, None)
-    self.assertEquals(configurations, expected)
+    self.assertEquals(configurations, expected, "Test when Phoenix sql is disabled")
 
     # Test hbase_master_heapsize maximum
     hosts['items'][0]['Hosts']['host_name'] = 'host1'
@@ -2375,27 +2384,53 @@ class TestHDP22StackAdvisor(TestCase):
     expected['hbase-site']['property_attributes']['phoenix.functions.allowUserDefinedFunctions'] = {'delete': 'true'}
     expected['hbase-env']['property_attributes']['hbase_master_heapsize'] = {'maximum': '49152'}
     self.stackAdvisor.recommendHBASEConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations, expected)
+    self.assertEquals(configurations, expected, "Test with Phoenix disabled")
 
     # Test when hbase.security.authentication = kerberos
     services['configurations']['hbase-site']['properties']['hbase.security.authentication'] = 'kerberos'
     expected['hbase-site']['properties']['hbase.coprocessor.region.classes'] = 'org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint'
     self.stackAdvisor.recommendHBASEConfigurations(configurations, clusterData, services, None)
-    self.assertEquals(configurations, expected)
+    self.assertEquals(configurations, expected, "Test with Kerberos enabled")
 
     # Test when hbase.security.authentication = simple
     services['configurations']['hbase-site']['properties']['hbase.security.authentication'] = 'simple'
     expected['hbase-site']['properties']['hbase.coprocessor.region.classes'] = 'org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint'
     self.stackAdvisor.recommendHBASEConfigurations(configurations, clusterData, services, None)
-    self.assertEquals(configurations, expected)
+    self.assertEquals(configurations, expected, "Test with Kerberos disabled")
+
+    # Test when Ranger plugin HBase is enabled in non-kerberos environment
+    configurations['hbase-site']['properties'].pop('hbase.coprocessor.region.classes', None)
+    configurations['hbase-site']['properties'].pop('hbase.coprocessor.master.classes', None)
+    configurations['hbase-site']['properties'].pop('hbase.coprocessor.regionserver.classes', None)
+    services['configurations']['ranger-hbase-plugin-properties']['properties']['ranger-hbase-plugin-enabled'] = 'Yes'
+    services['configurations']['hbase-site']['properties']['hbase.security.authentication'] = 'simple'
+    services['configurations']['hbase-site']['properties']['hbase.security.authorization'] = 'false'
+    services['configurations']['hbase-site']['properties']['hbase.coprocessor.region.classes'] = ''
+    services['configurations']['hbase-site']['properties']['hbase.coprocessor.master.classes'] = ''
+
+    expected['hbase-site']['properties']['hbase.security.authorization'] = "true"
+    expected['hbase-site']['properties']['hbase.coprocessor.region.classes'] = 'org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint,com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor'
+    expected['hbase-site']['properties']['hbase.coprocessor.master.classes'] = 'com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor'
+    expected['hbase-site']['properties']['hbase.coprocessor.regionserver.classes'] = 'org.apache.hadoop.hbase.security.access.AccessController'
+    self.stackAdvisor.recommendHBASEConfigurations(configurations, clusterData, services, None)
+    self.assertEquals(configurations, expected, "Test when Ranger plugin HBase is enabled in non-kerberos environment")
 
     # Test when hbase.security.authentication = kerberos AND class already there
     configurations['hbase-site']['properties'].pop('hbase.coprocessor.region.classes', None)
+    configurations['hbase-site']['properties'].pop('hbase.coprocessor.master.classes', None)
+    configurations['hbase-site']['properties'].pop('hbase.coprocessor.regionserver.classes', None)
+    configurations['hbase-site']['properties'].pop('hbase.security.authorization', None)
+    services['configurations']['ranger-hbase-plugin-properties']['properties']['ranger-hbase-plugin-enabled'] = 'No'
+    services['configurations']['hbase-site']['properties']['hbase.security.authorization'] = 'false'
     services['configurations']['hbase-site']['properties']['hbase.security.authentication'] = 'kerberos'
+    services['configurations']['hbase-site']['properties']['hbase.coprocessor.master.classes'] = ''
     services['configurations']['hbase-site']['properties']['hbase.coprocessor.region.classes'] = 'a.b.c.d'
     expected['hbase-site']['properties']['hbase.coprocessor.region.classes'] = 'a.b.c.d,org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint'
+    expected['hbase-site']['properties']['hbase.coprocessor.master.classes'] = ''
+    del expected['hbase-site']['properties']['hbase.security.authorization']
+    del expected['hbase-site']['properties']['hbase.coprocessor.regionserver.classes']
     self.stackAdvisor.recommendHBASEConfigurations(configurations, clusterData, services, None)
-    self.assertEquals(configurations, expected)
+    self.assertEquals(configurations, expected, "Test with Kerberos enabled and hbase.coprocessor.region.classes predefined")
 
     # Test when hbase.security.authentication = kerberos AND authorization = true
     configurations['hbase-site']['properties'].pop('hbase.coprocessor.region.classes', None)
@@ -2406,7 +2441,20 @@ class TestHDP22StackAdvisor(TestCase):
     expected['hbase-site']['properties']['hbase.coprocessor.region.classes'] = 'org.apache.hadoop.hbase.security.access.AccessController,org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint'
     expected['hbase-site']['properties']['hbase.coprocessor.regionserver.classes'] = "org.apache.hadoop.hbase.security.access.AccessController"
     self.stackAdvisor.recommendHBASEConfigurations(configurations, clusterData, services, None)
-    self.assertEquals(configurations, expected)
+    self.assertEquals(configurations, expected, "Test with Kerberos enabled and authorization is true")
+
+    # Test when Ranger plugin HBase is enabled in kerberos environment
+    configurations['hbase-site']['properties'].pop('hbase.coprocessor.region.classes', None)
+    services['configurations']['hbase-site']['properties']['hbase.coprocessor.region.classes'] = ''
+    services['configurations']['hbase-site']['properties']['hbase.coprocessor.master.classes'] = ''
+    services['configurations']['hbase-site']['properties']['hbase.security.authentication'] = 'kerberos'
+    services['configurations']['hbase-site']['properties']['hbase.security.authorization'] = 'false'
+    services['configurations']['ranger-hbase-plugin-properties']['properties']['ranger-hbase-plugin-enabled'] = 'Yes'
+    expected['hbase-site']['properties']['hbase.security.authorization']  = 'true'
+    expected['hbase-site']['properties']['hbase.coprocessor.master.classes'] = 'com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor'
+    expected['hbase-site']['properties']['hbase.coprocessor.region.classes'] = 'org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint,com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor'
+    self.stackAdvisor.recommendHBASEConfigurations(configurations, clusterData, services, None)
+    self.assertEquals(configurations, expected, "Test with Kerberos enabled and HBase ranger plugin enabled")
 
     # Test - default recommendations should have certain configs deleted. HAS TO BE LAST TEST.
     services["configurations"] = {"hbase-site": {"properties": {"phoenix.functions.allowUserDefinedFunctions": '', "hbase.rpc.controllerfactory.class": ''}}}
@@ -2417,6 +2465,70 @@ class TestHDP22StackAdvisor(TestCase):
     self.assertEquals(configurations['hbase-site']['properties']['hbase.regionserver.wal.codec'], "org.apache.hadoop.hbase.regionserver.wal.WALCellCodec")
 
 
+  def test_recommendStormConfigurations(self):
+    configurations = {}
+    clusterData = {}
+    services = {
+      "services":
+        [
+          {
+            "StackServices": {
+              "service_name" : "STORM",
+              "service_version" : "2.6.0.2.2"
+            }
+          }
+        ],
+      "Versions": {
+        "stack_version": "2.2"
+      },
+      "configurations": {
+        "core-site": {
+          "properties": { },
+        },
+        "storm-site": {
+          "properties": {
+            "nimbus.authorizer" : "backtype.storm.security.auth.authorizer.SimpleACLAuthorizer"
+          },
+          "property_attributes": {}
+        },
+        "ranger-storm-plugin-properties": {
+          "properties": {
+            "ranger-storm-plugin-enabled": "No"
+          }
+        }
+      }
+    }
+
+    # Test nimbus.authorizer with Ranger Storm plugin disabled in non-kerberos environment
+    self.stackAdvisor.recommendStormConfigurations(configurations, clusterData, services, None)
+    self.assertEquals(configurations['storm-site']['property_attributes']['nimbus.authorizer'], {'delete': 'true'}, "Test nimbus.authorizer with Ranger Storm plugin disabled in non-kerberos environment")
+
+    # Test nimbus.authorizer with Ranger Storm plugin enabled in non-kerberos environment
+    configurations['storm-site']['properties'] = {}
+    configurations['storm-site']['property_attributes'] = {}
+    services['configurations']['ranger-storm-plugin-properties']['properties']['ranger-storm-plugin-enabled'] = 'Yes'
+    self.stackAdvisor.recommendStormConfigurations(configurations, clusterData, services, None)
+    self.assertEquals(configurations['storm-site']['property_attributes']['nimbus.authorizer'], {'delete': 'true'}, "Test nimbus.authorizer with Ranger Storm plugin enabled in non-kerberos environment")
+
+    # Test nimbus.authorizer with Ranger Storm plugin being enabled in kerberos environment
+    configurations['storm-site']['properties'] = {}
+    configurations['storm-site']['property_attributes'] = {}
+    services['configurations']['storm-site']['properties']['nimbus.authorizer'] = ''
+    services['configurations']['ranger-storm-plugin-properties']['properties']['ranger-storm-plugin-enabled'] = 'Yes'
+    services['configurations']['core-site']['properties']['hadoop.security.authentication'] = 'kerberos'
+    self.stackAdvisor.recommendStormConfigurations(configurations, clusterData, services, None)
+    self.assertEquals(configurations['storm-site']['properties']['nimbus.authorizer'], 'com.xasecure.authorization.storm.authorizer.XaSecureStormAuthorizer', "Test nimbus.authorizer with Ranger Storm plugin enabled in kerberos environment")
+
+    # Test nimbus.authorizer with Ranger Storm plugin being disabled in kerberos environment
+    configurations['storm-site']['properties'] = {}
+    configurations['storm-site']['property_attributes'] = {}
+    services['configurations']['ranger-storm-plugin-properties']['properties']['ranger-storm-plugin-enabled'] = 'No'
+    services['configurations']['core-site']['properties']['hadoop.security.authentication'] = 'kerberos'
+    services['configurations']['storm-site']['properties']['nimbus.authorizer'] = 'com.xasecure.authorization.storm.authorizer.XaSecureStormAuthorizer'
+    self.stackAdvisor.recommendStormConfigurations(configurations, clusterData, services, None)
+    self.assertEquals(configurations['storm-site']['properties']['nimbus.authorizer'], 'backtype.storm.security.auth.authorizer.SimpleACLAuthorizer', "Test nimbus.authorizer with Ranger Storm plugin being disabled in kerberos environment")
+
+
   def test_recommendHDFSConfigurations(self):
     configurations = {
       'ranger-hdfs-plugin-properties':{

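The Storm test above walks a small decision table: nimbus.authorizer only matters on a kerberized cluster, the Ranger flag then picks between the Ranger and SimpleACL authorizers, and on a non-kerberized cluster the recommendation is deleted regardless of the flag. The same table as a pure function mirroring the tested behavior (a sketch, not the advisor itself):

    def expected_nimbus_authorizer(kerberized, ranger_enabled, current,
                                   stack_version='2.2'):
        # Returns ('value', v), ('delete', None) or ('keep', current).
        ranger_cls = ('com.xasecure.authorization.storm.authorizer.XaSecureStormAuthorizer'
                      if stack_version == '2.2' else
                      'org.apache.ranger.authorization.storm.authorizer.RangerStormAuthorizer')
        acl_cls = 'backtype.storm.security.auth.authorizer.SimpleACLAuthorizer'
        if not kerberized:
            return ('delete', None)
        if ranger_enabled:
            return ('value', ranger_cls)
        if current == ranger_cls:
            return ('value', acl_cls)
        return ('keep', current)

    assert expected_nimbus_authorizer(False, True, '') == ('delete', None)
    assert expected_nimbus_authorizer(
        True, False,
        'com.xasecure.authorization.storm.authorizer.XaSecureStormAuthorizer'
    )[1].endswith('SimpleACLAuthorizer')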
http://git-wip-us.apache.org/repos/asf/ambari/blob/3864bc16/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
index ff6c93e..33ad293 100644
--- a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
@@ -71,6 +71,207 @@ class TestHDP23StackAdvisor(TestCase):
     open_mock.return_value = MagicFile()
     return self.get_system_min_uid_real()
 
+  def test_recommendHDFSConfigurations(self):
+    configurations = {}
+    clusterData = {
+      "totalAvailableRam": 2048,
+      "hBaseInstalled": True,
+      "hbaseRam": 112,
+      "reservedRam": 128
+    }
+    services = {
+      "services":
+        [
+          {
+            "StackServices": {
+              "service_name" : "HDFS",
+              "service_version" : "2.6.0.2.2"
+            }
+          }
+        ],
+      "Versions": {
+        "stack_version": "2.3"
+      },
+      "configurations": {
+        "hdfs-site": {
+          "properties": {
+            "dfs.namenode.inode.attributes.provider.class": "org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer"
+          }
+        },
+        "ranger-hdfs-plugin-properties": {
+          "properties": {
+            "ranger-hdfs-plugin-enabled": "No"
+          }
+        }
+      }
+    }
+
+    # Test with Ranger HDFS plugin disabled
+    self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, None)
+    self.assertEquals(configurations['hdfs-site']['property_attributes']['dfs.namenode.inode.attributes.provider.class'], {'delete': 'true'}, "Test with Ranger HDFS plugin is disabled")
+
+    # Test with Ranger HDFS plugin is enabled
+    configurations['hdfs-site']['properties'] = {}
+    configurations['hdfs-site']['property_attributes'] = {}
+    services['configurations']['ranger-hdfs-plugin-properties']['properties']['ranger-hdfs-plugin-enabled'] = 'Yes'
+    self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, None)
+    self.assertEquals(configurations['hdfs-site']['properties']['dfs.namenode.inode.attributes.provider.class'], 'org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer', "Test with Ranger HDFS plugin is enabled")
+
+  def test_recommendYARNConfigurations(self):
+    configurations = {}
+    servicesList = ["YARN"]
+    components = []
+    hosts = {
+      "items" : [
+        {
+          "Hosts" : {
+            "cpu_count" : 6,
+            "total_mem" : 50331648,
+            "disk_info" : [
+              {"mountpoint" : "/"},
+              {"mountpoint" : "/dev/shm"},
+              {"mountpoint" : "/vagrant"},
+              {"mountpoint" : "/"},
+              {"mountpoint" : "/dev/shm"},
+              {"mountpoint" : "/vagrant"}
+            ],
+            "public_host_name" : "c6401.ambari.apache.org",
+            "host_name" : "c6401.ambari.apache.org"
+          }
+        }
+      ]
+    }
+    services = {
+      "services" : [ {
+        "StackServices":{
+          "service_name": "YARN",
+        },
+        "Versions": {
+          "stack_version": "2.3"
+        },
+        "components": [
+          {
+            "StackServiceComponents": {
+              "component_name": "NODEMANAGER",
+              "hostnames": ["c6401.ambari.apache.org"]
+            }
+          }
+        ]
+      }
+      ],
+      "configurations": {
+        "yarn-site": {
+          "properties": {
+            "yarn.authorization-provider": "org.apache.ranger.authorization.yarn.authorizer.RangerYarnAuthorizer"
+          }
+        },
+        "ranger-yarn-plugin-properties": {
+          "properties": {
+            "ranger-yarn-plugin-enabled": "No"
+          }
+        }
+      }
+    }
+
+    clusterData = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, None)
+    # Test with Ranger YARN plugin disabled
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
+    self.assertEquals(configurations['yarn-site']['property_attributes']['yarn.authorization-provider'], {'delete': 'true'}, "Test with Ranger YARN plugin is disabled")
+
+    # Test with Ranger YARN plugin is enabled
+    configurations['yarn-site']['properties'] = {}
+    configurations['yarn-site']['property_attributes'] = {}
+    services['configurations']['ranger-yarn-plugin-properties']['properties']['ranger-yarn-plugin-enabled'] = 'Yes'
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
+    self.assertEquals(configurations['yarn-site']['properties']['yarn.authorization-provider'], 'org.apache.ranger.authorization.yarn.authorizer.RangerYarnAuthorizer', "Test with Ranger YARN plugin enabled")
+
+
+  def test_recommendKAFKAConfigurations(self):
+    configurations = {}
+    clusterData = {
+      "totalAvailableRam": 2048,
+      "hBaseInstalled": True,
+      "hbaseRam": 112,
+      "reservedRam": 128
+    }
+    services = {
+      "services":
+        [
+          {
+            "StackServices": {
+              "service_name" : "KAFKA",
+              "service_version" : "2.6.0.2.2"
+            }
+          }
+        ],
+      "Versions": {
+        "stack_version": "2.3"
+      },
+      "configurations": {
+        "core-site": {
+          "properties": { },
+        },
+        "kafka-broker": {
+          "properties": {
+            "authorizer.class.name" : "kafka.security.auth.SimpleAclAuthorizer"
+          },
+          "property_attributes": {}
+        },
+        "ranger-kafka-plugin-properties": {
+          "properties": {
+            "ranger-kafka-plugin-enabled": "No"
+          }
+        },
+        "kafka-log4j": {
+          "properties": {
+            "content": "kafka.logs.dir=logs"
+          }
+        }
+      }
+    }
+
+    # Test authorizer.class.name with Ranger Kafka plugin disabled in non-kerberos environment
+    self.stackAdvisor.recommendKAFKAConfigurations(configurations, clusterData, services, None)
+    self.assertEquals(configurations['kafka-broker']['property_attributes']['authorizer.class.name'], {'delete': 'true'}, "Test authorizer.class.name with Ranger Kafka plugin is disabled in non-kerberos environment")
+
+    # Test authorizer.class.name with Ranger Kafka plugin disabled in kerberos environment
+    configurations['kafka-broker']['properties'] = {}
+    configurations['kafka-broker']['property_attributes'] = {}
+    services['configurations']['core-site']['properties']['hadoop.security.authentication'] = 'kerberos'
+    services['configurations']['kafka-broker']['properties']['authorizer.class.name'] = 'org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer'
+    self.stackAdvisor.recommendKAFKAConfigurations(configurations, clusterData, services, None)
+    self.assertEquals(configurations['kafka-broker']['properties']['authorizer.class.name'], 'kafka.security.auth.SimpleAclAuthorizer' , "Test authorizer.class.name with Ranger Kafka plugin disabled in kerberos environment")
+
+    # Test authorizer.class.name with Ranger Kafka plugin enabled in non-kerberos environment
+    configurations['kafka-broker']['properties'] = {}
+    configurations['kafka-broker']['property_attributes'] = {}
+    del services['configurations']['core-site']['properties']['hadoop.security.authentication']
+    services['configurations']['kafka-broker']['properties']['authorizer.class.name'] = 'kafka.security.auth.SimpleAclAuthorizer'
+    services['configurations']['ranger-kafka-plugin-properties']['properties']['ranger-kafka-plugin-enabled'] = 'Yes'
+    self.stackAdvisor.recommendKAFKAConfigurations(configurations, clusterData, services, None)
+    self.assertEquals(configurations['kafka-broker']['properties']['authorizer.class.name'], 'org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer', "Test authorizer.class.name with Ranger Kafka plugin enabled in non-kerberos environment")
+
+    # Test authorizer.class.name with Ranger Kafka plugin enabled in kerberos environment
+    configurations['kafka-broker']['properties'] = {}
+    configurations['kafka-broker']['property_attributes'] = {}
+    services['configurations']['core-site']['properties']['hadoop.security.authentication'] = 'kerberos'
+    services['configurations']['kafka-broker']['properties']['authorizer.class.name'] = 'kafka.security.auth.SimpleAclAuthorizer'
+    services['configurations']['ranger-kafka-plugin-properties']['properties']['ranger-kafka-plugin-enabled'] = 'Yes'
+    self.stackAdvisor.recommendKAFKAConfigurations(configurations, clusterData, services, None)
+    self.assertEquals(configurations['kafka-broker']['properties']['authorizer.class.name'], 'org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer', "Test authorizer.class.name with Ranger Kafka plugin enabled in kerberos environment")
+
+    # Test kafka-log4j content when Ranger plugin for Kafka is enabled
+
+    self.stackAdvisor.recommendKAFKAConfigurations(configurations, clusterData, services, None)
+    log4jContent = services['configurations']['kafka-log4j']['properties']['content']
+    newRangerLog4jContent = "\nlog4j.appender.rangerAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.rangerAppender.DatePattern='.'yyyy-MM-dd-HH\n" \
+                     "log4j.appender.rangerAppender.File=${kafka.logs.dir}/ranger_kafka.log\nlog4j.appender.rangerAppender.layout" \
+                     "=org.apache.log4j.PatternLayout\nlog4j.appender.rangerAppender.layout.ConversionPattern=%d{ISO8601} %p [%t] %C{6} (%F:%L) - %m%n\n" \
+                     "log4j.logger.org.apache.ranger=INFO, rangerAppender"
+    expectedLog4jContent = log4jContent + newRangerLog4jContent
+    self.assertEquals(configurations['kafka-log4j']['properties']['content'], expectedLog4jContent, "Test kafka-log4j content when Ranger plugin for Kafka is enabled")
+
+
   def test_recommendHBASEConfigurations(self):
     configurations = {}
     clusterData = {
@@ -201,6 +402,9 @@ class TestHDP23StackAdvisor(TestCase):
             },
             ],
           }],
+      "Versions": {
+        "stack_version": "2.3"
+      },
       "configurations": {
         "yarn-site": {
           "properties": {

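The final Kafka assertion in this test file checks that the Ranger appender lines are appended to the kafka-log4j content keyed on the property name, so running the recommendation twice adds nothing new. That append step in isolation (an illustrative sketch):

    RANGER_LOG4J_LINES = [
        ('log4j.appender.rangerAppender', 'org.apache.log4j.DailyRollingFileAppender'),
        ('log4j.appender.rangerAppender.DatePattern', "'.'yyyy-MM-dd-HH"),
        ('log4j.appender.rangerAppender.File', '${kafka.logs.dir}/ranger_kafka.log'),
        ('log4j.appender.rangerAppender.layout', 'org.apache.log4j.PatternLayout'),
        ('log4j.appender.rangerAppender.layout.ConversionPattern',
         '%d{ISO8601} %p [%t] %C{6} (%F:%L) - %m%n'),
        ('log4j.logger.org.apache.ranger', 'INFO, rangerAppender'),
    ]

    def append_ranger_log4j(content):
        # Append each appender line unless its key is already present.
        for name, value in RANGER_LOG4J_LINES:
            if name not in content:
                content += '\n%s=%s' % (name, value)
        return content

    once = append_ranger_log4j('kafka.logs.dir=logs')
    assert append_ranger_log4j(once) == once   # idempotent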
http://git-wip-us.apache.org/repos/asf/ambari/blob/3864bc16/ambari-web/app/utils/configs/modification_handlers/hbase.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/configs/modification_handlers/hbase.js b/ambari-web/app/utils/configs/modification_handlers/hbase.js
deleted file mode 100644
index bcb87d2..0000000
--- a/ambari-web/app/utils/configs/modification_handlers/hbase.js
+++ /dev/null
@@ -1,107 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-require('utils/configs/modification_handlers/modification_handler');
-
-module.exports = App.ServiceConfigModificationHandler.create({
-  serviceId : 'HBASE',
-
-  updateConfigClasses : function(configClasses, authEnabled, affectedProperties, addOldValue) {
-    if (configClasses != null) {
-      var xaAuthCoProcessorClass = App.get('isHadoop23Stack') ? "org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor"
-        : "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor";
-      var nonXAClass = 'org.apache.hadoop.hbase.security.access.AccessController';
-      var currentClassesList = configClasses.get('value').trim().length > 0 ? configClasses.get('value').trim().split(',') : [];
-      var newClassesList = null, xaClassIndex, nonXaClassIndex;
-
-      if (authEnabled) {
-        var nonXaClassIndex = currentClassesList.indexOf(nonXAClass);
-        if (nonXaClassIndex > -1) {
-          currentClassesList.splice(nonXaClassIndex, 1);
-          newClassesList = currentClassesList;
-        }
-        var xaClassIndex = currentClassesList.indexOf(xaAuthCoProcessorClass);
-        if (xaClassIndex < 0) {
-          currentClassesList.push(xaAuthCoProcessorClass);
-          newClassesList = currentClassesList;
-        }
-      } else {
-        var xaClassIndex = currentClassesList.indexOf(xaAuthCoProcessorClass);
-        if (xaClassIndex > -1) {
-          currentClassesList.splice(xaClassIndex, 1);
-          newClassesList = currentClassesList;
-        }
-        if (addOldValue) {
-          var nonXaClassIndex = currentClassesList.indexOf(nonXAClass);
-          if (nonXaClassIndex < 0) {
-            currentClassesList.push(nonXAClass);
-            newClassesList = currentClassesList;
-          }
-        }
-      }
-
-      if (newClassesList != null) {
-        affectedProperties.push({
-          serviceName : "HBASE",
-          sourceServiceName : "HBASE",
-          propertyName : configClasses.get('name'),
-          propertyDisplayName : configClasses.get('name'),
-          newValue : newClassesList.join(','),
-          curValue : configClasses.get('value'),
-          changedPropertyName : 'ranger-hbase-plugin-enabled',
-          removed : false,
-          filename : 'hbase-site.xml'
-        });
-      }
-    }
-  },
-
-  getDependentConfigChanges : function(changedConfig, selectedServices, allConfigs, securityEnabled) {
-    var affectedProperties = [];
-    var newValue = changedConfig.get("value");
-    var hbaseAuthEnabledPropertyName = "ranger-hbase-plugin-enabled";
-    var affectedPropertyName = changedConfig.get("name");
-    if (affectedPropertyName == hbaseAuthEnabledPropertyName) {
-      var configAuthEnabled = this.getConfig(allConfigs, 'hbase.security.authorization', 'hbase-site.xml', 'HBASE');
-      var configMasterClasses = this.getConfig(allConfigs, 'hbase.coprocessor.master.classes', 'hbase-site.xml', 'HBASE');
-      var configRegionClasses = this.getConfig(allConfigs, 'hbase.coprocessor.region.classes', 'hbase-site.xml', 'HBASE');
-
-      var authEnabled = newValue == "Yes";
-      var newAuthEnabledValue = authEnabled ? "true" : "false";
-      var newRpcProtectionValue = authEnabled ? "privacy" : "authentication";
-
-      // Add HBase-Ranger configs
-      this.updateConfigClasses(configMasterClasses, authEnabled, affectedProperties, configAuthEnabled.get('value') == 'true');
-      this.updateConfigClasses(configRegionClasses, authEnabled, affectedProperties, configAuthEnabled.get('value') == 'true');
-      if (authEnabled && newAuthEnabledValue !== configAuthEnabled.get('value')) {
-        affectedProperties.push({
-          serviceName : "HBASE",
-          sourceServiceName : "HBASE",
-          propertyName : 'hbase.security.authorization',
-          propertyDisplayName : 'hbase.security.authorization',
-          newValue : newAuthEnabledValue,
-          curValue : configAuthEnabled.get('value'),
-          changedPropertyName : hbaseAuthEnabledPropertyName,
-          removed : false,
-          filename : 'hbase-site.xml'
-        });
-      }
-    }
-    return affectedProperties;
-  }
-});

http://git-wip-us.apache.org/repos/asf/ambari/blob/3864bc16/ambari-web/app/utils/configs/modification_handlers/hdfs.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/configs/modification_handlers/hdfs.js b/ambari-web/app/utils/configs/modification_handlers/hdfs.js
deleted file mode 100644
index c77a716..0000000
--- a/ambari-web/app/utils/configs/modification_handlers/hdfs.js
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-require('utils/configs/modification_handlers/modification_handler');
-
-module.exports = App.ServiceConfigModificationHandler.create({
-  serviceId : 'HDFS',
-
-  getDependentConfigChanges : function(changedConfig, selectedServices, allConfigs, securityEnabled) {
-    var affectedProperties = [];
-    var newValue = changedConfig.get("value");
-    var rangerPluginEnabledName = "ranger-hdfs-plugin-enabled";
-    var affectedPropertyName = changedConfig.get("name");
-    if (App.get('isHadoop23Stack') && affectedPropertyName == rangerPluginEnabledName) {
-      var configAttributesProviderClass = this.getConfig(allConfigs, 'dfs.namenode.inode.attributes.provider.class', 'hdfs-site.xml', 'HDFS');
-      var isAttributesProviderClassSet = typeof configAttributesProviderClass !== 'undefined';
-
-      var rangerPluginEnabled = newValue == "Yes";
-      var newDfsPermissionsEnabled = rangerPluginEnabled ? "true" : "false";
-      var newAttributesProviderClass = 'org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer';
-
-      if (rangerPluginEnabled && (!isAttributesProviderClassSet || newAttributesProviderClass != configAttributesProviderClass.get('value'))) {
-        affectedProperties.push({
-          serviceName : "HDFS",
-          sourceServiceName : "HDFS",
-          propertyName : 'dfs.namenode.inode.attributes.provider.class',
-          propertyDisplayName : 'dfs.namenode.inode.attributes.provider.class',
-          newValue : newAttributesProviderClass,
-          curValue : isAttributesProviderClassSet ? configAttributesProviderClass.get('value') : '',
-          changedPropertyName : rangerPluginEnabledName,
-          removed : false,
-          isNewProperty : !isAttributesProviderClassSet,
-          filename : 'hdfs-site.xml',
-          categoryName: 'Custom hdfs-site'
-        });
-      }
-    }
-    return affectedProperties;
-  }
-});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3864bc16/ambari-web/app/utils/configs/modification_handlers/kafka.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/configs/modification_handlers/kafka.js b/ambari-web/app/utils/configs/modification_handlers/kafka.js
deleted file mode 100644
index ff5168f..0000000
--- a/ambari-web/app/utils/configs/modification_handlers/kafka.js
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-require('utils/configs/modification_handlers/modification_handler');
-
-module.exports = App.ServiceConfigModificationHandler.create({
-  serviceId: 'KAFKA',
-
-  getDependentConfigChanges: function (changedConfig, selectedServices, allConfigs) {
-    var rangerPluginEnabledName = "ranger-kafka-plugin-enabled";
-    var affectedProperties = [];
-    var affectedPropertyName = changedConfig.get("name");
-    var authorizerClassName, kafkaLog4jContent, newLog4jContentValue;
-    var isEnabling = changedConfig.get('value') === 'Yes';
-
-    if (affectedPropertyName === rangerPluginEnabledName) {
-      authorizerClassName = this.getConfig(allConfigs, 'authorizer.class.name', 'kafka-broker.xml', 'KAFKA');
-      kafkaLog4jContent = this.getConfig(allConfigs, 'content', 'kafka-log4j.xml', 'KAFKA');
-      newLog4jContentValue = kafkaLog4jContent.get('value');
-      newLog4jContentValue += "\n\nlog4j.appender.rangerAppender=org.apache.log4j.DailyRollingFileAppender\n" +
-      "log4j.appender.rangerAppender.DatePattern='.'yyyy-MM-dd-HH\n" +
-      "log4j.appender.rangerAppender.File=${kafka.logs.dir}/ranger_kafka.log\n" +
-      "log4j.appender.rangerAppender.layout=org.apache.log4j.PatternLayout\n" +
-      "log4j.appender.rangerAppender.layout.ConversionPattern=%d{ISO8601} %p [%t] %C{6} (%F:%L) - %m%n\n" +
-      "log4j.logger.org.apache.ranger=INFO, rangerAppender";
-
-      affectedProperties = [
-        {
-          serviceName: "KAFKA",
-          sourceServiceName: "KAFKA",
-          propertyName: 'authorizer.class.name',
-          propertyDisplayName: 'authorizer.class.name',
-          newValue: isEnabling ? 'org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer' :
-              App.StackConfigProperty.find().findProperty('name', 'authorizer.class.name').get('value'),
-          curValue: authorizerClassName.get('value'),
-          changedPropertyName: rangerPluginEnabledName,
-          removed: false,
-          filename: 'kafka-broker.xml'
-        },
-        {
-          serviceName: "KAFKA",
-          sourceServiceName: "KAFKA",
-          propertyName: 'content',
-          propertyDisplayName: 'content',
-          newValue: isEnabling ? newLog4jContentValue : App.StackConfigProperty.find().filterProperty('filename', 'kafka-log4j.xml').findProperty('name', 'content').get('value'),
-          curValue: kafkaLog4jContent.get('value'),
-          changedPropertyName: rangerPluginEnabledName,
-          removed: false,
-          filename: 'kafka-log4j.xml'
-        }
-      ];
-    }
-
-    return affectedProperties;
-  }
-});

http://git-wip-us.apache.org/repos/asf/ambari/blob/3864bc16/ambari-web/app/utils/configs/modification_handlers/knox.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/configs/modification_handlers/knox.js b/ambari-web/app/utils/configs/modification_handlers/knox.js
deleted file mode 100644
index 482c535..0000000
--- a/ambari-web/app/utils/configs/modification_handlers/knox.js
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-require('utils/configs/modification_handlers/modification_handler');
-
-module.exports = App.ServiceConfigModificationHandler.create({
-  serviceId : 'KNOX',
-
-  getDependentConfigChanges : function(changedConfig, selectedServices, allConfigs, securityEnabled) {
-    var affectedProperties = [];
-    var newValue = changedConfig.get("value");
-    var rangerPluginEnablePropertyName = "ranger-knox-plugin-enabled";
-    var affectedPropertyName = changedConfig.get("name");
-    if (affectedPropertyName == rangerPluginEnablePropertyName) {
-      var topologyXmlContent = this.getConfig(allConfigs, 'content', 'topology.xml', 'KNOX');
-      if (topologyXmlContent != null) {
-        var topologyXmlContentString = topologyXmlContent.get('value');
-        var newTopologyXmlContentString = null;
-        var authEnabled = newValue == "Yes";
-        var authXml = /<provider>[\s]*<role>[\s]*authorization[\s]*<\/role>[\s\S]*?<\/provider>/.exec(topologyXmlContentString);
-        if (authXml != null && authXml.length > 0) {
-          var nameArray = /<name>\s*(.*?)\s*<\/name>/.exec(authXml[0]);
-          if (nameArray != null && nameArray.length > 1) {
-            if (authEnabled && 'AclsAuthz' == nameArray[1]) {
-              var newName = nameArray[0].replace('AclsAuthz', 'XASecurePDPKnox');
-              var newAuthXml = authXml[0].replace(nameArray[0], newName);
-              newTopologyXmlContentString = topologyXmlContentString.replace(authXml[0], newAuthXml);
-            } else if (!authEnabled && 'XASecurePDPKnox' == nameArray[1]) {
-              var newName = nameArray[0].replace('XASecurePDPKnox', 'AclsAuthz');
-              var newAuthXml = authXml[0].replace(nameArray[0], newName);
-              newTopologyXmlContentString = topologyXmlContentString.replace(authXml[0], newAuthXml);
-            }
-          }
-        }
-        if (newTopologyXmlContentString != null) {
-          affectedProperties.push({
-            serviceName : "KNOX",
-            sourceServiceName : "KNOX",
-            propertyName : 'content',
-            propertyDisplayName : 'content',
-            newValue : newTopologyXmlContentString,
-            curValue : topologyXmlContent.get('value'),
-            changedPropertyName : rangerPluginEnablePropertyName,
-            removed : false,
-            filename : 'topology.xml'
-          });
-        }
-      }
-    }
-    return affectedProperties;
-  }
-});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3864bc16/ambari-web/app/utils/configs/modification_handlers/storm.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/configs/modification_handlers/storm.js b/ambari-web/app/utils/configs/modification_handlers/storm.js
deleted file mode 100644
index a5fd83c..0000000
--- a/ambari-web/app/utils/configs/modification_handlers/storm.js
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-require('utils/configs/modification_handlers/modification_handler');
-
-module.exports = App.ServiceConfigModificationHandler.create({
-  serviceId : 'STORM',
-
-  getDependentConfigChanges : function(changedConfig, selectedServices, allConfigs, securityEnabled) {
-    var affectedProperties = [];
-    var newValue = changedConfig.get("value");
-    var rangerPluginEnablePropertyName = "ranger-storm-plugin-enabled";
-    var affectedPropertyName = changedConfig.get("name");
-    if (affectedPropertyName == rangerPluginEnablePropertyName) {
-      var authEnabled = newValue == "Yes";
-      var configNimbusAuthorizer = this.getConfig(allConfigs, 'nimbus.authorizer', 'storm-site.xml', 'STORM');
-      if (configNimbusAuthorizer != null) {
-        // Only when configuration is already present, do we act on it.
-        // Unsecured clusters do not have this config, and hence we skip any
-        // updates
-        var newNimbusAuthorizer = authEnabled ? (App.get('isHadoop23Stack') ? "org.apache.ranger.authorization.storm.authorizer.RangerStormAuthorizer"
-              : "com.xasecure.authorization.storm.authorizer.XaSecureStormAuthorizer")
-            : "backtype.storm.security.auth.authorizer.SimpleACLAuthorizer";
-
-        // Add Storm-Ranger configs
-        if (newNimbusAuthorizer !== configNimbusAuthorizer.get('value')) {
-          affectedProperties.push({
-            serviceName : "STORM",
-            sourceServiceName : "STORM",
-            propertyName : 'nimbus.authorizer',
-            propertyDisplayName : 'nimbus.authorizer',
-            newValue : newNimbusAuthorizer,
-            curValue : configNimbusAuthorizer.get('value'),
-            changedPropertyName : rangerPluginEnablePropertyName,
-            removed : false,
-            filename : 'storm-site.xml'
-          });
-        }
-      }
-      if (authEnabled && affectedProperties.length < 1 && !securityEnabled) {
-        App.ModalPopup.show({
-          header : Em.I18n.t('services.storm.configs.range-plugin-enable.dialog.title'),
-          primary : Em.I18n.t('ok'),
-          secondary : false,
-          showCloseButton : false,
-          onPrimary : function() {
-            this.hide();
-          },
-          body : Em.I18n.t('services.storm.configs.range-plugin-enable.dialog.message')
-        });
-      }
-    }
-    return affectedProperties;
-  }
-});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3864bc16/ambari-web/app/utils/configs/modification_handlers/yarn.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/configs/modification_handlers/yarn.js b/ambari-web/app/utils/configs/modification_handlers/yarn.js
deleted file mode 100644
index 55bb1a9..0000000
--- a/ambari-web/app/utils/configs/modification_handlers/yarn.js
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * 'License'); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-require('utils/configs/modification_handlers/modification_handler');
-
-module.exports = App.ServiceConfigModificationHandler.create({
-  serviceId: 'YARN',
-
-  getDependentConfigChanges: function (changedConfig, selectedServices, allConfigs, securityEnabled) {
-    var affectedProperties = [],
-      newValue = changedConfig.get('value'),
-      rangerPluginEnabledName = 'ranger-yarn-plugin-enabled',
-      affectedPropertyName = changedConfig.get('name');
-    if (affectedPropertyName == rangerPluginEnabledName) {
-      var configYarnAclEnable = this.getConfig(allConfigs, 'yarn.acl.enable', 'yarn-site.xml', 'YARN'),
-        configAuthorizationProviderClass = this.getConfig(allConfigs, 'yarn.authorization-provider', 'yarn-site.xml', 'YARN'),
-        isAuthorizationProviderClassNotSet = typeof configAuthorizationProviderClass === 'undefined',
-        rangerPluginEnabled = newValue == 'Yes',
-        newYarnAclEnable = 'true',
-        newAuthorizationProviderClass = 'org.apache.ranger.authorization.yarn.authorizer.RangerYarnAuthorizer';
-
-      // Add YARN-Ranger configs
-      if (rangerPluginEnabled) {
-        if (configYarnAclEnable != null && newYarnAclEnable !== configYarnAclEnable.get('value')) {
-          affectedProperties.push({
-            serviceName: 'YARN',
-            sourceServiceName: 'YARN',
-            propertyName: 'yarn.acl.enable',
-            propertyDisplayName: 'yarn.acl.enable',
-            newValue: newYarnAclEnable,
-            curValue: configYarnAclEnable.get('value'),
-            changedPropertyName: rangerPluginEnabledName,
-            removed: false,
-            filename: 'yarn-site.xml'
-          });
-        }
-        if (isAuthorizationProviderClassNotSet || newAuthorizationProviderClass !== configAuthorizationProviderClass.get('value')) {
-          affectedProperties.push({
-            serviceName: 'YARN',
-            sourceServiceName: 'YARN',
-            propertyName: 'yarn.authorization-provider',
-            propertyDisplayName: 'yarn.authorization-provider',
-            newValue: newAuthorizationProviderClass,
-            curValue: isAuthorizationProviderClassNotSet ? '': configAuthorizationProviderClass.get('value'),
-            changedPropertyName: rangerPluginEnabledName,
-            removed: false,
-            isNewProperty: isAuthorizationProviderClassNotSet,
-            filename: 'yarn-site.xml',
-            categoryName: 'Custom yarn-site'
-          });
-        }
-      }
-    }
-    return affectedProperties;
-  }
-});
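
Taken together, the five handlers deleted in this commit (hdfs, kafka, knox,
storm, yarn) shared one shape: watch for the service's
"ranger-<service>-plugin-enabled" flag and emit the dependent property changes
for that service. A condensed, framework-free sketch of that shared pattern,
using the HDFS case from the deleted code above (plain objects stand in for
the Ember config objects; the lookup helper is inlined):

// Sketch of the pattern common to the removed handlers: when
// "ranger-hdfs-plugin-enabled" flips to "Yes", recommend the Ranger
// authorizer for dfs.namenode.inode.attributes.provider.class.
function getDependentConfigChanges(changedConfig, allConfigs) {
  var affected = [];
  if (changedConfig.name !== 'ranger-hdfs-plugin-enabled' ||
      changedConfig.value !== 'Yes') {
    return affected;
  }
  // Inlined equivalent of this.getConfig(allConfigs, name, filename, service).
  var provider = allConfigs.filter(function (c) {
    return c.name === 'dfs.namenode.inode.attributes.provider.class' &&
           c.filename === 'hdfs-site.xml';
  })[0];
  var rangerClass = 'org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer';
  if (!provider || provider.value !== rangerClass) {
    affected.push({
      propertyName: 'dfs.namenode.inode.attributes.provider.class',
      newValue: rangerClass,
      curValue: provider ? provider.value : '',
      isNewProperty: !provider,
      filename: 'hdfs-site.xml'
    });
  }
  return affected;
}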


[48/50] [abbrv] ambari git commit: AMBARI-13543. Redundant metrics in exported CSV and JSON files for Flume graphs

Posted by nc...@apache.org.
AMBARI-13543. Redundant metrics in exported CSV and JSON files for Flume graphs


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6a10db2b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6a10db2b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6a10db2b

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 6a10db2b6c81e7baf4b0cfc0acd6f69a5ef94dc2
Parents: b27212d
Author: Alex Antonenko <hi...@gmail.com>
Authored: Fri Oct 23 16:13:42 2015 +0300
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Fri Oct 23 16:13:51 2015 +0300

----------------------------------------------------------------------
 ambari-web/app/assets/test/tests.js             |   6 +
 ambari-web/app/utils/ajax/ajax.js               |  16 +++
 .../info/metrics/flume/channel_size_mma.js      |  17 ++-
 .../service/info/metrics/flume/channel_sum.js   |  10 +-
 .../info/metrics/flume/flume_incoming_mma.js    |  17 ++-
 .../info/metrics/flume/flume_incoming_sum.js    |  10 +-
 .../info/metrics/flume/flume_outgoing_mma.js    |  17 ++-
 .../info/metrics/flume/flume_outgoing_sum.js    |  10 +-
 .../info/metrics/flume/channel_size_mma_test.js | 142 +++++++++++++++++++
 .../info/metrics/flume/channel_sum_test.js      | 108 ++++++++++++++
 .../metrics/flume/flume_incoming_mma_test.js    | 142 +++++++++++++++++++
 .../metrics/flume/flume_incoming_sum_test.js    | 108 ++++++++++++++
 .../metrics/flume/flume_outgoing_mma_test.js    | 142 +++++++++++++++++++
 .../metrics/flume/flume_outgoing_sum_test.js    | 108 ++++++++++++++
 14 files changed, 808 insertions(+), 45 deletions(-)
----------------------------------------------------------------------
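
In short, each Flume chart now queries a dedicated endpoint: the min/max/avg
("mma") charts request only the avg, max and min series, and the "sum" charts
only the sum series, so exported CSV/JSON no longer carries aggregates the
graph does not plot. A minimal, framework-free sketch of the resulting
transformToSeries shape (transformData here is a stand-in for the
App.ChartLinearTimeView helper; the [value, timestamp] point format matches
the test fixtures below):

// Stand-in for the chart view's transformData: AMS points arrive as
// [value, timestamp] pairs and are plotted as {x: timestamp, y: value}.
function transformData(points, seriesName) {
  return {
    name: seriesName,
    data: points.map(function (p) { return { x: p[1], y: p[0] }; })
  };
}

// MMA-chart logic after the change: the ".mma" endpoint returns only
// avg/max/min, so the old `cname != "sum"` filter is no longer needed.
function transformToSeries(jsonData) {
  var rate;
  try {
    rate = jsonData.metrics.flume.flume.CHANNEL.ChannelSize.rate;
  } catch (e) {
    rate = null; // some level of the path was missing
  }
  var seriesArray = [];
  if (rate) {
    Object.keys(rate).forEach(function (cname) {
      if (rate[cname]) {
        seriesArray.push(transformData(rate[cname], 'Channel size (' + cname + ')'));
      }
    });
  }
  return seriesArray;
}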


http://git-wip-us.apache.org/repos/asf/ambari/blob/6a10db2b/ambari-web/app/assets/test/tests.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/test/tests.js b/ambari-web/app/assets/test/tests.js
index f79658c..f0b5dde 100644
--- a/ambari-web/app/assets/test/tests.js
+++ b/ambari-web/app/assets/test/tests.js
@@ -257,6 +257,12 @@ var files = [
   'test/views/main/service/info/config_test',
   'test/views/main/service/info/summary_test',
   'test/views/main/service/info/metrics/ambari_metrics/regionserver_base_test',
+  'test/views/main/service/info/metrics/flume/channel_size_mma_test',
+  'test/views/main/service/info/metrics/flume/channel_sum_test',
+  'test/views/main/service/info/metrics/flume/flume_incoming_mma_test',
+  'test/views/main/service/info/metrics/flume/flume_incoming_sum_test',
+  'test/views/main/service/info/metrics/flume/flume_outgoing_mma_test',
+  'test/views/main/service/info/metrics/flume/flume_outgoing_sum_test',
   'test/views/main/service/services/ranger_test',
   'test/views/main/service/widgets/create/expression_view_test',
   'test/views/main/admin/highAvailability/nameNode/step1_view_test',

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a10db2b/ambari-web/app/utils/ajax/ajax.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/ajax/ajax.js b/ambari-web/app/utils/ajax/ajax.js
index 7fc1a23..3023cfc 100644
--- a/ambari-web/app/utils/ajax/ajax.js
+++ b/ambari-web/app/utils/ajax/ajax.js
@@ -970,6 +970,12 @@ var urls = {
   'service.metrics.flume.channel_size_for_all': {
     'real': '/clusters/{clusterName}/services/FLUME/components/FLUME_HANDLER?fields=metrics/flume/flume/CHANNEL/ChannelSize/rate[{fromSeconds},{toSeconds},{stepSeconds}]'
   },
+  'service.metrics.flume.channel_size_for_all.mma': {
+    'real': '/clusters/{clusterName}/services/FLUME/components/FLUME_HANDLER?fields=metrics/flume/flume/CHANNEL/ChannelSize/rate/avg[{fromSeconds},{toSeconds},{stepSeconds}],metrics/flume/flume/CHANNEL/ChannelSize/rate/max[{fromSeconds},{toSeconds},{stepSeconds}],metrics/flume/flume/CHANNEL/ChannelSize/rate/min[{fromSeconds},{toSeconds},{stepSeconds}]'
+  },
+  'service.metrics.flume.channel_size_for_all.sum': {
+    'real': '/clusters/{clusterName}/services/FLUME/components/FLUME_HANDLER?fields=metrics/flume/flume/CHANNEL/ChannelSize/rate/sum[{fromSeconds},{toSeconds},{stepSeconds}]'
+  },
   'service.metrics.flume.gc': {
     'real': '/clusters/{clusterName}/services/FLUME/components/FLUME_HANDLER?fields=host_components/metrics/jvm/gcTimeMillis[{fromSeconds},{toSeconds},{stepSeconds}]',
     'mock': '/data/services/metrics/flume/jvmGcTime.json',
@@ -992,10 +998,20 @@ var urls = {
   },
   'service.metrics.flume.incoming_event_put_successCount': {
     'real': '/clusters/{clusterName}/services/FLUME/components/FLUME_HANDLER?fields=metrics/flume/flume/CHANNEL/EventPutSuccessCount/rate[{fromSeconds},{toSeconds},{stepSeconds}]'
+  },
+  'service.metrics.flume.incoming_event_put_successCount.mma': {
+    'real': '/clusters/{clusterName}/services/FLUME/components/FLUME_HANDLER?fields=metrics/flume/flume/CHANNEL/EventPutSuccessCount/rate/avg[{fromSeconds},{toSeconds},{stepSeconds}],metrics/flume/flume/CHANNEL/EventPutSuccessCount/rate/max[{fromSeconds},{toSeconds},{stepSeconds}],metrics/flume/flume/CHANNEL/EventPutSuccessCount/rate/min[{fromSeconds},{toSeconds},{stepSeconds}]'
+  },
+  'service.metrics.flume.incoming_event_put_successCount.sum': {
+    'real': '/clusters/{clusterName}/services/FLUME/components/FLUME_HANDLER?fields=metrics/flume/flume/CHANNEL/EventPutSuccessCount/rate/sum[{fromSeconds},{toSeconds},{stepSeconds}]'
   },
   'service.metrics.flume.outgoing_event_take_success_count': {
     'real': '/clusters/{clusterName}/services/FLUME/components/FLUME_HANDLER?fields=metrics/flume/flume/CHANNEL/EventTakeSuccessCount/rate[{fromSeconds},{toSeconds},{stepSeconds}]'
   },
+  'service.metrics.flume.outgoing_event_take_success_count.mma': {
+    'real': '/clusters/{clusterName}/services/FLUME/components/FLUME_HANDLER?fields=metrics/flume/flume/CHANNEL/EventTakeSuccessCount/rate/avg[{fromSeconds},{toSeconds},{stepSeconds}],metrics/flume/flume/CHANNEL/EventTakeSuccessCount/rate/max[{fromSeconds},{toSeconds},{stepSeconds}],metrics/flume/flume/CHANNEL/EventTakeSuccessCount/rate/min[{fromSeconds},{toSeconds},{stepSeconds}]'
+  },
+  'service.metrics.flume.outgoing_event_take_success_count.sum': {
+    'real': '/clusters/{clusterName}/services/FLUME/components/FLUME_HANDLER?fields=metrics/flume/flume/CHANNEL/EventTakeSuccessCount/rate/sum[{fromSeconds},{toSeconds},{stepSeconds}]'
+  },
   'service.metrics.hbase.cluster_requests': {
     'real': '/clusters/{clusterName}/services/HBASE/components/HBASE_MASTER?fields=metrics/hbase/master/cluster_requests[{fromSeconds},{toSeconds},{stepSeconds}]',
     'mock': '/data/services/metrics/hbase/cluster_requests.json',

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a10db2b/ambari-web/app/views/main/service/info/metrics/flume/channel_size_mma.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/info/metrics/flume/channel_size_mma.js b/ambari-web/app/views/main/service/info/metrics/flume/channel_size_mma.js
index ba0fd0f..d4c1fe0 100644
--- a/ambari-web/app/views/main/service/info/metrics/flume/channel_size_mma.js
+++ b/ambari-web/app/views/main/service/info/metrics/flume/channel_size_mma.js
@@ -30,22 +30,21 @@ App.ChartServiceMetricsFlume_ChannelSizeMMA = App.ChartLinearTimeView.extend({
   id: "service-metrics-flume-channel-size-mma",
   title: Em.I18n.t('services.service.info.metrics.flume.channelSizeMMA'),
   renderer: 'line',
-  ajaxIndex: 'service.metrics.flume.channel_size_for_all',
+  ajaxIndex: 'service.metrics.flume.channel_size_for_all.mma',
   yAxisFormatter: App.ChartLinearTimeView.CreateRateFormatter('',
     App.ChartLinearTimeView.DefaultFormatter),
 
   transformToSeries: function (jsonData) {
     var seriesArray = [];
     var self = this;
+    var data = Em.get(jsonData, 'metrics.flume.flume.CHANNEL.ChannelSize.rate');
 
-    if (Em.get(jsonData, "metrics.flume.flume.CHANNEL.ChannelSize.rate")) {
-      for ( var cname in jsonData.metrics.flume.flume.CHANNEL.ChannelSize.rate) {
-        if(cname != "sum"){
-          var seriesName = Em.I18n.t('services.service.info.metrics.flume.channelType').format(cname);
-          var seriesData = jsonData.metrics.flume.flume.CHANNEL.ChannelSize.rate[cname];
-          if (seriesData) {
-            seriesArray.push(self.transformData(seriesData, seriesName));
-          }
+    if (data) {
+      for (var cname in data) {
+        var seriesName = Em.I18n.t('services.service.info.metrics.flume.channelType').format(cname);
+        var seriesData = data[cname];
+        if (seriesData) {
+          seriesArray.push(self.transformData(seriesData, seriesName));
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a10db2b/ambari-web/app/views/main/service/info/metrics/flume/channel_sum.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/info/metrics/flume/channel_sum.js b/ambari-web/app/views/main/service/info/metrics/flume/channel_sum.js
index 6056fe3..b102575 100644
--- a/ambari-web/app/views/main/service/info/metrics/flume/channel_sum.js
+++ b/ambari-web/app/views/main/service/info/metrics/flume/channel_sum.js
@@ -31,17 +31,15 @@ App.ChartServiceMetricsFlume_ChannelSizeSum = App.ChartLinearTimeView.extend({
   title: Em.I18n.t('services.service.info.metrics.flume.channelSizeSum'),
   yAxisFormatter: App.ChartLinearTimeView.BytesFormatter,
 
-  ajaxIndex: 'service.metrics.flume.channel_size_for_all',
+  ajaxIndex: 'service.metrics.flume.channel_size_for_all.sum',
 
   transformToSeries: function (jsonData) {
     var seriesArray = [];
     var self = this;
-    if(Em.get(jsonData, "metrics.flume.flume.CHANNEL.ChannelSize.rate.sum")){
+    var seriesData = Em.get(jsonData, 'metrics.flume.flume.CHANNEL.ChannelSize.rate.sum');
+    if (seriesData) {
       var seriesName = Em.I18n.t('services.service.info.metrics.flume.channelSizeSum');
-      var seriesData = jsonData.metrics.flume.flume.CHANNEL.ChannelSize.rate.sum;
-      if (seriesData) {
-        seriesArray.push(self.transformData(seriesData, seriesName));
-      }
+      seriesArray.push(self.transformData(seriesData, seriesName));
     }
     return seriesArray;
   }
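
A side note on the refactoring pattern used in these views: the nested
existence checks were replaced with a single Em.get call, which, given a
dotted path, returns undefined instead of throwing when an intermediate key
is missing. Assuming Ember is loaded, as in ambari-web:

// Nil-safe path lookup: a broken chain yields undefined, not a TypeError.
var sum = Em.get({ metrics: {} }, 'metrics.flume.flume.CHANNEL.ChannelSize.rate.sum');
console.log(sum); // undefined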

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a10db2b/ambari-web/app/views/main/service/info/metrics/flume/flume_incoming_mma.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/info/metrics/flume/flume_incoming_mma.js b/ambari-web/app/views/main/service/info/metrics/flume/flume_incoming_mma.js
index 77fce83..48ee7f6 100644
--- a/ambari-web/app/views/main/service/info/metrics/flume/flume_incoming_mma.js
+++ b/ambari-web/app/views/main/service/info/metrics/flume/flume_incoming_mma.js
@@ -30,22 +30,21 @@ App.ChartServiceMetricsFlume_IncommingMMA = App.ChartLinearTimeView.extend({
   renderer: 'line',
   title: Em.I18n.t('services.service.info.metrics.flume.incoming.mma'),
 
-  ajaxIndex: 'service.metrics.flume.incoming_event_put_successCount',
+  ajaxIndex: 'service.metrics.flume.incoming_event_put_successCount.mma',
   yAxisFormatter: App.ChartLinearTimeView.CreateRateFormatter('',
       App.ChartLinearTimeView.DefaultFormatter),
 
   transformToSeries: function (jsonData) {
     var seriesArray = [];
     var self = this;
+    var data = Em.get(jsonData, 'metrics.flume.flume.CHANNEL.EventPutSuccessCount.rate');
 
-    if (Em.get(jsonData, "metrics.flume.flume.CHANNEL.EventPutSuccessCount.rate")) {
-      for ( var cname in jsonData.metrics.flume.flume.CHANNEL.EventPutSuccessCount.rate) {
-        if(cname != "sum"){
-          var seriesName = Em.I18n.t('services.service.info.metrics.flume.incoming_mma').format(cname);
-          var seriesData = jsonData.metrics.flume.flume.CHANNEL.EventPutSuccessCount.rate[cname];
-          if (seriesData) {
-            seriesArray.push(self.transformData(seriesData, seriesName));
-          }
+    if (data) {
+      for (var cname in data) {
+        var seriesName = Em.I18n.t('services.service.info.metrics.flume.incoming_mma').format(cname);
+        var seriesData = data[cname];
+        if (seriesData) {
+          seriesArray.push(self.transformData(seriesData, seriesName));
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a10db2b/ambari-web/app/views/main/service/info/metrics/flume/flume_incoming_sum.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/info/metrics/flume/flume_incoming_sum.js b/ambari-web/app/views/main/service/info/metrics/flume/flume_incoming_sum.js
index 973719b..51a1287 100644
--- a/ambari-web/app/views/main/service/info/metrics/flume/flume_incoming_sum.js
+++ b/ambari-web/app/views/main/service/info/metrics/flume/flume_incoming_sum.js
@@ -29,17 +29,15 @@ App.ChartServiceMetricsFlume_IncommingSum = App.ChartLinearTimeView.extend({
   id: "service-metrics-flume-incoming_sum",
   title: Em.I18n.t('services.service.info.metrics.flume.incoming.sum'),
 
-  ajaxIndex: 'service.metrics.flume.incoming_event_put_successCount',
+  ajaxIndex: 'service.metrics.flume.incoming_event_put_successCount.sum',
 
   transformToSeries: function (jsonData) {
     var seriesArray = [];
     var self = this;
-    if(Em.get(jsonData, "metrics.flume.flume.CHANNEL.EventPutSuccessCount.rate.sum")){
+    var seriesData = Em.get(jsonData, 'metrics.flume.flume.CHANNEL.EventPutSuccessCount.rate.sum');
+    if (seriesData) {
       var seriesName = Em.I18n.t('services.service.info.metrics.flume.incoming.sum');
-      var seriesData = jsonData.metrics.flume.flume.CHANNEL.EventPutSuccessCount.rate.sum;
-      if (seriesData) {
-        seriesArray.push(self.transformData(seriesData, seriesName));
-      }
+      seriesArray.push(self.transformData(seriesData, seriesName));
     }
     return seriesArray;
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a10db2b/ambari-web/app/views/main/service/info/metrics/flume/flume_outgoing_mma.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/info/metrics/flume/flume_outgoing_mma.js b/ambari-web/app/views/main/service/info/metrics/flume/flume_outgoing_mma.js
index 311a472..ea1b947 100644
--- a/ambari-web/app/views/main/service/info/metrics/flume/flume_outgoing_mma.js
+++ b/ambari-web/app/views/main/service/info/metrics/flume/flume_outgoing_mma.js
@@ -30,22 +30,21 @@ App.ChartServiceMetricsFlume_OutgoingMMA = App.ChartLinearTimeView.extend({
   renderer: 'line',
   title: Em.I18n.t('services.service.info.metrics.flume.outgoing.mma'),
 
-  ajaxIndex: 'service.metrics.flume.outgoing_event_take_success_count',
+  ajaxIndex: 'service.metrics.flume.outgoing_event_take_success_count.mma',
   yAxisFormatter: App.ChartLinearTimeView.CreateRateFormatter('',
       App.ChartLinearTimeView.DefaultFormatter),
 
   transformToSeries: function (jsonData) {
     var seriesArray = [];
     var self = this;
+    var data = Em.get(jsonData, 'metrics.flume.flume.CHANNEL.EventTakeSuccessCount.rate');
 
-    if (Em.get(jsonData, "metrics.flume.flume.CHANNEL.EventTakeSuccessCount.rate")) {
-      for ( var cname in jsonData.metrics.flume.flume.CHANNEL.EventTakeSuccessCount.rate) {
-        if(cname != "sum"){
-          var seriesName = Em.I18n.t('services.service.info.metrics.flume.outgoing_mma').format(cname);
-          var seriesData = jsonData.metrics.flume.flume.CHANNEL.EventTakeSuccessCount.rate[cname];
-          if (seriesData) {
-            seriesArray.push(self.transformData(seriesData, seriesName));
-          }
+    if (data) {
+      for (var cname in data) {
+        var seriesName = Em.I18n.t('services.service.info.metrics.flume.outgoing_mma').format(cname);
+        var seriesData = data[cname];
+        if (seriesData) {
+          seriesArray.push(self.transformData(seriesData, seriesName));
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a10db2b/ambari-web/app/views/main/service/info/metrics/flume/flume_outgoing_sum.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/info/metrics/flume/flume_outgoing_sum.js b/ambari-web/app/views/main/service/info/metrics/flume/flume_outgoing_sum.js
index 50f8be0..8862f8c 100644
--- a/ambari-web/app/views/main/service/info/metrics/flume/flume_outgoing_sum.js
+++ b/ambari-web/app/views/main/service/info/metrics/flume/flume_outgoing_sum.js
@@ -29,17 +29,15 @@ App.ChartServiceMetricsFlume_OutgoingSum = App.ChartLinearTimeView.extend({
   id: "service-metrics-flume-outgoing_sum",
   title: Em.I18n.t('services.service.info.metrics.flume.outgoing.sum'),
 
-  ajaxIndex: 'service.metrics.flume.outgoing_event_take_success_count',
+  ajaxIndex: 'service.metrics.flume.outgoing_event_take_success_count.sum',
 
   transformToSeries: function (jsonData) {
     var seriesArray = [];
     var self = this;
-    if(Em.get(jsonData, "metrics.flume.flume.CHANNEL.EventTakeSuccessCount.rate.sum")){
+    var seriesData = Em.get(jsonData, 'metrics.flume.flume.CHANNEL.EventTakeSuccessCount.rate.sum');
+    if (seriesData) {
       var seriesName = Em.I18n.t('services.service.info.metrics.flume.outgoing.sum');
-      var seriesData = jsonData.metrics.flume.flume.CHANNEL.EventTakeSuccessCount.rate.sum;
-      if (seriesData) {
-        seriesArray.push(self.transformData(seriesData, seriesName));
-      }
+      seriesArray.push(self.transformData(seriesData, seriesName));
     }
     return seriesArray;
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a10db2b/ambari-web/test/views/main/service/info/metrics/flume/channel_size_mma_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/service/info/metrics/flume/channel_size_mma_test.js b/ambari-web/test/views/main/service/info/metrics/flume/channel_size_mma_test.js
new file mode 100644
index 0000000..16068cd
--- /dev/null
+++ b/ambari-web/test/views/main/service/info/metrics/flume/channel_size_mma_test.js
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+var App = require('app');
+require('views/main/service/info/metrics/flume/channel_size_mma');
+
+describe('App.ChartServiceMetricsFlume_ChannelSizeMMA', function () {
+
+  var view;
+
+  beforeEach(function () {
+    view = App.ChartServiceMetricsFlume_ChannelSizeMMA.create();
+  });
+
+  describe('#transformToSeries', function () {
+
+    var cases = [
+      {
+        data: {},
+        seriesArray: [],
+        title: 'empty response'
+      },
+      {
+        data: {
+          metrics: {}
+        },
+        seriesArray: [],
+        title: 'invalid response'
+      },
+      {
+        data: {
+          metrics: {
+            flume: {
+              flume: {
+                CHANNEL: {
+                  ChannelSize: {
+                    rate: null
+                  }
+                }
+              }
+            }
+          }
+        },
+        seriesArray: [],
+        title: 'empty data'
+      },
+      {
+        data: {
+          metrics: {
+            flume: {
+              flume: {
+                CHANNEL: {
+                  ChannelSize: {
+                    rate: {
+                      avg: [
+                        [0, 1445472000],
+                        [1, 1445472015]
+                      ],
+                      max: [
+                        [2, 1445472000],
+                        [3, 1445472015]
+                      ],
+                      min: [
+                        [4, 1445472000],
+                        [5, 1445472015]
+                      ]
+                    }
+                  }
+                }
+              }
+            }
+          }
+        },
+        seriesArray: [
+          {
+            name: Em.I18n.t('services.service.info.metrics.flume.channelType').format('avg'),
+            data: [
+              {
+                x: 1445472000,
+                y: 0
+              },
+              {
+                x: 1445472015,
+                y: 1
+              }
+            ]
+          },
+          {
+            name: Em.I18n.t('services.service.info.metrics.flume.channelType').format('max'),
+            data: [
+              {
+                x: 1445472000,
+                y: 2
+              },
+              {
+                x: 1445472015,
+                y: 3
+              }
+            ]
+          },
+          {
+            name: Em.I18n.t('services.service.info.metrics.flume.channelType').format('min'),
+            data: [
+              {
+                x: 1445472000,
+                y: 4
+              },
+              {
+                x: 1445472015,
+                y: 5
+              }
+            ]
+          }
+        ],
+        title: 'valid data'
+      }
+    ];
+
+    cases.forEach(function (item) {
+      it(item.title, function () {
+        expect(view.transformToSeries(item.data)).to.eql(item.seriesArray);
+      });
+    });
+
+  });
+
+});

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a10db2b/ambari-web/test/views/main/service/info/metrics/flume/channel_sum_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/service/info/metrics/flume/channel_sum_test.js b/ambari-web/test/views/main/service/info/metrics/flume/channel_sum_test.js
new file mode 100644
index 0000000..03ad3f7
--- /dev/null
+++ b/ambari-web/test/views/main/service/info/metrics/flume/channel_sum_test.js
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+var App = require('app');
+require('views/main/service/info/metrics/flume/channel_sum');
+
+describe('App.ChartServiceMetricsFlume_ChannelSizeSum', function () {
+
+  var view;
+
+  beforeEach(function () {
+    view = App.ChartServiceMetricsFlume_ChannelSizeSum.create();
+  });
+
+  describe('#transformToSeries', function () {
+
+    var cases = [
+      {
+        data: {},
+        seriesArray: [],
+        title: 'empty response'
+      },
+      {
+        data: {
+          metrics: {}
+        },
+        seriesArray: [],
+        title: 'invalid response'
+      },
+      {
+        data: {
+          metrics: {
+            flume: {
+              flume: {
+                CHANNEL: {
+                  ChannelSize: {
+                    rate: null
+                  }
+                }
+              }
+            }
+          }
+        },
+        seriesArray: [],
+        title: 'empty data'
+      },
+      {
+        data: {
+          metrics: {
+            flume: {
+              flume: {
+                CHANNEL: {
+                  ChannelSize: {
+                    rate: {
+                      sum: [
+                        [0, 1445472000],
+                        [1, 1445472015]
+                      ]
+                    }
+                  }
+                }
+              }
+            }
+          }
+        },
+        seriesArray: [
+          {
+            name: Em.I18n.t('services.service.info.metrics.flume.channelSizeSum'),
+            data: [
+              {
+                x: 1445472000,
+                y: 0
+              },
+              {
+                x: 1445472015,
+                y: 1
+              }
+            ]
+          }
+        ],
+        title: 'valid data'
+      }
+    ];
+
+    cases.forEach(function (item) {
+      it(item.title, function () {
+        expect(view.transformToSeries(item.data)).to.eql(item.seriesArray);
+      });
+    });
+
+  });
+
+});

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a10db2b/ambari-web/test/views/main/service/info/metrics/flume/flume_incoming_mma_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/service/info/metrics/flume/flume_incoming_mma_test.js b/ambari-web/test/views/main/service/info/metrics/flume/flume_incoming_mma_test.js
new file mode 100644
index 0000000..50748c5
--- /dev/null
+++ b/ambari-web/test/views/main/service/info/metrics/flume/flume_incoming_mma_test.js
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+var App = require('app');
+require('views/main/service/info/metrics/flume/flume_incoming_mma');
+
+describe('App.ChartServiceMetricsFlume_IncommingMMA', function () {
+
+  var view;
+
+  beforeEach(function () {
+    view = App.ChartServiceMetricsFlume_IncommingMMA.create();
+  });
+
+  describe('#transformToSeries', function () {
+
+    var cases = [
+      {
+        data: {},
+        seriesArray: [],
+        title: 'empty response'
+      },
+      {
+        data: {
+          metrics: {}
+        },
+        seriesArray: [],
+        title: 'invalid response'
+      },
+      {
+        data: {
+          metrics: {
+            flume: {
+              flume: {
+                CHANNEL: {
+                  EventPutSuccessCount: {
+                    rate: null
+                  }
+                }
+              }
+            }
+          }
+        },
+        seriesArray: [],
+        title: 'empty data'
+      },
+      {
+        data: {
+          metrics: {
+            flume: {
+              flume: {
+                CHANNEL: {
+                  EventPutSuccessCount: {
+                    rate: {
+                      avg: [
+                        [0, 1445472000],
+                        [1, 1445472015]
+                      ],
+                      max: [
+                        [2, 1445472000],
+                        [3, 1445472015]
+                      ],
+                      min: [
+                        [4, 1445472000],
+                        [5, 1445472015]
+                      ]
+                    }
+                  }
+                }
+              }
+            }
+          }
+        },
+        seriesArray: [
+          {
+            name: Em.I18n.t('services.service.info.metrics.flume.incoming_mma').format('avg'),
+            data: [
+              {
+                x: 1445472000,
+                y: 0
+              },
+              {
+                x: 1445472015,
+                y: 1
+              }
+            ]
+          },
+          {
+            name: Em.I18n.t('services.service.info.metrics.flume.incoming_mma').format('max'),
+            data: [
+              {
+                x: 1445472000,
+                y: 2
+              },
+              {
+                x: 1445472015,
+                y: 3
+              }
+            ]
+          },
+          {
+            name: Em.I18n.t('services.service.info.metrics.flume.incoming_mma').format('min'),
+            data: [
+              {
+                x: 1445472000,
+                y: 4
+              },
+              {
+                x: 1445472015,
+                y: 5
+              }
+            ]
+          }
+        ],
+        title: 'valid data'
+      }
+    ];
+
+    cases.forEach(function (item) {
+      it(item.title, function () {
+        expect(view.transformToSeries(item.data)).to.eql(item.seriesArray);
+      });
+    });
+
+  });
+
+});

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a10db2b/ambari-web/test/views/main/service/info/metrics/flume/flume_incoming_sum_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/service/info/metrics/flume/flume_incoming_sum_test.js b/ambari-web/test/views/main/service/info/metrics/flume/flume_incoming_sum_test.js
new file mode 100644
index 0000000..0a20a9d
--- /dev/null
+++ b/ambari-web/test/views/main/service/info/metrics/flume/flume_incoming_sum_test.js
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+var App = require('app');
+require('views/main/service/info/metrics/flume/flume_incoming_sum');
+
+describe('App.ChartServiceMetricsFlume_IncommingSum', function () {
+
+  var view;
+
+  beforeEach(function () {
+    view = App.ChartServiceMetricsFlume_IncommingSum.create();
+  });
+
+  describe('#transformToSeries', function () {
+
+    var cases = [
+      {
+        data: {},
+        seriesArray: [],
+        title: 'empty response'
+      },
+      {
+        data: {
+          metrics: {}
+        },
+        seriesArray: [],
+        title: 'invalid response'
+      },
+      {
+        data: {
+          metrics: {
+            flume: {
+              flume: {
+                CHANNEL: {
+                  EventPutSuccessCount: {
+                    rate: null
+                  }
+                }
+              }
+            }
+          }
+        },
+        seriesArray: [],
+        title: 'empty data'
+      },
+      {
+        data: {
+          metrics: {
+            flume: {
+              flume: {
+                CHANNEL: {
+                  EventPutSuccessCount: {
+                    rate: {
+                      sum: [
+                        [0, 1445472000],
+                        [1, 1445472015]
+                      ]
+                    }
+                  }
+                }
+              }
+            }
+          }
+        },
+        seriesArray: [
+          {
+            name: Em.I18n.t('services.service.info.metrics.flume.incoming.sum'),
+            data: [
+              {
+                x: 1445472000,
+                y: 0
+              },
+              {
+                x: 1445472015,
+                y: 1
+              }
+            ]
+          }
+        ],
+        title: 'valid data'
+      }
+    ];
+
+    cases.forEach(function (item) {
+      it(item.title, function () {
+        expect(view.transformToSeries(item.data)).to.eql(item.seriesArray);
+      });
+    });
+
+  });
+
+});

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a10db2b/ambari-web/test/views/main/service/info/metrics/flume/flume_outgoing_mma_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/service/info/metrics/flume/flume_outgoing_mma_test.js b/ambari-web/test/views/main/service/info/metrics/flume/flume_outgoing_mma_test.js
new file mode 100644
index 0000000..f00dde4
--- /dev/null
+++ b/ambari-web/test/views/main/service/info/metrics/flume/flume_outgoing_mma_test.js
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+var App = require('app');
+require('views/main/service/info/metrics/flume/flume_outgoing_mma');
+
+describe('App.ChartServiceMetricsFlume_OutgoingMMA', function () {
+
+  var view;
+
+  beforeEach(function () {
+    view = App.ChartServiceMetricsFlume_OutgoingMMA.create();
+  });
+
+  describe('#transformToSeries', function () {
+
+    var cases = [
+      {
+        data: {},
+        seriesArray: [],
+        title: 'empty response'
+      },
+      {
+        data: {
+          metrics: {}
+        },
+        seriesArray: [],
+        title: 'invalid response'
+      },
+      {
+        data: {
+          metrics: {
+            flume: {
+              flume: {
+                CHANNEL: {
+                  EventTakeSuccessCount: {
+                    rate: null
+                  }
+                }
+              }
+            }
+          }
+        },
+        seriesArray: [],
+        title: 'empty data'
+      },
+      {
+        data: {
+          metrics: {
+            flume: {
+              flume: {
+                CHANNEL: {
+                  EventTakeSuccessCount: {
+                    rate: {
+                      avg: [
+                        [0, 1445472000],
+                        [1, 1445472015]
+                      ],
+                      max: [
+                        [2, 1445472000],
+                        [3, 1445472015]
+                      ],
+                      min: [
+                        [4, 1445472000],
+                        [5, 1445472015]
+                      ]
+                    }
+                  }
+                }
+              }
+            }
+          }
+        },
+        seriesArray: [
+          {
+            name: Em.I18n.t('services.service.info.metrics.flume.outgoing_mma').format('avg'),
+            data: [
+              {
+                x: 1445472000,
+                y: 0
+              },
+              {
+                x: 1445472015,
+                y: 1
+              }
+            ]
+          },
+          {
+            name: Em.I18n.t('services.service.info.metrics.flume.outgoing_mma').format('max'),
+            data: [
+              {
+                x: 1445472000,
+                y: 2
+              },
+              {
+                x: 1445472015,
+                y: 3
+              }
+            ]
+          },
+          {
+            name: Em.I18n.t('services.service.info.metrics.flume.outgoing_mma').format('min'),
+            data: [
+              {
+                x: 1445472000,
+                y: 4
+              },
+              {
+                x: 1445472015,
+                y: 5
+              }
+            ]
+          }
+        ],
+        title: 'valid data'
+      }
+    ];
+
+    cases.forEach(function (item) {
+      it(item.title, function () {
+        expect(view.transformToSeries(item.data)).to.eql(item.seriesArray);
+      });
+    });
+
+  });
+
+});

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a10db2b/ambari-web/test/views/main/service/info/metrics/flume/flume_outgoing_sum_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/service/info/metrics/flume/flume_outgoing_sum_test.js b/ambari-web/test/views/main/service/info/metrics/flume/flume_outgoing_sum_test.js
new file mode 100644
index 0000000..9c725d1
--- /dev/null
+++ b/ambari-web/test/views/main/service/info/metrics/flume/flume_outgoing_sum_test.js
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+var App = require('app');
+require('views/main/service/info/metrics/flume/flume_outgoing_sum');
+
+describe('App.ChartServiceMetricsFlume_OutgoingSum', function () {
+
+  var view;
+
+  beforeEach(function () {
+    view = App.ChartServiceMetricsFlume_OutgoingSum.create();
+  });
+
+  describe('#transformToSeries', function () {
+
+    var cases = [
+      {
+        data: {},
+        seriesArray: [],
+        title: 'empty response'
+      },
+      {
+        data: {
+          metrics: {}
+        },
+        seriesArray: [],
+        title: 'invalid response'
+      },
+      {
+        data: {
+          metrics: {
+            flume: {
+              flume: {
+                CHANNEL: {
+                  EventTakeSuccessCount: {
+                    rate: null
+                  }
+                }
+              }
+            }
+          }
+        },
+        seriesArray: [],
+        title: 'empty data'
+      },
+      {
+        data: {
+          metrics: {
+            flume: {
+              flume: {
+                CHANNEL: {
+                  EventTakeSuccessCount: {
+                    rate: {
+                      sum: [
+                        [0, 1445472000],
+                        [1, 1445472015]
+                      ]
+                    }
+                  }
+                }
+              }
+            }
+          }
+        },
+        seriesArray: [
+          {
+            name: Em.I18n.t('services.service.info.metrics.flume.outgoing.sum'),
+            data: [
+              {
+                x: 1445472000,
+                y: 0
+              },
+              {
+                x: 1445472015,
+                y: 1
+              }
+            ]
+          }
+        ],
+        title: 'valid data'
+      }
+    ];
+
+    cases.forEach(function (item) {
+      it(item.title, function () {
+        expect(view.transformToSeries(item.data)).to.eql(item.seriesArray);
+      });
+    });
+
+  });
+
+});


[49/50] [abbrv] ambari git commit: AMBARI-13542 Error count on Ranger service tab does not match the total error count on configs tab. (atkach)

Posted by nc...@apache.org.
AMBARI-13542 Error count on Ranger service tab does not match the total error count on configs tab. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2ac17444
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2ac17444
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2ac17444

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 2ac17444bcc4f542507188250ebd835ae067e87e
Parents: 6a10db2
Author: Andrii Tkach <at...@hortonworks.com>
Authored: Fri Oct 23 16:34:12 2015 +0300
Committer: Andrii Tkach <at...@hortonworks.com>
Committed: Fri Oct 23 16:34:12 2015 +0300

----------------------------------------------------------------------
 ambari-web/app/models/configs/objects/service_config.js        | 2 +-
 .../app/models/configs/objects/service_config_property.js      | 6 ++++++
 .../app/views/common/configs/widgets/config_widget_view.js     | 1 +
 3 files changed, 8 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
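
The rule this commit introduces, stated in isolation: a widget-backed config
contributes to the enhanced-configs error badge only if it is invalid,
visible, and not inside a hidden theme section. A minimal sketch with plain
objects standing in for the Ember config objects:

// Counting rule from service_config.js, with plain-object stand-ins.
function countEnhancedConfigErrors(configs) {
  return configs.filter(function (c) {
    return !c.isValid && c.widget && c.isVisible && !c.hiddenBySection;
  }).length;
}

var errors = countEnhancedConfigErrors([
  { isValid: false, widget: {}, isVisible: true, hiddenBySection: false }, // counted
  { isValid: false, widget: {}, isVisible: true, hiddenBySection: true }   // now skipped
]);
// errors === 1, matching the per-tab count on the configs page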


http://git-wip-us.apache.org/repos/asf/ambari/blob/2ac17444/ambari-web/app/models/configs/objects/service_config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/configs/objects/service_config.js b/ambari-web/app/models/configs/objects/service_config.js
index 492f1c0..c4bad57 100644
--- a/ambari-web/app/models/configs/objects/service_config.js
+++ b/ambari-web/app/models/configs/objects/service_config.js
@@ -61,7 +61,7 @@ App.ServiceConfig = Ember.Object.extend({
         category.incrementProperty('nonSlaveErrorCount');
         masterErrors++;
       }
-      if (!item.get('isValid') && item.get('widget') && item.get('isVisible')) {
+      if (!item.get('isValid') && item.get('widget') && item.get('isVisible') && !item.get('hiddenBySection')) {
         enhancedConfigsErrors++;
       }
       if (item.get('overrides')) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/2ac17444/ambari-web/app/models/configs/objects/service_config_property.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/configs/objects/service_config_property.js b/ambari-web/app/models/configs/objects/service_config_property.js
index 2c7aa87..1124d54 100644
--- a/ambari-web/app/models/configs/objects/service_config_property.js
+++ b/ambari-web/app/models/configs/objects/service_config_property.js
@@ -161,6 +161,12 @@ App.ServiceConfigProperty = Em.Object.extend({
   showAsTextBox: false,
 
   /**
+   * config is invisible since wrapper section is hidden
+   * @type {boolean}
+   */
+  hiddenBySection: false,
+
+  /**
    * @type {boolean}
    */
   recommendedValueExists: function () {

http://git-wip-us.apache.org/repos/asf/ambari/blob/2ac17444/ambari-web/app/views/common/configs/widgets/config_widget_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/configs/widgets/config_widget_view.js b/ambari-web/app/views/common/configs/widgets/config_widget_view.js
index 9e120ce..934d9a9 100644
--- a/ambari-web/app/views/common/configs/widgets/config_widget_view.js
+++ b/ambari-web/app/views/common/configs/widgets/config_widget_view.js
@@ -492,6 +492,7 @@ App.ConfigWidgetView = Em.View.extend(App.SupportsDependentConfigs, App.WidgetPo
           themeResource = App.SubSectionTab.find().findProperty('name', subsectionConditionName);
         }
         themeResource.set('isHiddenByConfig', !valueAttributes['visible']);
+        themeResource.get('configs').setEach('hiddenBySection', !valueAttributes['visible']);
       }
     }
   },


[31/50] [abbrv] ambari git commit: AMBARI-13517. Ambari Server JVM crashed after several clicks in Web UI to navigate graph timerange. (swagle)

Posted by nc...@apache.org.
AMBARI-13517. Ambari Server JVM crashed after several clicks in Web UI to navigate graph timerange. (swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/02943430
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/02943430
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/02943430

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 0294343025e5a9bd19a69c1ca3d4c69c1ecbaa62
Parents: 3b9b7c7
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Thu Oct 22 13:38:20 2015 -0700
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Thu Oct 22 13:38:20 2015 -0700

----------------------------------------------------------------------
 .../metrics2/sink/timeline/TimelineMetric.java  |   7 +-
 .../timeline/cache/TimelineMetricsCache.java    |   2 +-
 .../cache/TimelineMetricsCacheTest.java         |   2 +-
 .../timeline/HBaseTimelineMetricStore.java      |   2 +-
 .../metrics/timeline/PhoenixHBaseAccessor.java  |  21 +--
 .../aggregators/TimelineMetricReadHelper.java   |   2 +-
 .../metrics/timeline/ITClusterAggregator.java   |   5 +-
 .../metrics/timeline/ITMetricAggregator.java    |   3 +-
 .../metrics/timeline/MetricTestHelper.java      |   3 +-
 .../timeline/TestTimelineMetricStore.java       |   5 +-
 .../metrics/MetricsPaddingMethod.java           |  10 +-
 .../cache/TimelineMetricCacheProvider.java      |  29 ++--
 .../cache/TimelineMetricsCacheSizeOfEngine.java | 137 +++++++++++++++++++
 .../timeline/MetricsPaddingMethodTest.java      |   2 +-
 .../cache/TimelineMetricCacheSizingTest.java    | 110 +++++++++++++++
 .../timeline/cache/TimelineMetricCacheTest.java |   4 +-
 16 files changed, 300 insertions(+), 44 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/02943430/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java
index 8b8df06..e4dc423 100644
--- a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java
@@ -27,6 +27,7 @@ import javax.xml.bind.annotation.XmlRootElement;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.codehaus.jackson.map.annotate.JsonDeserialize;
 
 @XmlRootElement(name = "metric")
 @XmlAccessorType(XmlAccessType.NONE)
@@ -41,7 +42,7 @@ public class TimelineMetric implements Comparable<TimelineMetric> {
   private long timestamp;
   private long startTime;
   private String type;
-  private Map<Long, Double> metricValues = new TreeMap<Long, Double>();
+  private TreeMap<Long, Double> metricValues = new TreeMap<Long, Double>();
 
   // default
   public TimelineMetric() {
@@ -124,11 +125,11 @@ public class TimelineMetric implements Comparable<TimelineMetric> {
   }
 
   @XmlElement(name = "metrics")
-  public Map<Long, Double> getMetricValues() {
+  public TreeMap<Long, Double> getMetricValues() {
     return metricValues;
   }
 
-  public void setMetricValues(Map<Long, Double> metricValues) {
+  public void setMetricValues(TreeMap<Long, Double> metricValues) {
     this.metricValues = metricValues;
   }
 

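The root of the LinkedHashMap/TreeMap mismatch is the declared type: Jackson builds whatever concrete Map type the setter declares, so a setter typed Map<Long, Double> yields a LinkedHashMap while a setter typed TreeMap<Long, Double> yields a sorted, navigable TreeMap directly. A minimal sketch of that behavior against the org.codehaus.jackson API this codebase ships with (the Holder class and its field are illustrative, not Ambari code):

    import java.util.TreeMap;
    import org.codehaus.jackson.map.ObjectMapper;

    public class TreeMapDeserSketch {
      public static class Holder {
        private TreeMap<Long, Double> metricValues = new TreeMap<Long, Double>();
        // Jackson instantiates the setter's declared concrete type on deserialization.
        public TreeMap<Long, Double> getMetricValues() { return metricValues; }
        public void setMetricValues(TreeMap<Long, Double> v) { this.metricValues = v; }
      }

      public static void main(String[] args) throws Exception {
        String json = "{\"metricValues\":{\"1445472000\":0.0,\"1445472015\":1.0}}";
        Holder h = new ObjectMapper().readValue(json, Holder.class);
        // Keys come back sorted and navigable; no downstream cast needed.
        System.out.println(h.metricValues.lastKey()); // 1445472015
      }
    }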
http://git-wip-us.apache.org/repos/asf/ambari/blob/02943430/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/cache/TimelineMetricsCache.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/cache/TimelineMetricsCache.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/cache/TimelineMetricsCache.java
index 77a5499..4e9e36e 100644
--- a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/cache/TimelineMetricsCache.java
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/cache/TimelineMetricsCache.java
@@ -161,7 +161,7 @@ public class TimelineMetricsCache {
     Double value = counterMetricLastValue.get(metricName);
     double previousValue = value != null ? value : firstValue;
     Map<Long, Double> metricValues = timelineMetric.getMetricValues();
-    Map<Long, Double>   newMetricValues = new TreeMap<Long, Double>();
+    TreeMap<Long, Double>   newMetricValues = new TreeMap<Long, Double>();
     for (Map.Entry<Long, Double> entry : metricValues.entrySet()) {
       newMetricValues.put(entry.getKey(), entry.getValue() - previousValue);
       previousValue = entry.getValue();

http://git-wip-us.apache.org/repos/asf/ambari/blob/02943430/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/TimelineMetricsCacheTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/TimelineMetricsCacheTest.java b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/TimelineMetricsCacheTest.java
index 4a13d63..ad98525 100644
--- a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/TimelineMetricsCacheTest.java
+++ b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/TimelineMetricsCacheTest.java
@@ -80,7 +80,7 @@ public class TimelineMetricsCacheTest {
     timelineMetric.setAppId("test serviceName");
     timelineMetric.setStartTime(startTime);
     timelineMetric.setType("Number");
-    timelineMetric.setMetricValues(metricValues);
+    timelineMetric.setMetricValues(new TreeMap<Long, Double>(metricValues));
     return timelineMetric;
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/02943430/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
index 9c0b94d..52cef59 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
@@ -275,7 +275,7 @@ public class HBaseTimelineMetricStore extends AbstractService implements Timelin
       metric.setHostName(metricList.get(0).getHostName());
       // Assumption that metrics are ordered by start time
       metric.setStartTime(metricList.get(0).getStartTime());
-      Map<Long, Double> metricRecords = new TreeMap<Long, Double>();
+      TreeMap<Long, Double> metricRecords = new TreeMap<Long, Double>();
       for (TimelineMetric timelineMetric : metricList) {
         metricRecords.putAll(timelineMetric.getMetricValues());
       }

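Merging the per-metric maps with putAll above leans on TreeMap keeping keys sorted, so the combined series comes out time-ordered no matter what order the chunks arrive in. A small illustration with made-up timestamps:

    import java.util.TreeMap;

    public class MergeSketch {
      public static void main(String[] args) {
        TreeMap<Long, Double> merged = new TreeMap<Long, Double>();
        TreeMap<Long, Double> chunk1 = new TreeMap<Long, Double>();
        chunk1.put(30L, 3.0);
        chunk1.put(10L, 1.0);
        TreeMap<Long, Double> chunk2 = new TreeMap<Long, Double>();
        chunk2.put(20L, 2.0);
        merged.putAll(chunk1);   // order of putAll calls does not matter
        merged.putAll(chunk2);
        System.out.println(merged.keySet()); // [10, 20, 30]
      }
    }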
http://git-wip-us.apache.org/repos/asf/ambari/blob/02943430/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
index 9488316..1ed2a72 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
@@ -52,6 +52,7 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.TreeMap;
 import java.util.concurrent.TimeUnit;
 
 import static java.util.concurrent.TimeUnit.SECONDS;
@@ -187,19 +188,19 @@ public class PhoenixHBaseAccessor {
     return metric;
   }
 
-  private static Map<Long, Double> readLastMetricValueFromJSON(String json)
-    throws IOException {
-    Map<Long, Double> values = readMetricFromJSON(json);
-    Long lastTimeStamp = Collections.max(values.keySet());
+  private static TreeMap<Long, Double> readLastMetricValueFromJSON(String json)
+      throws IOException {
+    TreeMap<Long, Double> values = readMetricFromJSON(json);
+    Long lastTimeStamp = values.lastKey();
 
-    HashMap<Long, Double> valueMap = new HashMap<Long, Double>(1);
+    TreeMap<Long, Double> valueMap = new TreeMap<Long, Double>();
     valueMap.put(lastTimeStamp, values.get(lastTimeStamp));
     return valueMap;
   }
 
   @SuppressWarnings("unchecked")
-  public static Map<Long, Double>  readMetricFromJSON(String json) throws IOException {
-    return (Map<Long, Double>) mapper.readValue(json, metricValuesTypeRef);
+  public static TreeMap<Long, Double>  readMetricFromJSON(String json) throws IOException {
+    return (TreeMap<Long, Double>) mapper.readValue(json, metricValuesTypeRef);
   }
 
   private Connection getConnectionRetryingOnException()
@@ -467,8 +468,10 @@ public class PhoenixHBaseAccessor {
       // which is thrown in hbase TimeRange.java
       Throwable io = ex.getCause();
       String className = null;
-      for (StackTraceElement ste : io.getStackTrace()) {
-        className = ste.getClassName();
+      if (io != null) {
+        for (StackTraceElement ste : io.getStackTrace()) {
+          className = ste.getClassName();
+        }
       }
       if (className != null && className.equals("TimeRange")) {
         // This is "maxStamp is smaller than minStamp" exception

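Two things change above: the stack-trace walk now guards against a null exception cause, and readLastMetricValueFromJSON picks the newest timestamp with lastKey() instead of Collections.max(). On a TreeMap the greatest key falls out of the tree structure in O(log n), where scanning the key set is O(n). A quick sketch of the equivalence (timestamps are arbitrary):

    import java.util.Collections;
    import java.util.TreeMap;

    public class LastKeySketch {
      public static void main(String[] args) {
        TreeMap<Long, Double> values = new TreeMap<Long, Double>();
        values.put(1445472000L, 0.0);
        values.put(1445472015L, 1.0);
        // Same answer, different cost: O(log n) vs O(n).
        System.out.println(values.lastKey());                  // 1445472015
        System.out.println(Collections.max(values.keySet()));  // 1445472015
      }
    }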
http://git-wip-us.apache.org/repos/asf/ambari/blob/02943430/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricReadHelper.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricReadHelper.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricReadHelper.java
index 573e09d..dc27614 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricReadHelper.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricReadHelper.java
@@ -40,7 +40,7 @@ public class TimelineMetricReadHelper {
   public TimelineMetric getTimelineMetricFromResultSet(ResultSet rs)
       throws SQLException, IOException {
     TimelineMetric metric = getTimelineMetricCommonsFromResultSet(rs);
-    Map<Long, Double> sortedByTimeMetrics = new TreeMap<Long, Double>(
+    TreeMap<Long, Double> sortedByTimeMetrics = new TreeMap<Long, Double>(
         PhoenixHBaseAccessor.readMetricFromJSON(rs.getString("METRICS")));
     metric.setMetricValues(sortedByTimeMetrics);
     return metric;

http://git-wip-us.apache.org/repos/asf/ambari/blob/02943430/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITClusterAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITClusterAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITClusterAggregator.java
index b7b1737..cbf0233 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITClusterAggregator.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITClusterAggregator.java
@@ -47,6 +47,7 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.TreeMap;
 
 import static junit.framework.Assert.assertEquals;
 import static junit.framework.Assert.assertNotNull;
@@ -503,7 +504,7 @@ public class ITClusterAggregator extends AbstractMiniHBaseClusterTest {
     metric1.setAppId("resourcemanager");
     metric1.setHostName("h1");
     metric1.setStartTime(1431372311811l);
-    metric1.setMetricValues(new HashMap<Long, Double>() {{
+    metric1.setMetricValues(new TreeMap<Long, Double>() {{
       put(1431372311811l, 1.0);
       put(1431372321811l, 1.0);
       put(1431372331811l, 1.0);
@@ -518,7 +519,7 @@ public class ITClusterAggregator extends AbstractMiniHBaseClusterTest {
     metric2.setAppId("resourcemanager");
     metric2.setHostName("h1");
     metric2.setStartTime(1431372381810l);
-    metric2.setMetricValues(new HashMap<Long, Double>() {{
+    metric2.setMetricValues(new TreeMap<Long, Double>() {{
       put(1431372381810l, 1.0);
       put(1431372391811l, 1.0);
       put(1431372401811l, 1.0);

http://git-wip-us.apache.org/repos/asf/ambari/blob/02943430/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITMetricAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITMetricAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITMetricAggregator.java
index a3640d0..e9c25cf 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITMetricAggregator.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITMetricAggregator.java
@@ -42,6 +42,7 @@ import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.TreeMap;
 
 import static junit.framework.Assert.assertEquals;
 import static junit.framework.Assert.assertTrue;
@@ -383,7 +384,7 @@ public class ITMetricAggregator extends AbstractMiniHBaseClusterTest {
     m.setHostName(host);
     m.setMetricName(metricName);
     m.setStartTime(startTime);
-    Map<Long, Double> vals = new HashMap<Long, Double>();
+    TreeMap<Long, Double> vals = new TreeMap<Long, Double>();
     vals.put(startTime + 15000l, 0.0);
     vals.put(startTime + 30000l, 0.0);
     vals.put(startTime + 45000l, 1.0);

http://git-wip-us.apache.org/repos/asf/ambari/blob/02943430/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricTestHelper.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricTestHelper.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricTestHelper.java
index 26771d7..37ec134 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricTestHelper.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricTestHelper.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.TreeMap;
 
 public class MetricTestHelper {
 
@@ -83,7 +84,7 @@ public class MetricTestHelper {
     m.setInstanceId(instanceId);
     m.setMetricName(metricName);
     m.setStartTime(startTime);
-    Map<Long, Double> vals = new HashMap<Long, Double>();
+    TreeMap<Long, Double> vals = new TreeMap<Long, Double>();
     vals.put(startTime + 15000l, val);
     vals.put(startTime + 30000l, val);
     vals.put(startTime + 45000l, val);

http://git-wip-us.apache.org/repos/asf/ambari/blob/02943430/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
index b0aad57..4b5bfe0 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
@@ -25,6 +25,7 @@ import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
+import java.util.TreeMap;
 
 public class TestTimelineMetricStore implements TimelineMetricStore {
   @Override
@@ -44,7 +45,7 @@ public class TestTimelineMetricStore implements TimelineMetricStore {
     metric1.setInstanceId(null);
     metric1.setHostName("c6401");
     metric1.setStartTime(1407949812L);
-    metric1.setMetricValues(new HashMap<Long, Double>() {{
+    metric1.setMetricValues(new TreeMap<Long, Double>() {{
       put(1407949812L, 1.0d);
       put(1407949912L, 1.8d);
       put(1407950002L, 0.7d);
@@ -55,7 +56,7 @@ public class TestTimelineMetricStore implements TimelineMetricStore {
     metric2.setInstanceId("3");
     metric2.setHostName("c6401");
     metric2.setStartTime(1407949812L);
-    metric2.setMetricValues(new HashMap<Long, Double>() {{
+    metric2.setMetricValues(new TreeMap<Long, Double>() {{
       put(1407949812L, 2.5d);
       put(1407949912L, 3.0d);
       put(1407950002L, 0.9d);

http://git-wip-us.apache.org/repos/asf/ambari/blob/02943430/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricsPaddingMethod.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricsPaddingMethod.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricsPaddingMethod.java
index 522c0bd..ccfb713 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricsPaddingMethod.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricsPaddingMethod.java
@@ -52,15 +52,7 @@ public class MetricsPaddingMethod {
       return;
     }
 
-    TreeMap<Long, Double> values;
-    Map<Long, Double> metricValuesMap = metric.getMetricValues();
-    if (metricValuesMap instanceof TreeMap) {
-      values = (TreeMap<Long, Double>) metricValuesMap;
-    }
-    else {
-      // JSON dser returns LinkedHashMap that is not Navigable
-      values = new TreeMap<Long, Double>(metricValuesMap);
-    }
+    TreeMap<Long, Double> values = metric.getMetricValues();
 
     long dataInterval = getTimelineMetricInterval(values);
 

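The deleted instanceof/copy dance existed because padding needs a navigable view of the series; with getMetricValues() now guaranteeing a TreeMap, the defensive copy is dead weight. A hypothetical sketch of the kind of interval probing this enables (getTimelineMetricInterval's actual logic may differ):

    import java.util.TreeMap;

    public class IntervalSketch {
      // Illustrative only: estimate the sampling interval from the first two keys.
      static long intervalOf(TreeMap<Long, Double> values) {
        if (values.size() < 2) {
          return -1;
        }
        long first = values.firstKey();
        return values.higherKey(first) - first;
      }

      public static void main(String[] args) {
        TreeMap<Long, Double> values = new TreeMap<Long, Double>();
        values.put(1000L, 1.0);
        values.put(1015L, 2.0);
        values.put(1030L, 3.0);
        System.out.println(intervalOf(values)); // 15
      }
    }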
http://git-wip-us.apache.org/repos/asf/ambari/blob/02943430/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/cache/TimelineMetricCacheProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/cache/TimelineMetricCacheProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/cache/TimelineMetricCacheProvider.java
index 6d80687..9f268b2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/cache/TimelineMetricCacheProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/cache/TimelineMetricCacheProvider.java
@@ -23,6 +23,8 @@ import net.sf.ehcache.Cache;
 import net.sf.ehcache.CacheManager;
 import net.sf.ehcache.config.CacheConfiguration;
 import net.sf.ehcache.config.PersistenceConfiguration;
+import net.sf.ehcache.config.SizeOfPolicyConfiguration;
+import net.sf.ehcache.config.SizeOfPolicyConfiguration.MaxDepthExceededBehavior;
 import net.sf.ehcache.store.MemoryStoreEvictionPolicy;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.commons.lang.StringUtils;
@@ -40,6 +42,7 @@ import static net.sf.ehcache.config.PersistenceConfiguration.*;
 public class TimelineMetricCacheProvider {
   private TimelineMetricCache timelineMetricsCache;
   private volatile boolean isCacheInitialized = false;
+  public static final String TIMELINE_METRIC_CACHE_MANAGER_NAME = "timelineMetricCacheManager";
   public static final String TIMELINE_METRIC_CACHE_INSTANCE_NAME = "timelineMetricCache";
 
   Configuration configuration;
@@ -61,8 +64,13 @@ public class TimelineMetricCacheProvider {
     }
 
     System.setProperty("net.sf.ehcache.skipUpdateCheck", "true");
+    // Use custom sizing engine to speed cache sizing calculations
+    System.setProperty("net.sf.ehcache.sizeofengine." + TIMELINE_METRIC_CACHE_MANAGER_NAME,
+      "org.apache.ambari.server.controller.metrics.timeline.cache.TimelineMetricsCacheSizeOfEngine");
+
     net.sf.ehcache.config.Configuration managerConfig =
       new net.sf.ehcache.config.Configuration();
+    managerConfig.setName(TIMELINE_METRIC_CACHE_MANAGER_NAME);
 
     // Set max heap available to the cache manager
     managerConfig.setMaxBytesLocalHeap(configuration.getMetricsCacheManagerHeapPercent());
@@ -74,17 +82,18 @@ public class TimelineMetricCacheProvider {
       configuration.getMetricCacheTTLSeconds() + ", idle = " +
       configuration.getMetricCacheIdleSeconds());
 
-    PersistenceConfiguration persistenceConfiguration = new PersistenceConfiguration();
-    persistenceConfiguration.setStrategy(Strategy.NONE.name());
-
-    //Create a Cache specifying its configuration.
+    // Create a Cache specifying its configuration.
     CacheConfiguration cacheConfiguration = new CacheConfiguration()
-        .name(TIMELINE_METRIC_CACHE_INSTANCE_NAME)
-        .timeToLiveSeconds(configuration.getMetricCacheTTLSeconds()) // 1 hour
-        .timeToIdleSeconds(configuration.getMetricCacheIdleSeconds()) // 5 minutes
-        .memoryStoreEvictionPolicy(MemoryStoreEvictionPolicy.LRU)
-        .eternal(false)
-        .persistence(persistenceConfiguration);
+      .name(TIMELINE_METRIC_CACHE_INSTANCE_NAME)
+      .timeToLiveSeconds(configuration.getMetricCacheTTLSeconds()) // 1 hour
+      .timeToIdleSeconds(configuration.getMetricCacheIdleSeconds()) // 5 minutes
+      .memoryStoreEvictionPolicy(MemoryStoreEvictionPolicy.LRU)
+      .sizeOfPolicy(new SizeOfPolicyConfiguration() // Set sizeOf policy to continue on max depth reached - avoid OOM
+        .maxDepth(10000)
+        .maxDepthExceededBehavior(MaxDepthExceededBehavior.CONTINUE))
+      .eternal(false)
+      .persistence(new PersistenceConfiguration()
+        .strategy(Strategy.NONE.name()));
 
     Cache cache = new Cache(cacheConfiguration);
 

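The system-property wiring above is ehcache 2.x's hook for binding a custom SizeOfEngine to a single CacheManager: the property is net.sf.ehcache.sizeofengine.<managerName>, which is why the manager now gets an explicit name. A stripped-down sketch of the pattern (manager and cache names here are placeholders):

    import net.sf.ehcache.Cache;
    import net.sf.ehcache.CacheManager;
    import net.sf.ehcache.config.CacheConfiguration;
    import net.sf.ehcache.config.Configuration;

    public class SizeOfEngineWiring {
      public static void main(String[] args) {
        // Bind the custom engine to the manager named "demoManager".
        System.setProperty("net.sf.ehcache.sizeofengine.demoManager",
            "org.apache.ambari.server.controller.metrics.timeline.cache.TimelineMetricsCacheSizeOfEngine");

        Configuration managerConfig = new Configuration().name("demoManager");
        // Byte-based heap sizing is what triggers sizeof calculations at all.
        managerConfig.setMaxBytesLocalHeap("10%");

        CacheManager manager = new CacheManager(managerConfig);
        manager.addCache(new Cache(new CacheConfiguration().name("demoCache")));
        manager.shutdown();
      }
    }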
http://git-wip-us.apache.org/repos/asf/ambari/blob/02943430/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/cache/TimelineMetricsCacheSizeOfEngine.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/cache/TimelineMetricsCacheSizeOfEngine.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/cache/TimelineMetricsCacheSizeOfEngine.java
new file mode 100644
index 0000000..d8f2e06
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/cache/TimelineMetricsCacheSizeOfEngine.java
@@ -0,0 +1,137 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.metrics.timeline.cache;
+
+import net.sf.ehcache.pool.Size;
+import net.sf.ehcache.pool.SizeOfEngine;
+import net.sf.ehcache.pool.impl.DefaultSizeOfEngine;
+import net.sf.ehcache.pool.sizeof.ReflectionSizeOf;
+import net.sf.ehcache.pool.sizeof.SizeOf;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import java.util.Map;
+
+/**
+ * Cache sizing engine that reduces reflective calls over the Object graph to
+ * find total Heap usage.
+ */
+public class TimelineMetricsCacheSizeOfEngine implements SizeOfEngine {
+
+  private final static Logger LOG = LoggerFactory.getLogger(TimelineMetricsCacheSizeOfEngine.class);
+  public static int DEFAULT_MAX_DEPTH = 1000;
+  public static boolean DEFAULT_ABORT_WHEN_MAX_DEPTH_EXCEEDED = false;
+
+  private SizeOfEngine underlying = null;
+  SizeOf reflectionSizeOf = new ReflectionSizeOf();
+
+  // Optimizations
+  private volatile long timelineMetricPrimitivesApproximation = 0;
+
+  private TimelineMetricsCacheSizeOfEngine(SizeOfEngine underlying) {
+    this.underlying = underlying;
+  }
+
+  public TimelineMetricsCacheSizeOfEngine() {
+    this(new DefaultSizeOfEngine(DEFAULT_MAX_DEPTH, DEFAULT_ABORT_WHEN_MAX_DEPTH_EXCEEDED));
+
+    LOG.info("Creating custom sizeof engine for TimelineMetrics.");
+  }
+
+  @Override
+  public Size sizeOf(Object key, Object value, Object container) {
+    try {
+      LOG.debug("BEGIN - Sizeof, key: {}, value: {}", key, value);
+
+      long size = 0;
+
+      if (key instanceof TimelineAppMetricCacheKey) {
+        size += getTimelineMetricCacheKeySize((TimelineAppMetricCacheKey) key);
+      }
+
+      if (value instanceof TimelineMetricsCacheValue) {
+        size += getTimelineMetricCacheValueSize((TimelineMetricsCacheValue) value);
+      }
+      // Mark size as not being exact
+      return new Size(size, false);
+    } finally {
+      LOG.debug("END - Sizeof, key: {}", key);
+    }
+  }
+
+  private long getTimelineMetricCacheKeySize(TimelineAppMetricCacheKey key) {
+    long size = reflectionSizeOf.sizeOf(key.getAppId());
+    size += key.getMetricNames() != null && !key.getMetricNames().isEmpty() ?
+      reflectionSizeOf.deepSizeOf(1000, false, key.getMetricNames()).getCalculated() : 0;
+    size += key.getSpec() != null ?
+      reflectionSizeOf.deepSizeOf(1000, false, key.getSpec()).getCalculated() : 0;
+    // 4 fixed longs of @TemporalInfo + reference
+    size += 40;
+    size += 8; // Object overhead
+
+    return size;
+  }
+
+  private long getTimelineMetricCacheValueSize(TimelineMetricsCacheValue value) {
+    long size = 16; // startTime + endTime
+    Map<String, TimelineMetric> metrics = value.getTimelineMetrics();
+    size += 8; // Object reference
+
+    if (metrics != null) {
+      for (Map.Entry<String, TimelineMetric> metricEntry : metrics.entrySet()) {
+        size += reflectionSizeOf.sizeOf(metricEntry.getKey());
+
+        TimelineMetric metric = metricEntry.getValue();
+
+        if (timelineMetricPrimitivesApproximation == 0) {
+          timelineMetricPrimitivesApproximation += reflectionSizeOf.sizeOf(metric.getMetricName());
+          timelineMetricPrimitivesApproximation += reflectionSizeOf.sizeOf(metric.getAppId());
+          timelineMetricPrimitivesApproximation += reflectionSizeOf.sizeOf(metric.getHostName());
+          timelineMetricPrimitivesApproximation += reflectionSizeOf.sizeOf(metric.getInstanceId());
+          timelineMetricPrimitivesApproximation += reflectionSizeOf.sizeOf(metric.getTimestamp());
+          timelineMetricPrimitivesApproximation += reflectionSizeOf.sizeOf(metric.getStartTime());
+          timelineMetricPrimitivesApproximation += reflectionSizeOf.sizeOf(metric.getType());
+          timelineMetricPrimitivesApproximation += 8; // Object overhead
+
+          LOG.debug("timelineMetricPrimitivesApproximation bytes = " + timelineMetricPrimitivesApproximation);
+        }
+        size += timelineMetricPrimitivesApproximation;
+
+        Map<Long, Double> metricValues = metric.getMetricValues();
+        if (metricValues != null && !metricValues.isEmpty()) {
+          // Boxed numerics: 12 bytes header + 8 bytes data + 4 bytes alignment = 24 each, i.e. 48 per Long/Double pair
+          // TreeMap entry: 12 bytes header + 20 bytes for 5 reference fields + 1 byte flag, aligned = 40
+          LOG.debug("Size of metric values: " + (48 + 40) * metricValues.size());
+          size += (48 + 40) * metricValues.size(); // constant cost per TreeMap entry
+        }
+      }
+      LOG.debug("Total Size of metric values in cache: " + size);
+    }
+
+    return size;
+  }
+
+  @Override
+  public SizeOfEngine copyWith(int maxDepth, boolean abortWhenMaxDepthExceeded) {
+    LOG.debug("Copying tracing sizeof engine, maxdepth: {}, abort: {}",
+      maxDepth, abortWhenMaxDepthExceeded);
+
+    return new TimelineMetricsCacheSizeOfEngine(
+      underlying.copyWith(maxDepth, abortWhenMaxDepthExceeded));
+  }
+}

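The constants in getTimelineMetricCacheValueSize work out to roughly 88 bytes per data point: about 24 bytes each for a boxed Long and Double (header, data, alignment) plus about 40 bytes for the TreeMap entry that holds them. These are assumptions about a 64-bit JVM with compressed oops, not measurements. A back-of-envelope check of what that predicts for the 25,000-point series used in the sizing test further down:

    public class PerSeriesEstimate {
      public static void main(String[] args) {
        int points = 25000;       // one series in TimelineMetricCacheSizingTest
        long perPoint = 48 + 40;  // boxed Long+Double pair plus TreeMap entry, in bytes
        long bytes = (long) points * perPoint;
        System.out.printf("~%d bytes (~%.1f MB) per series%n",
            bytes, bytes / (1024.0 * 1024.0)); // ~2.1 MB; six such series clear the 10 MB assertion
      }
    }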
http://git-wip-us.apache.org/repos/asf/ambari/blob/02943430/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/MetricsPaddingMethodTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/MetricsPaddingMethodTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/MetricsPaddingMethodTest.java
index c30c5eb..b35295d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/MetricsPaddingMethodTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/MetricsPaddingMethodTest.java
@@ -174,7 +174,7 @@ public class MetricsPaddingMethodTest {
     timelineMetric.setHostName("h1");
     timelineMetric.setAppId("a1");
     timelineMetric.setTimestamp(now);
-    Map<Long, Double> inputValues = new TreeMap<Long, Double>();
+    TreeMap<Long, Double> inputValues = new TreeMap<Long, Double>();
     inputValues.put(now - 100, 1.0d);
     inputValues.put(now - 200, 2.0d);
     inputValues.put(now - 300, 3.0d);

http://git-wip-us.apache.org/repos/asf/ambari/blob/02943430/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/cache/TimelineMetricCacheSizingTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/cache/TimelineMetricCacheSizingTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/cache/TimelineMetricCacheSizingTest.java
new file mode 100644
index 0000000..4dbf27a
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/cache/TimelineMetricCacheSizingTest.java
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.metrics.timeline.cache;
+
+import net.sf.ehcache.pool.sizeof.ReflectionSizeOf;
+import net.sf.ehcache.pool.sizeof.SizeOf;
+import org.apache.ambari.server.controller.internal.TemporalInfoImpl;
+import org.apache.ambari.server.controller.spi.TemporalInfo;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import org.junit.Assert;
+import org.junit.Test;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+public class TimelineMetricCacheSizingTest {
+
+  SizeOf reflectionSizeOf = new ReflectionSizeOf();
+
+  private TimelineMetric getSampleTimelineMetric(String metricName) {
+    TimelineMetric metric = new TimelineMetric();
+    metric.setMetricName(metricName);
+    metric.setAppId("KAFKA_BROKER");
+    metric.setInstanceId("NULL");
+    metric.setHostName("my.privatehostname.of.average.length");
+    metric.setTimestamp(System.currentTimeMillis());
+    metric.setStartTime(System.currentTimeMillis());
+    metric.setType("LONG");
+
+    // JSON deserialization used to give a LinkedHashMap; the setter now requires a TreeMap
+    TreeMap<Long, Double> valueMap = new TreeMap<>();
+    long now = System.currentTimeMillis();
+    for (int i = 0; i < 25000; i++) {
+      valueMap.put(new Long(now + i), new Double(1.0 + i));
+    }
+
+    metric.setMetricValues(valueMap);
+
+    return metric;
+  }
+
+  @Test
+  public void testTimelineMetricCacheSizing() throws Exception {
+    Set<String> metricNames = new HashSet<>();
+    String metric1 = "prefix1.suffix1.suffix2.actualNamePrefix.longMetricName1";
+    String metric2 = "prefix1.suffix1.suffix2.actualNamePrefix.longMetricName2";
+    String metric3 = "prefix1.suffix1.suffix2.actualNamePrefix.longMetricName3";
+    String metric4 = "prefix1.suffix1.suffix2.actualNamePrefix.longMetricName4";
+    String metric5 = "prefix1.suffix1.suffix2.actualNamePrefix.longMetricName5";
+    String metric6 = "prefix1.suffix1.suffix2.actualNamePrefix.longMetricName6";
+
+    metricNames.add(metric1);
+    metricNames.add(metric2);
+    metricNames.add(metric3);
+    metricNames.add(metric4);
+    metricNames.add(metric5);
+    metricNames.add(metric6);
+
+    long now = System.currentTimeMillis();
+    TemporalInfo temporalInfo = new TemporalInfoImpl(now - 1000, now, 15);
+
+    TimelineAppMetricCacheKey key = new TimelineAppMetricCacheKey(
+      metricNames, "KAFKA_BROKER", temporalInfo);
+    // Some random spec
+    key.setSpec("http://104.196.94.129:6188/ws/v1/timeline/metrics?metricNames=" +
+      "jvm.JvmMetrics.MemHeapCommittedM&appId=RESOURCEMANAGER&" +
+      "startTime=1439522640000&endTime=1440127440000&precision=hours");
+
+    Map<String, TimelineMetric> metricMap = new HashMap<>();
+    metricMap.put(metric1, getSampleTimelineMetric(metric1));
+    metricMap.put(metric2, getSampleTimelineMetric(metric2));
+    metricMap.put(metric3, getSampleTimelineMetric(metric3));
+    metricMap.put(metric4, getSampleTimelineMetric(metric4));
+    metricMap.put(metric5, getSampleTimelineMetric(metric5));
+    metricMap.put(metric6, getSampleTimelineMetric(metric6));
+
+    TimelineMetricsCacheValue value = new TimelineMetricsCacheValue(now - 1000, now, metricMap);
+
+    TimelineMetricsCacheSizeOfEngine customSizeOfEngine = new TimelineMetricsCacheSizeOfEngine();
+
+    long bytesFromReflectionEngine =
+      reflectionSizeOf.deepSizeOf(1000, false, key).getCalculated() +
+      reflectionSizeOf.deepSizeOf(1000, false, value).getCalculated();
+
+    long bytesFromCustomSizeOfEngine = customSizeOfEngine.sizeOf(key, value, null).getCalculated();
+
+    long sampleSizeInMB = bytesFromReflectionEngine / (1024 * 1024);
+    long discrepancyInKB = Math.abs(bytesFromCustomSizeOfEngine - bytesFromReflectionEngine) / 1024;
+
+    Assert.assertTrue("Sample size is greater that 10 MB", sampleSizeInMB > 10);
+    Assert.assertTrue("Discrepancy in values is less than 10K", discrepancyInKB  < 10);
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/02943430/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/cache/TimelineMetricCacheTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/cache/TimelineMetricCacheTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/cache/TimelineMetricCacheTest.java
index 3432e1d..32ce1e4 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/cache/TimelineMetricCacheTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/cache/TimelineMetricCacheTest.java
@@ -132,7 +132,7 @@ public class TimelineMetricCacheTest {
     TimelineMetric timelineMetric = new TimelineMetric();
     timelineMetric.setMetricName("cpu_user");
     timelineMetric.setAppId("app1");
-    Map<Long, Double> metricValues = new HashMap<Long, Double>();
+    TreeMap<Long, Double> metricValues = new TreeMap<Long, Double>();
     metricValues.put(now + 100, 1.0);
     metricValues.put(now + 200, 2.0);
     metricValues.put(now + 300, 3.0);
@@ -297,7 +297,7 @@ public class TimelineMetricCacheTest {
     final TimelineMetric timelineMetric1 = new TimelineMetric();
     timelineMetric1.setMetricName("cpu_user");
     timelineMetric1.setAppId("app1");
-    Map<Long, Double> metricValues = new TreeMap<Long, Double>();
+    TreeMap<Long, Double> metricValues = new TreeMap<Long, Double>();
     metricValues.put(now - 100, 1.0);
     metricValues.put(now - 200, 2.0);
     metricValues.put(now - 300, 3.0);


[30/50] [abbrv] ambari git commit: AMBARI-13510: Display sysctl and limits configuration on the HAWQ config UI screen (bhuvnesh2703 via jaoki)

Posted by nc...@apache.org.
AMBARI-13510: Display sysctl and limits configuration on the HAWQ config UI screen (bhuvnesh2703 via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3b9b7c7e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3b9b7c7e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3b9b7c7e

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 3b9b7c7e7a09fd3e635d525a6df02386d6622461
Parents: bb7ca76
Author: Jun Aoki <ja...@apache.org>
Authored: Thu Oct 22 11:16:12 2015 -0700
Committer: Jun Aoki <ja...@apache.org>
Committed: Thu Oct 22 11:16:12 2015 -0700

----------------------------------------------------------------------
 .../2.0.0.0/configuration/hawq-limits-env.xml   |  46 ++++
 .../2.0.0.0/configuration/hawq-sysctl-env.xml   | 247 +++++++++++++++++++
 .../HAWQ/2.0.0.0/package/scripts/common.py      |  42 +++-
 .../HAWQ/2.0.0.0/package/scripts/constants.py   |   7 +-
 .../HAWQ/2.0.0.0/package/scripts/hawqstatus.py  |   3 +-
 .../2.0.0.0/package/scripts/master_helper.py    |   2 +-
 .../HAWQ/2.0.0.0/package/scripts/params.py      |   4 +
 .../HAWQ/2.0.0.0/package/scripts/utils.py       |   4 +-
 .../package/templates/hawq.limits.conf.j2       |   7 -
 .../package/templates/hawq.sysctl.conf.j2       |  27 --
 10 files changed, 340 insertions(+), 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/3b9b7c7e/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/hawq-limits-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/hawq-limits-env.xml b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/hawq-limits-env.xml
new file mode 100644
index 0000000..d8917e6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/hawq-limits-env.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+    <property>
+        <display-name>soft nofile</display-name>
+        <name>soft_nofile</name>
+        <value>2900000</value>
+        <description>Soft limit for the number of file handles or open files for user gpadmin. Value is set in file /etc/security/limits.d/gpadmin.conf</description>
+    </property>
+
+    <property>
+        <display-name>hard nofile</display-name>
+        <name>hard_nofile</name>
+        <value>2900000</value>
+        <description>Hard limit for the number of file handles or open files for user gpadmin. Value is set in /etc/security/limits.d/gpadmin.conf</description>
+    </property>
+
+    <property>
+        <display-name>soft nproc</display-name>
+        <name>soft_nproc</name>
+        <value>131072</value>
+        <description>Soft limit for the maximum number of processes for user gpadmin. Value is set in /etc/security/limits.d/gpadmin.conf</description>
+    </property>
+
+    <property>
+        <display-name>hard nproc</display-name>
+        <name>hard_nproc</name>
+        <value>131072</value>
+        <description>Hard limit for the maximum number of processes for user gpadmin. Value is set in /etc/security/limits.d/gpadmin.conf</description>
+    </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b9b7c7e/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/hawq-sysctl-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/hawq-sysctl-env.xml b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/hawq-sysctl-env.xml
new file mode 100644
index 0000000..32ae5a5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/hawq-sysctl-env.xml
@@ -0,0 +1,247 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+  <property>
+    <name>kernel.shmmax</name>
+    <value>500000000</value>
+    <description>Maximum size in bytes of a single shared memory segment that a Linux process can allocate in its
+      virtual address space</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>kernel.shmmni</name>
+    <value>4096</value>
+    <description>System wide maximum number of shared memory segments</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>kernel.shmall</name>
+    <value>4000000000</value>
+    <description>Total amount of shared memory pages that can be used system wide</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>kernel.sem</name>
+    <value>250 512000 100 2048</value>
+    <description>Parameter to define semaphore related values</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>kernel.sysrq</name>
+    <value>1</value>
+    <description>Enable(1)/Disable(0) functions of sysrq</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>kernel.core_uses_pid</name>
+    <value>1</value>
+    <description>Enable appending process id to the name of core dump file. Ex: core.PID</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>kernel.msgmnb</name>
+    <value>65536</value>
+    <description>Default maximum size in bytes of a message queue</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>kernel.msgmax</name>
+    <value>65536</value>
+    <description>Maximum size in bytes of a single message</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>kernel.msgmni</name>
+    <value>2048</value>
+    <description>Number of message queue identifiers</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>net.ipv4.tcp_syncookies</name>
+    <value>0</value>
+    <description>Enable(1)/Disable(0) SYN cookie protection</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>net.ipv4.ip_forward</name>
+    <value>0</value>
+    <description>Enable(1)/Disable(0) IP forwarding</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>net.ipv4.conf.default.accept_source_route</name>
+    <value>0</value>
+    <description>Enable(1)/Disable(0) IP source routing</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>net.ipv4.tcp_tw_recycle</name>
+    <value>1</value>
+    <description>Enable(1)/Disable(0) fast recycling of TIME_WAIT sockets</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>net.ipv4.tcp_max_syn_backlog</name>
+    <value>200000</value>
+    <description>Number of outstanding syn requests allowed</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>net.ipv4.conf.all.arp_filter</name>
+    <value>1</value>
+    <description>Enable(1)/Disable(0) route lookup to determine the interface through which to send the reply</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>net.ipv4.ip_local_port_range</name>
+    <value>1281 65535</value>
+    <description>Local port range that is used by TCP and UDP traffic to choose the local port</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>net.core.netdev_max_backlog</name>
+    <value>200000</value>
+    <description>Maximum number of packets allowed to queue when a particular interface receives packets faster than the kernel can process them</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>fs.nr_open</name>
+    <value>3000000</value>
+    <description>Maximum number of file-handles a process can allocate</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>kernel.threads-max</name>
+    <value>798720</value>
+    <description>System-wide maximum number of threads to be used by the kernel at one time</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>kernel.pid_max</name>
+    <value>798720</value>
+    <description>Maximum number of unique process identifiers</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>net.core.rmem_max</name>
+    <value>2097152</value>
+    <description>Maximum receive socket buffer size in bytes</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>net.core.wmem_max</name>
+    <value>2097152</value>
+    <description>Maximum send socket buffer size in bytes</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>vm.overcommit_memory</name>
+    <value>1</value>
+    <description>Defines the conditions that determine whether a large memory request is accepted or denied. There are
+      three possible values for this parameter: 0, 1 or 2. For a production environment, a value of 2 is recommended</description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b9b7c7e/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/common.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/common.py b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/common.py
index defd87c..41a3196 100644
--- a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/common.py
+++ b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/common.py
@@ -143,15 +143,46 @@ def __update_limits_file():
   """
   Updates /etc/security/limits.d/hawq.conf file with the HAWQ parameters.
   """
+  import params
   # Ensure limits directory exists
-  Directory(constants.limits_conf_dir, recursive=True, owner=constants.root_user, group=constants.root_user
-            )
+  Directory(constants.limits_conf_dir, recursive=True, owner=constants.root_user, group=constants.root_user)
 
   # Generate limits for hawq user
-  File('{0}/{1}.conf'.format(constants.limits_conf_dir, constants.hawq_user), content=Template("hawq.limits.conf.j2"),
+  limits_file_content = "#### HAWQ Limits Parameters  ###########\n"
+  for key, value in params.hawq_limits.iteritems():
+    if not __valid_input(value):
+      raise Exception("Value {0} for parameter {1} contains non-numeric characters which are not allowed (except whitespace), please fix the value and retry".format(value, key))
+    """
+    Content of the file to be written should be of the format
+    gpadmin soft nofile 290000
+    gpadmin hard nofile 290000
+    key used in the configuration is of the format soft_nofile, thus strip '_' and replace with 'space'
+    """
+    limits_file_content += "{0} {1} {2}\n".format(constants.hawq_user, re.sub("_", " ", key), value.strip())
+  File('{0}/{1}.conf'.format(constants.limits_conf_dir, constants.hawq_user), content=limits_file_content,
        owner=constants.hawq_user, group=constants.hawq_group)
 
 
+def __valid_input(value):
+  """
+  Validate that the input value contains only numeric characters (whitespace allowed); returns a truthy match object if valid, else None
+  """
+  return re.search("^ *[0-9][0-9 ]*$", value)
+
+
+def __convert_sysctl_dict_to_text():
+  """
+  Convert the sysctl configuration dict to text, with each property = value pair on its own line
+  """
+  import params
+  sysctl_file_content = "### HAWQ System Parameters ###########\n"
+  for key, value in params.hawq_sysctl.iteritems():
+    if not __valid_input(value):
+      raise Exception("Value {0} for parameter {1} contains non-numeric characters which are not allowed (except whitespace), please fix the value and retry".format(value, key))
+    sysctl_file_content += "{0} = {1}\n".format(key, value)
+  return sysctl_file_content
+
+
 def __update_sysctl_file():
   """
   Updates /etc/sysctl.d/hawq_sysctl.conf file with the HAWQ parameters on CentOS/RHEL.
@@ -160,7 +191,7 @@ def __update_sysctl_file():
   Directory(constants.sysctl_conf_dir, recursive=True, owner=constants.root_user, group=constants.root_user)
 
   # Generate temporary file with kernel parameters needed by hawq
-  File(constants.hawq_sysctl_tmp_file, content=Template("hawq.sysctl.conf.j2"), owner=constants.hawq_user,
+  File(constants.hawq_sysctl_tmp_file, content=__convert_sysctl_dict_to_text(), owner=constants.hawq_user,
        group=constants.hawq_group)
 
   is_changed = True
@@ -187,8 +218,7 @@ def __update_sysctl_file_suse():
   backup_file_name = constants.sysctl_backup_file.format(str(int(time.time())))
   try:
     # Generate file with kernel parameters needed by hawq to temp file
-
-    File(constants.hawq_sysctl_tmp_file, content=Template("hawq.sysctl.conf.j2"), owner=constants.hawq_user,
+    File(constants.hawq_sysctl_tmp_file, content=__convert_sysctl_dict_to_text(), owner=constants.hawq_user,
         group=constants.hawq_group)
 
     sysctl_file_dict = utils.read_file_to_dict(constants.sysctl_suse_file)
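
As a standalone illustration of the validation and key rewriting introduced above (a minimal sketch; a plain dict stands in for params.hawq_limits and the gpadmin user name is hard-coded):

import re

def valid_input(value):
    # Digits and spaces only, mirroring __valid_input above.
    return re.search("^ *[0-9][0-9 ]*$", value) is not None

hawq_limits = {"soft_nofile": "290000", "hard_nofile": "290000"}
content = "#### HAWQ Limits Parameters  ###########\n"
for key, value in sorted(hawq_limits.items()):
    if not valid_input(value):
        raise Exception("Value {0} for parameter {1} is not numeric".format(value, key))
    # soft_nofile -> "gpadmin soft nofile 290000"
    content += "gpadmin {0} {1}\n".format(re.sub("_", " ", key), value.strip())
print(content)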

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b9b7c7e/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/constants.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/constants.py b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/constants.py
index 78a636e..b56fd59 100644
--- a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/constants.py
+++ b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/constants.py
@@ -26,10 +26,9 @@ STOP = "stop"
 
 # Users
 root_user = "root"
-gpadmin_user = "gpadmin"
-hawq_user = gpadmin_user
-hawq_group = gpadmin_user
-hawq_password = gpadmin_user
+hawq_user = "gpadmin"
+hawq_group = hawq_user
+hawq_password = hawq_user
 
 # Directories
 hawq_home_dir = "/usr/local/hawq/"

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b9b7c7e/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/hawqstatus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/hawqstatus.py b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/hawqstatus.py
index 59742bd..26dfdd0 100644
--- a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/hawqstatus.py
+++ b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/hawqstatus.py
@@ -59,7 +59,6 @@ def get_pid_file():
   if not pid:
     raise Fail("Failed to fetch pid from {0}".format(postmaster_pid_file))
 
-  File(hawq_pid_file, content=pid, owner=constants.gpadmin_user, group=constants.gpadmin_user)
+  File(hawq_pid_file, content=pid, owner=constants.hawq_user, group=constants.hawq_user)
 
   return hawq_pid_file
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b9b7c7e/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/master_helper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/master_helper.py b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/master_helper.py
index 82a5168..35f5112 100644
--- a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/master_helper.py
+++ b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/master_helper.py
@@ -107,7 +107,7 @@ def __init_standby():
   """
   Initializes the HAWQ Standby Master
   """
-    utils.exec_hawq_operation(constants.INIT, "{0} -a -v".format(constants.STANDBY))
+  utils.exec_hawq_operation(constants.INIT, "{0} -a -v".format(constants.STANDBY))
 
 
 def __get_component_name():

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b9b7c7e/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/params.py
index df33d0b..fb449b9 100644
--- a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/params.py
@@ -73,6 +73,10 @@ rm_host = __get_component_host('rm_host')
 
 # Config files
 gpcheck_content = config['configurations']['gpcheck-env']['content']
+# database user limits
+hawq_limits = config['configurations']['hawq-limits-env']
+# sysctl parameters
+hawq_sysctl = config['configurations']['hawq-sysctl-env']
 
 hawq_site = config['configurations']['hawq-site']
 hawq_master_dir = hawq_site.get('hawq_master_directory')

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b9b7c7e/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/utils.py b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/utils.py
index b42b3a6..da51c19 100644
--- a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/utils.py
+++ b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/utils.py
@@ -81,9 +81,9 @@ def exec_ssh_cmd(hostname, cmd):
   import params
   # Only gpadmin should be allowed to run command via ssh, thus not exposing user as a parameter
   if params.hostname != hostname:
-    cmd = "su - {0} -c 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {1} \"{2} \" '".format(constants.gpadmin_user, hostname, cmd)
+    cmd = "su - {0} -c 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {1} \"{2} \" '".format(constants.hawq_user, hostname, cmd)
   else:
-    cmd = "su - {0} -c \"{1}\"".format(constants.gpadmin_user, cmd)
+    cmd = "su - {0} -c \"{1}\"".format(constants.hawq_user, cmd)
   Logger.info("Command executed: {0}".format(cmd))
   process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
   (stdout, stderr) = process.communicate()
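
The command-wrapping pattern above, sketched outside Ambari's resource framework (build_hawq_cmd and the host names are hypothetical; only the string construction mirrors exec_ssh_cmd):

def build_hawq_cmd(local_host, target_host, cmd, user="gpadmin"):
    # Remote hosts are reached over ssh as the hawq user; on the local
    # host only the su wrapper is needed.
    if local_host != target_host:
        return ("su - {0} -c 'ssh -o StrictHostKeyChecking=no "
                "-o UserKnownHostsFile=/dev/null {1} \"{2}\"'").format(user, target_host, cmd)
    return 'su - {0} -c "{1}"'.format(user, cmd)

print(build_hawq_cmd("master1.example.com", "segment1.example.com", "hawq state"))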

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b9b7c7e/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/templates/hawq.limits.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/templates/hawq.limits.conf.j2 b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/templates/hawq.limits.conf.j2
deleted file mode 100644
index 93e027c..0000000
--- a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/templates/hawq.limits.conf.j2
+++ /dev/null
@@ -1,7 +0,0 @@
-#######################################
-#### HAWQ Limits Parameters ###########
-#######################################
-{{hawq_user}} soft nofile 2900000
-{{hawq_user}} hard nofile 2900000
-{{hawq_user}} soft nproc 131072
-{{hawq_user}} hard nproc 131072
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b9b7c7e/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/templates/hawq.sysctl.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/templates/hawq.sysctl.conf.j2 b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/templates/hawq.sysctl.conf.j2
deleted file mode 100644
index 480228e..0000000
--- a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/templates/hawq.sysctl.conf.j2
+++ /dev/null
@@ -1,27 +0,0 @@
-#######################################
-#### HAWQ System Parameters ###########
-#######################################
-kernel.shmmax = 500000000
-kernel.shmmni = 4096
-kernel.shmall = 4000000000
-kernel.sem = 250 512000 100 2048
-kernel.sysrq = 1
-kernel.core_uses_pid = 1
-kernel.msgmnb = 65536
-kernel.msgmax = 65536
-kernel.msgmni = 2048
-net.ipv4.tcp_syncookies = 0
-net.ipv4.ip_forward = 0
-net.ipv4.conf.default.accept_source_route = 0
-net.ipv4.tcp_tw_recycle = 1
-net.ipv4.tcp_max_syn_backlog = 200000
-net.ipv4.conf.all.arp_filter = 1
-net.ipv4.ip_local_port_range = 1281 65535
-net.core.netdev_max_backlog = 200000
-fs.nr_open = 3000000
-kernel.threads-max = 798720
-kernel.pid_max = 798720
-# increase network
-net.core.rmem_max=2097152
-net.core.wmem_max=2097152
-vm.overcommit_memory = 1


[09/50] [abbrv] ambari git commit: AMBARI-13482. [CapSchedView] Mappings should be validated and invalid mappings should be disallowed. (Gaurav Nagar via yusaku)

Posted by nc...@apache.org.
AMBARI-13482. [CapSchedView] Mappings should be validated and invalid mappings should be disallowed. (Gaurav Nagar via yusaku)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/383aac0f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/383aac0f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/383aac0f

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 383aac0f581e833614b4307635d263e90ad90171
Parents: 844b1e9
Author: Yusaku Sako <yu...@hortonworks.com>
Authored: Wed Oct 21 15:23:00 2015 -0700
Committer: Yusaku Sako <yu...@hortonworks.com>
Committed: Wed Oct 21 15:23:29 2015 -0700

----------------------------------------------------------------------
 .../main/resources/ui/app/controllers/queues.js | 36 ++++++++++++++++++--
 .../ui/app/templates/schedulerPanel.hbs         |  2 +-
 2 files changed, 35 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/383aac0f/contrib/views/capacity-scheduler/src/main/resources/ui/app/controllers/queues.js
----------------------------------------------------------------------
diff --git a/contrib/views/capacity-scheduler/src/main/resources/ui/app/controllers/queues.js b/contrib/views/capacity-scheduler/src/main/resources/ui/app/controllers/queues.js
index 429832f..416ff43 100644
--- a/contrib/views/capacity-scheduler/src/main/resources/ui/app/controllers/queues.js
+++ b/contrib/views/capacity-scheduler/src/main/resources/ui/app/controllers/queues.js
@@ -336,7 +336,7 @@ App.QueuesController = Ember.ArrayController.extend({
    * check if can save configs
    * @type {bool}
    */
-  canNotSave: cmp.any('hasOverCapacity', 'hasUncompetedAddings','hasNotValid','hasNotValidLabels'),
+  canNotSave: cmp.any('hasOverCapacity', 'hasUncompetedAddings','hasNotValid','hasNotValidLabels','hasInvalidQueueMappings'),
 
   /**
    * List of not valid queues.
@@ -384,5 +384,37 @@ App.QueuesController = Ember.ArrayController.extend({
    * True if uncompetedAddings is not empty.
    * @type {Boolean}
    */
-  hasUncompetedAddings:cmp.notEmpty('uncompetedAddings.[]')
+  hasUncompetedAddings:cmp.notEmpty('uncompetedAddings.[]'),
+
+  /**
+   * True if queue_mapping is not valid
+   * @type {Boolean}
+   */
+  hasInvalidQueueMappings : function() {
+    var mappings = this.get('scheduler.queue_mappings') || '',
+      queues = this.get('content.content'),
+      hasInvalidMapping = false;
+
+    if(mappings == '' || mappings == 'u:%user:%primary_group' || mappings == 'u:%user:%user') {
+      return false;
+    }
+
+    mappings.split(',').forEach(function(item) {
+      // item should be in format [u or g]:[name]:[queue_name]
+      var mapping= item.split(":");
+
+      if(mapping.length != 3 || (mapping[0] != 'u' && mapping[0] != 'g')) {
+        hasInvalidMapping = true;
+      } else {
+        hasInvalidMapping = queues.filter(function(queue){
+            return !queue.get("queues"); //get all leaf queues
+          }).map(function(queue){
+            return queue.get("name");
+          }).indexOf(mapping[2]) == -1;
+      }
+
+    })
+
+    return hasInvalidMapping;
+  }.property('scheduler.queue_mappings','content.length','content.@each.capacity')
 });
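
The same [u or g]:[name]:[queue_name] check, sketched in Python against a hypothetical list of leaf queue names (unlike the Ember computed property above, this sketch rejects on the first bad entry):

def has_invalid_queue_mappings(mappings, leaf_queues):
    # Empty input and the two %-placeholder forms are always accepted.
    if mappings in ("", "u:%user:%primary_group", "u:%user:%user"):
        return False
    for item in mappings.split(","):
        parts = item.split(":")
        # Each entry must be [u or g]:[name]:[existing leaf queue name].
        if len(parts) != 3 or parts[0] not in ("u", "g"):
            return True
        if parts[2] not in leaf_queues:
            return True
    return False

print(has_invalid_queue_mappings("u:alice:default,g:ops:etl", ["default", "etl"]))  # False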

http://git-wip-us.apache.org/repos/asf/ambari/blob/383aac0f/contrib/views/capacity-scheduler/src/main/resources/ui/app/templates/schedulerPanel.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/capacity-scheduler/src/main/resources/ui/app/templates/schedulerPanel.hbs b/contrib/views/capacity-scheduler/src/main/resources/ui/app/templates/schedulerPanel.hbs
index 388bc60..4419aa0 100644
--- a/contrib/views/capacity-scheduler/src/main/resources/ui/app/templates/schedulerPanel.hbs
+++ b/contrib/views/capacity-scheduler/src/main/resources/ui/app/templates/schedulerPanel.hbs
@@ -126,7 +126,7 @@
         }}
         {{#if isOperator}}
           <div class="col-xs-7 control-value">
-            {{expandable-input value=scheduler.queue_mappings class="input-sm form-control input-expand"}}
+            {{expandable-input value=scheduler.queue_mappings classNameBindings="hasInvalidQueueMappings:list-group-item-danger" class="input-sm form-control input-expand"}}
             {{#if schedulerDirtyFilelds.queue_mappings}}
               <div class="btn-group btn-group-xs" >
                   <a {{action 'rollbackProp' 'queue_mappings' scheduler}} href="#" class="btn btn-default btn-warning"><i class="fa fa-undo"></i></a>


[07/50] [abbrv] ambari git commit: AMBARI-13469. spelling mistake in the help message for max capacity in capacity scheduler. (Gaurav Nagar via yusaku)

Posted by nc...@apache.org.
AMBARI-13469. spelling mistake in the help message for max capacity in capacity scheduler. (Gaurav Nagar via yusaku)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a5162a71
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a5162a71
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a5162a71

Branch: refs/heads/branch-dev-patch-upgrade
Commit: a5162a71c4a03d3ac803c69e17492132b829fd9c
Parents: 85bd7cd
Author: Yusaku Sako <yu...@hortonworks.com>
Authored: Wed Oct 21 14:56:38 2015 -0700
Committer: Yusaku Sako <yu...@hortonworks.com>
Committed: Wed Oct 21 14:56:38 2015 -0700

----------------------------------------------------------------------
 .../src/main/resources/ui/app/templates/capacityEditForm.hbs     | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a5162a71/contrib/views/capacity-scheduler/src/main/resources/ui/app/templates/capacityEditForm.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/capacity-scheduler/src/main/resources/ui/app/templates/capacityEditForm.hbs b/contrib/views/capacity-scheduler/src/main/resources/ui/app/templates/capacityEditForm.hbs
index dd55396..78aca04 100644
--- a/contrib/views/capacity-scheduler/src/main/resources/ui/app/templates/capacityEditForm.hbs
+++ b/contrib/views/capacity-scheduler/src/main/resources/ui/app/templates/capacityEditForm.hbs
@@ -46,7 +46,7 @@
     <div {{bind-attr class=":form-group this.isValid::has-error" }}>
       {{tooltip-label
         label='Max Capacity:'
-        message= 'The cap (maximum capacity) as a percentage of total capacity that this quire can utilize.'
+        message= 'The cap (maximum capacity) as a percentage of total capacity that this queue can utilize.'
       }}
       <div class="form-group">
         <div class="input-group input-percent">
@@ -80,7 +80,7 @@
     <div class="form-group">
       {{#tooltip-label
         label='Max Capacity:'
-        message= 'The cap (maximum capacity) as a percentage of total capacity that this quire can utilize.'
+        message= 'The cap (maximum capacity) as a percentage of total capacity that this queue can utilize.'
       }}
         <span>{{maximum_capacity}}%</span>
       {{/tooltip-label}}


[12/50] [abbrv] ambari git commit: AMBARI-13481. Ranger User Info tab: All subsequent subsection and their configs should be hidden if Ranger Usersync is disabled. (jaimin)

Posted by nc...@apache.org.
AMBARI-13481. Ranger User Info tab: All subsequent subsection and their configs should be hidden if Ranger Usersync is disabled. (jaimin)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/34a03533
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/34a03533
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/34a03533

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 34a03533b1bf844a9f7b3f56919de1db3d24754c
Parents: ac51d8b
Author: Jaimin Jetly <ja...@hortonworks.com>
Authored: Tue Oct 20 11:33:01 2015 -0700
Committer: Jaimin Jetly <ja...@hortonworks.com>
Committed: Wed Oct 21 19:10:57 2015 -0700

----------------------------------------------------------------------
 ambari-web/app/mappers/configs/themes_mapper.js | 62 +++++++++++++---
 ambari-web/app/models.js                        |  2 +-
 .../app/models/configs/config_condition.js      | 65 -----------------
 .../app/models/configs/theme/sub_section.js     |  9 ++-
 .../app/models/configs/theme/sub_section_tab.js |  9 ++-
 .../app/models/configs/theme/theme_condition.js | 76 ++++++++++++++++++++
 .../configs/service_config_layout_tab_view.js   |  3 +-
 .../configs/widgets/config_widget_view.js       | 30 +++++++-
 8 files changed, 172 insertions(+), 84 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/34a03533/ambari-web/app/mappers/configs/themes_mapper.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mappers/configs/themes_mapper.js b/ambari-web/app/mappers/configs/themes_mapper.js
index fff2013..0a55068 100644
--- a/ambari-web/app/mappers/configs/themes_mapper.js
+++ b/ambari-web/app/mappers/configs/themes_mapper.js
@@ -22,7 +22,7 @@ App.themesMapper = App.QuickDataMapper.create({
   sectionModel: App.Section,
   subSectionModel: App.SubSection,
   subSectionTabModel: App.SubSectionTab,
-  configConditionModel: App.ConfigCondition,
+  themeConditionModel: App.ThemeCondition,
 
   tabConfig: {
     "id": "name",
@@ -107,27 +107,40 @@ App.themesMapper = App.QuickDataMapper.create({
 
               if (section.subsections) {
                 var subSections = [];
+                var subSectionConditions = [];
                 section.subsections.forEach(function(subSection) {
                   var parsedSubSection = this.parseIt(subSection, this.get("subSectionConfig"));
                   parsedSubSection.section_id = parsedSection.id;
 
                   if (subSection['subsection-tabs']) {
                     var subSectionTabs = [];
+                    var subSectionTabConditions = [];
 
                     subSection['subsection-tabs'].forEach(function (subSectionTab) {
                       var parsedSubSectionTab = this.parseIt(subSectionTab, this.get("subSectionTabConfig"));
                       parsedSubSectionTab.sub_section_id = parsedSubSection.id;
-
+                      if (parsedSubSectionTab['depends_on']) {
+                        subSectionTabConditions.push(parsedSubSectionTab);
+                      }
                       subSectionTabs.push(parsedSubSectionTab);
                     }, this);
                     subSectionTabs[0].is_active = true;
-
+                    if (subSectionTabConditions.length) {
+                      var type = 'subsectionTab';
+                      this.mapThemeConditions(subSectionTabConditions, type);
+                    }
                     App.store.loadMany(this.get("subSectionTabModel"), subSectionTabs);
                     parsedSubSection.sub_section_tabs = subSectionTabs.mapProperty("id");
                   }
-
+                  if (parsedSubSection['depends_on']) {
+                    subSectionConditions.push(parsedSubSection);
+                  }
                   subSections.push(parsedSubSection);
                 }, this);
+                if (subSectionConditions.length) {
+                  var type = 'subsection';
+                  this.mapThemeConditions(subSectionConditions, type);
+                }
                 App.store.loadMany(this.get("subSectionModel"), subSections);
                 parsedSection.sub_sections = subSections.mapProperty("id");
               }
@@ -159,15 +172,12 @@ App.themesMapper = App.QuickDataMapper.create({
       var subSectionTabId = configLink["subsection-tab-name"];
       if (subSectionTabId) {
         var subSectionTab = App.SubSectionTab.find(subSectionTabId);
-        var subSectionTabDependsOnConfigs = subSectionTab.get('dependsOn');
       } else if (subSectionId) {
         var subSection = App.SubSection.find(subSectionId);
-        var subSectionDependsOnConfigs = subSection.get('dependsOn');
       }
       var configProperty = App.StackConfigProperty.find(configId);
 
-      var configDependsOnOtherConfigs = configLink["depends-on"] || [];
-      var dependsOnConfigs = configDependsOnOtherConfigs.concat(subSectionDependsOnConfigs || []).concat(subSectionTabDependsOnConfigs || []);
+      var dependsOnConfigs = configLink["depends-on"] || [];
 
       if (configProperty.get('id') && subSection) {
         subSection.get('configProperties').pushObject(configProperty);
@@ -181,7 +191,7 @@ App.themesMapper = App.QuickDataMapper.create({
         if (valueAttributes) {
           var isUiOnlyProperty = valueAttributes["ui_only_property"];
           // UI only configs are mentioned in the themes for supporting widgets that is not intended for setting a value
-          // And thus is affiliated witha fake config peperty termed as ui only config property
+          // And thus is affiliated with a fake config property termed a ui-only config property
           if (isUiOnlyProperty && subSection) {
             var split = configLink.config.split("/");
             var fileName =  split[0] + '.xml';
@@ -237,11 +247,43 @@ App.themesMapper = App.QuickDataMapper.create({
       }
 
       configCondition.resource = _configCondition.resource || 'config';
+      configCondition.type = _configCondition.type || 'config';
 
       configConditionsCopy.pushObject(configCondition);
     }, this);
 
-    App.store.loadMany(this.get("configConditionModel"), configConditionsCopy);
+    App.store.loadMany(this.get("themeConditionModel"), configConditionsCopy);
+    App.store.commit();
+  },
+
+  /**
+   *
+   * @param subSections: Array
+   * @param type: {String} possible values: `subsection` or `subsectionTab`
+   */
+  mapThemeConditions: function(subSections, type) {
+    var subSectionConditionsCopy = [];
+    subSections.forEach(function(_subSection){
+      var subSectionConditions = _subSection['depends_on'];
+      subSectionConditions.forEach(function(_subSectionCondition, index){
+        var subSectionCondition = $.extend({},_subSectionCondition);
+        subSectionCondition.id = _subSection.id + '_' + index;
+        subSectionCondition.name = _subSection.name;
+        if (_subSectionCondition.configs && _subSectionCondition.configs.length) {
+          subSectionCondition.configs = _subSectionCondition.configs.map(function (item) {
+            var result = {};
+            result.fileName = item.split('/')[0] + '.xml';
+            result.configName = item.split('/')[1];
+            return result;
+          });
+        }
+
+        subSectionCondition.resource = _subSectionCondition.resource || 'config';
+        subSectionCondition.type = _subSectionCondition.type || type;
+        subSectionConditionsCopy.pushObject(subSectionCondition);
+      }, this);
+    }, this);
+    App.store.loadMany(this.get("themeConditionModel"), subSectionConditionsCopy);
     App.store.commit();
   },
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/34a03533/ambari-web/app/models.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models.js b/ambari-web/app/models.js
index 8117f62..9440dea 100644
--- a/ambari-web/app/models.js
+++ b/ambari-web/app/models.js
@@ -60,7 +60,7 @@ require('models/master_component');
 require('models/host_stack_version');
 require('models/root_service');
 require('models/upgrade_entity');
-require('models/configs/config_condition');
+require('models/configs/theme/theme_condition');
 require('models/configs/service_config_version');
 require('models/configs/stack_config_property');
 require('models/configs/config_group');

http://git-wip-us.apache.org/repos/asf/ambari/blob/34a03533/ambari-web/app/models/configs/config_condition.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/configs/config_condition.js b/ambari-web/app/models/configs/config_condition.js
deleted file mode 100644
index f978424..0000000
--- a/ambari-web/app/models/configs/config_condition.js
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * THIS IS NOT USED FOR NOW
- * FOR CONFIG GROUPS WE ARE USING OLD MODELS AND LOGIC
- */
-
-var App = require('app');
-
-App.ConfigCondition = DS.Model.extend({
-  /**
-   * unique id generated as <code>config_name<code><code>filename<code>
-   * @property {string}
-   */
-  id: DS.attr('string'),
-
-  /**
-   * Name of the config that is being affected with the condition
-   */
-  configName: DS.attr('string'),
-
-  /**
-   * File name to which the config getting affected belongs
-   */
-  fileName: DS.attr('string'),
-
-  /**
-   * List of configs whose values affect the config
-   * Each Object in an array consists of configName and fileName
-   */
-  configs: DS.attr('array', {defaultValue: []}),
-
-  /**
-   * resource can be `config`, `service`
-   */
-  resource: DS.attr('string', {defaultValue: 'config'}),
-
-  /**
-   * conditional String which can be evaluated to boolean result.
-   * If evaluated result of this staring is true then use the statement provided by `then` attribute.
-   * Otherwise use the attribute provided by `else` attributes
-   */
-  if: DS.attr('string'),
-  then: DS.attr('object', {defaultValue: null}),
-  else: DS.attr('object', {defaultValue: null})
-
-});
-
-App.ConfigCondition.FIXTURES = [];

http://git-wip-us.apache.org/repos/asf/ambari/blob/34a03533/ambari-web/app/models/configs/theme/sub_section.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/configs/theme/sub_section.js b/ambari-web/app/models/configs/theme/sub_section.js
index 261d3b8..ed3c7a2 100644
--- a/ambari-web/app/models/configs/theme/sub_section.js
+++ b/ambari-web/app/models/configs/theme/sub_section.js
@@ -172,6 +172,11 @@ App.SubSection = DS.Model.extend({
   }.property('columnIndex', 'columnSpan', 'section.sectionColumns'),
 
   /**
+   * Whether the visibility of the subsection depends on the value of some config
+   */
+  isHiddenByConfig: false,
+
+  /**
    * Determines if subsection is filtered by checking it own configs
    * If there is no configs, subsection can't be hidden
    * @type {boolean}
@@ -186,8 +191,8 @@ App.SubSection = DS.Model.extend({
    * @type {boolean}
    */
   isSectionVisible: function () {
-    return !this.get('isHiddenByFilter') && this.get('configs').someProperty('isVisible', true);
-  }.property('isHiddenByFilter', 'configs.@each.isVisible')
+    return !this.get('isHiddenByFilter') && !this.get('isHiddenByConfig') && this.get('configs').someProperty('isVisible', true);
+  }.property('isHiddenByFilter', 'configs.@each.isVisible', 'isHiddenByConfig')
 });
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/34a03533/ambari-web/app/models/configs/theme/sub_section_tab.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/configs/theme/sub_section_tab.js b/ambari-web/app/models/configs/theme/sub_section_tab.js
index bf7c015..ca42aeb 100644
--- a/ambari-web/app/models/configs/theme/sub_section_tab.js
+++ b/ambari-web/app/models/configs/theme/sub_section_tab.js
@@ -66,6 +66,11 @@ App.SubSectionTab = DS.Model.extend({
   }.property('configs.@each.isVisible', 'configs.@each.isValid', 'configs.@each.overrideErrorTrigger'),
 
   /**
+   * Whether the visibility of the subsection tab depends on the value of some config
+   */
+  isHiddenByConfig: false,
+
+  /**
    * Determines if subsection is filtered by checking it own configs
    * If there is no configs, subsection can't be hidden
    * @type {boolean}
@@ -80,8 +85,8 @@ App.SubSectionTab = DS.Model.extend({
    * @type {boolean}
    */
   isVisible: function () {
-    return !this.get('isHiddenByFilter') && this.get('configs').someProperty('isVisible', true);
-  }.property('isHiddenByFilter', 'configs.@each.isVisible')
+    return !this.get('isHiddenByFilter') && !this.get('isHiddenByConfig') && this.get('configs').someProperty('isVisible', true);
+  }.property('isHiddenByFilter', 'isHiddenByConfig', 'configs.@each.isVisible')
 });
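
The visibility rule shared by App.SubSection and App.SubSectionTab, restated as a small Python sketch (field names assumed):

def is_section_visible(hidden_by_filter, hidden_by_config, configs):
    # A subsection (or tab) shows only when neither flag hides it and at
    # least one of its configs is itself visible.
    return (not hidden_by_filter and not hidden_by_config
            and any(c["isVisible"] for c in configs))

print(is_section_visible(False, False, [{"isVisible": True}]))  # True
print(is_section_visible(False, True, [{"isVisible": True}]))   # False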
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/34a03533/ambari-web/app/models/configs/theme/theme_condition.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/configs/theme/theme_condition.js b/ambari-web/app/models/configs/theme/theme_condition.js
new file mode 100644
index 0000000..a77256a
--- /dev/null
+++ b/ambari-web/app/models/configs/theme/theme_condition.js
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * THIS IS NOT USED FOR NOW
+ * FOR CONFIG GROUPS WE ARE USING OLD MODELS AND LOGIC
+ */
+
+var App = require('app');
+
+App.ThemeCondition = DS.Model.extend({
+  /**
+   * unique id generated as <code>config_name</code><code>filename</code>
+   * @property {string}
+   */
+  id: DS.attr('string'),
+
+  /**
+   *  type can be `config`, `service`, `subsection` or `subsectionTab`
+   */
+  type: DS.attr('string'),
+
+  /**
+   * This is specific to subsection and subsectionTab type
+   */
+
+  name: DS.attr('string'),
+
+  /**
+   * Name of the config that is being affected with the condition
+   */
+  configName: DS.attr('string'),
+
+  /**
+   * File name to which the config getting affected belongs
+   */
+  fileName: DS.attr('string'),
+
+  /**
+   * List of configs whose values affect the config
+   * Each Object in an array consists of configName and fileName
+   */
+  configs: DS.attr('array', {defaultValue: []}),
+
+  /**
+   * resource can be `config`, `service`
+   */
+  resource: DS.attr('string', {defaultValue: 'config'}),
+
+  /**
+   * Conditional string that can be evaluated to a boolean result.
+   * If the evaluated result of this string is true, use the statement provided by the `then` attribute.
+   * Otherwise, use the statement provided by the `else` attribute.
+   */
+  if: DS.attr('string'),
+  then: DS.attr('object', {defaultValue: null}),
+  else: DS.attr('object', {defaultValue: null})
+
+});
+
+App.ThemeCondition.FIXTURES = [];

http://git-wip-us.apache.org/repos/asf/ambari/blob/34a03533/ambari-web/app/views/common/configs/service_config_layout_tab_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/configs/service_config_layout_tab_view.js b/ambari-web/app/views/common/configs/service_config_layout_tab_view.js
index 720dd12..1aec1fa 100644
--- a/ambari-web/app/views/common/configs/service_config_layout_tab_view.js
+++ b/ambari-web/app/views/common/configs/service_config_layout_tab_view.js
@@ -119,8 +119,7 @@ App.ServiceConfigLayoutTabView = Em.View.extend(App.ConfigOverridable, {
         stackConfigProperty: config
       };
 
-
-      var configConditions = App.ConfigCondition.find().filter(function (_configCondition) {
+      var configConditions = App.ThemeCondition.find().filter(function (_configCondition) {
         // Filter config condition depending on the value of another config
         var conditionalConfigs = (_configCondition.get('configs')||[]).filterProperty('fileName', config.get('filename')).filterProperty('configName', config.get('name'));
         // Filter config condition depending on the service existence or service state

http://git-wip-us.apache.org/repos/asf/ambari/blob/34a03533/ambari-web/app/views/common/configs/widgets/config_widget_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/configs/widgets/config_widget_view.js b/ambari-web/app/views/common/configs/widgets/config_widget_view.js
index b3e806f..9e120ce 100644
--- a/ambari-web/app/views/common/configs/widgets/config_widget_view.js
+++ b/ambari-web/app/views/common/configs/widgets/config_widget_view.js
@@ -430,7 +430,11 @@ App.ConfigWidgetView = Em.View.extend(App.SupportsDependentConfigs, App.WidgetPo
           }
         }, this);
         isConditionTrue = window.eval(allConditionResult.join(''));
-        this.changeConfigAttribute(configCondition, isConditionTrue);
+        if (configCondition.get("type") === 'subsection' || configCondition.get("type") === 'subsectionTab') {
+          this.changeSubsectionAttribute(configCondition, isConditionTrue);
+        } else {
+          this.changeConfigAttribute(configCondition, isConditionTrue);
+        }
       } else if (configCondition.get("resource") === 'service') {
         var service = App.Service.find().findProperty('serviceName', ifStatement);
         var serviceName;
@@ -449,7 +453,7 @@ App.ConfigWidgetView = Em.View.extend(App.SupportsDependentConfigs, App.WidgetPo
 
   /**
    *
-   * @param configCondition {App.ConfigCondition}
+   * @param configCondition {App.ThemeCondition}
    * @param isConditionTrue {boolean}
    */
   changeConfigAttribute: function(configCondition, isConditionTrue) {
@@ -470,6 +474,28 @@ App.ConfigWidgetView = Em.View.extend(App.SupportsDependentConfigs, App.WidgetPo
     }
   },
 
+  /**
+   *
+   * @param subsectionCondition {App.ThemeCondition}
+   * @param isConditionTrue {boolean}
+   */
+  changeSubsectionAttribute: function(subsectionCondition, isConditionTrue) {
+    var subsectionConditionName = subsectionCondition.get('name');
+    var action = isConditionTrue ? subsectionCondition.get("then") : subsectionCondition.get("else");
+    if (subsectionCondition.get('id')) {
+      var valueAttributes = action.property_value_attributes;
+      if (valueAttributes && !Em.none(valueAttributes['visible'])) {
+        var themeResource;
+        if (subsectionCondition.get('type') === 'subsection') {
+          themeResource = App.SubSection.find().findProperty('name', subsectionConditionName);
+        } else if (subsectionCondition.get('type') === 'subsectionTab') {
+          themeResource = App.SubSectionTab.find().findProperty('name', subsectionConditionName);
+        }
+        themeResource.set('isHiddenByConfig', !valueAttributes['visible']);
+      }
+    }
+  },
+
 
 
   /**
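
A minimal sketch of the then/else handling in changeSubsectionAttribute, with plain dicts standing in for the App.ThemeCondition record and the theme resource models:

def apply_subsection_condition(theme_resources, condition, is_condition_true):
    # Pick the then/else branch and flip isHiddenByConfig when the branch
    # carries a 'visible' attribute.
    action = condition["then"] if is_condition_true else condition["else"]
    attrs = (action or {}).get("property_value_attributes") or {}
    if "visible" in attrs:
        theme_resources[condition["name"]]["isHiddenByConfig"] = not attrs["visible"]

resources = {"section-user-info": {"isHiddenByConfig": False}}
condition = {"name": "section-user-info",
             "then": {"property_value_attributes": {"visible": False}},
             "else": {"property_value_attributes": {"visible": True}}}
apply_subsection_condition(resources, condition, True)
print(resources)  # {'section-user-info': {'isHiddenByConfig': True}}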


[34/50] [abbrv] ambari git commit: AMBARI-13409. AMS Load Simulator updates. (Aravindan Vijayan via swagle)

Posted by nc...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/KAFKA_BROKER.dat
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/KAFKA_BROKER.dat b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/KAFKA_BROKER.dat
new file mode 100644
index 0000000..b9848d7
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/KAFKA_BROKER.dat
@@ -0,0 +1,1104 @@
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.OffsetCommit.max
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.OffsetCommit.mean
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.OffsetCommit.min
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.OffsetCommit.stddev
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.OffsetCommit.median
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.OffsetCommit.98percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.OffsetCommit.95percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.OffsetCommit.999percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.OffsetCommit.99percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.OffsetCommit.75percentile
+kafka.server.DelayedOperationPurgatory.PurgatorySize.delayedOperation.Heartbeat
+kafka.server.ReplicaManager.LeaderCount
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Update.Metadata.max
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Update.Metadata.mean
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Update.Metadata.min
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Update.Metadata.stddev
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Update.Metadata.median
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Update.Metadata.98percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Update.Metadata.95percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Update.Metadata.999percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Update.Metadata.99percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Update.Metadata.75percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.Fetch.max
+kafka.network.RequestMetrics.LocalTimeMs.request.Fetch.mean
+kafka.network.RequestMetrics.LocalTimeMs.request.Fetch.min
+kafka.network.RequestMetrics.LocalTimeMs.request.Fetch.stddev
+kafka.network.RequestMetrics.LocalTimeMs.request.Fetch.median
+kafka.network.RequestMetrics.LocalTimeMs.request.Fetch.98percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.Fetch.95percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.Fetch.999percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.Fetch.99percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.Fetch.75percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.ControlledShutdown.max
+kafka.network.RequestMetrics.LocalTimeMs.request.ControlledShutdown.mean
+kafka.network.RequestMetrics.LocalTimeMs.request.ControlledShutdown.min
+kafka.network.RequestMetrics.LocalTimeMs.request.ControlledShutdown.stddev
+kafka.network.RequestMetrics.LocalTimeMs.request.ControlledShutdown.median
+kafka.network.RequestMetrics.LocalTimeMs.request.ControlledShutdown.98percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.ControlledShutdown.95percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.ControlledShutdown.999percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.ControlledShutdown.99percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.ControlledShutdown.75percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.OffsetCommit.max
+kafka.network.RequestMetrics.RemoteTimeMs.request.OffsetCommit.mean
+kafka.network.RequestMetrics.RemoteTimeMs.request.OffsetCommit.min
+kafka.network.RequestMetrics.RemoteTimeMs.request.OffsetCommit.stddev
+kafka.network.RequestMetrics.RemoteTimeMs.request.OffsetCommit.median
+kafka.network.RequestMetrics.RemoteTimeMs.request.OffsetCommit.98percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.OffsetCommit.95percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.OffsetCommit.999percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.OffsetCommit.99percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.OffsetCommit.75percentile
+kafka.server.KafkaRequestHandlerPool.RequestHandlerAvgIdlePercent.count
+kafka.server.KafkaRequestHandlerPool.RequestHandlerAvgIdlePercent.1MinuteRate
+kafka.server.KafkaRequestHandlerPool.RequestHandlerAvgIdlePercent.meanRate
+kafka.server.KafkaRequestHandlerPool.RequestHandlerAvgIdlePercent.5MinuteRate
+kafka.server.KafkaRequestHandlerPool.RequestHandlerAvgIdlePercent.15MinuteRate
+kafka.cluster.Partition.UnderReplicated.partition.0.topic.ambari_kafka_service_check
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.UpdateMetadata.max
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.UpdateMetadata.mean
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.UpdateMetadata.min
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.UpdateMetadata.stddev
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.UpdateMetadata.median
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.UpdateMetadata.98percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.UpdateMetadata.95percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.UpdateMetadata.999percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.UpdateMetadata.99percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.UpdateMetadata.75percentile
+kafka.server.ReplicaManager.IsrExpandsPerSec.count
+kafka.server.ReplicaManager.IsrExpandsPerSec.1MinuteRate
+kafka.server.ReplicaManager.IsrExpandsPerSec.meanRate
+kafka.server.ReplicaManager.IsrExpandsPerSec.5MinuteRate
+kafka.server.ReplicaManager.IsrExpandsPerSec.15MinuteRate
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.FetchConsumer.max
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.FetchConsumer.mean
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.FetchConsumer.min
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.FetchConsumer.stddev
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.FetchConsumer.median
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.FetchConsumer.98percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.FetchConsumer.95percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.FetchConsumer.999percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.FetchConsumer.99percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.FetchConsumer.75percentile
+kafka.network.RequestMetrics.RequestsPerSec.request.LeaderAndIsr.count
+kafka.network.RequestMetrics.RequestsPerSec.request.LeaderAndIsr.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.LeaderAndIsr.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.LeaderAndIsr.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.LeaderAndIsr.15MinuteRate
+kafka.server.DelayedOperationPurgatory.NumDelayedOperations.delayedOperation.JoinGroup
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.ConsumerMetadata.max
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.ConsumerMetadata.mean
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.ConsumerMetadata.min
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.ConsumerMetadata.stddev
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.ConsumerMetadata.median
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.ConsumerMetadata.98percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.ConsumerMetadata.95percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.ConsumerMetadata.999percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.ConsumerMetadata.99percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.ConsumerMetadata.75percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Metadata.max
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Metadata.mean
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Metadata.min
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Metadata.stddev
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Metadata.median
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Metadata.98percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Metadata.95percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Metadata.999percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Metadata.99percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Metadata.75percentile
+kafka.server.DelayedOperationPurgatory.PurgatorySize.delayedOperation.Fetch
+kafka.network.RequestMetrics.RequestsPerSec.request.OffsetFetch.count
+kafka.network.RequestMetrics.RequestsPerSec.request.OffsetFetch.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.OffsetFetch.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.OffsetFetch.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.OffsetFetch.15MinuteRate
+kafka.network.RequestMetrics.TotalTimeMs.request.ControlledShutdown.max
+kafka.network.RequestMetrics.TotalTimeMs.request.ControlledShutdown.mean
+kafka.network.RequestMetrics.TotalTimeMs.request.ControlledShutdown.min
+kafka.network.RequestMetrics.TotalTimeMs.request.ControlledShutdown.stddev
+kafka.network.RequestMetrics.TotalTimeMs.request.ControlledShutdown.median
+kafka.network.RequestMetrics.TotalTimeMs.request.ControlledShutdown.98percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.ControlledShutdown.95percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.ControlledShutdown.999percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.ControlledShutdown.99percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.ControlledShutdown.75percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.Metadata.max
+kafka.network.RequestMetrics.RemoteTimeMs.request.Metadata.mean
+kafka.network.RequestMetrics.RemoteTimeMs.request.Metadata.min
+kafka.network.RequestMetrics.RemoteTimeMs.request.Metadata.stddev
+kafka.network.RequestMetrics.RemoteTimeMs.request.Metadata.median
+kafka.network.RequestMetrics.RemoteTimeMs.request.Metadata.98percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.Metadata.95percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.Metadata.999percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.Metadata.99percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.Metadata.75percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.LeaderAndIsr.max
+kafka.network.RequestMetrics.RemoteTimeMs.request.LeaderAndIsr.mean
+kafka.network.RequestMetrics.RemoteTimeMs.request.LeaderAndIsr.min
+kafka.network.RequestMetrics.RemoteTimeMs.request.LeaderAndIsr.stddev
+kafka.network.RequestMetrics.RemoteTimeMs.request.LeaderAndIsr.median
+kafka.network.RequestMetrics.RemoteTimeMs.request.LeaderAndIsr.98percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.LeaderAndIsr.95percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.LeaderAndIsr.999percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.LeaderAndIsr.99percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.LeaderAndIsr.75percentile
+kafka.server.DelayedOperationPurgatory.NumDelayedOperations.delayedOperation.Produce
+kafka.server.ReplicaFetcherManager.MaxLag.clientId.Replica
+kafka.network.RequestMetrics.RemoteTimeMs.request.JoinGroup.max
+kafka.network.RequestMetrics.RemoteTimeMs.request.JoinGroup.mean
+kafka.network.RequestMetrics.RemoteTimeMs.request.JoinGroup.min
+kafka.network.RequestMetrics.RemoteTimeMs.request.JoinGroup.stddev
+kafka.network.RequestMetrics.RemoteTimeMs.request.JoinGroup.median
+kafka.network.RequestMetrics.RemoteTimeMs.request.JoinGroup.98percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.JoinGroup.95percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.JoinGroup.999percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.JoinGroup.99percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.JoinGroup.75percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Offsets.max
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Offsets.mean
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Offsets.min
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Offsets.stddev
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Offsets.median
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Offsets.98percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Offsets.95percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Offsets.999percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Offsets.99percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Offsets.75percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Heartbeat.max
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Heartbeat.mean
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Heartbeat.min
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Heartbeat.stddev
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Heartbeat.median
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Heartbeat.98percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Heartbeat.95percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Heartbeat.999percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Heartbeat.99percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Heartbeat.75percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.Produce.max
+kafka.network.RequestMetrics.RemoteTimeMs.request.Produce.mean
+kafka.network.RequestMetrics.RemoteTimeMs.request.Produce.min
+kafka.network.RequestMetrics.RemoteTimeMs.request.Produce.stddev
+kafka.network.RequestMetrics.RemoteTimeMs.request.Produce.median
+kafka.network.RequestMetrics.RemoteTimeMs.request.Produce.98percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.Produce.95percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.Produce.999percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.Produce.99percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.Produce.75percentile
+kafka.network.RequestChannel.ResponseQueueSize
+kafka.network.RequestMetrics.LocalTimeMs.request.OffsetFetch.max
+kafka.network.RequestMetrics.LocalTimeMs.request.OffsetFetch.mean
+kafka.network.RequestMetrics.LocalTimeMs.request.OffsetFetch.min
+kafka.network.RequestMetrics.LocalTimeMs.request.OffsetFetch.stddev
+kafka.network.RequestMetrics.LocalTimeMs.request.OffsetFetch.median
+kafka.network.RequestMetrics.LocalTimeMs.request.OffsetFetch.98percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.OffsetFetch.95percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.OffsetFetch.999percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.OffsetFetch.99percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.OffsetFetch.75percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.Fetch.max
+kafka.network.RequestMetrics.TotalTimeMs.request.Fetch.mean
+kafka.network.RequestMetrics.TotalTimeMs.request.Fetch.min
+kafka.network.RequestMetrics.TotalTimeMs.request.Fetch.stddev
+kafka.network.RequestMetrics.TotalTimeMs.request.Fetch.median
+kafka.network.RequestMetrics.TotalTimeMs.request.Fetch.98percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.Fetch.95percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.Fetch.999percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.Fetch.99percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.Fetch.75percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.LeaderAndIsr.max
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.LeaderAndIsr.mean
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.LeaderAndIsr.min
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.LeaderAndIsr.stddev
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.LeaderAndIsr.median
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.LeaderAndIsr.98percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.LeaderAndIsr.95percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.LeaderAndIsr.999percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.LeaderAndIsr.99percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.LeaderAndIsr.75percentile
+kafka.network.RequestMetrics.RequestsPerSec.request.Fetch.count
+kafka.network.RequestMetrics.RequestsPerSec.request.Fetch.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Fetch.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Fetch.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Fetch.15MinuteRate
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Produce.max
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Produce.mean
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Produce.min
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Produce.stddev
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Produce.median
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Produce.98percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Produce.95percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Produce.999percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Produce.99percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Produce.75percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.Metadata.max
+kafka.network.RequestMetrics.LocalTimeMs.request.Metadata.mean
+kafka.network.RequestMetrics.LocalTimeMs.request.Metadata.min
+kafka.network.RequestMetrics.LocalTimeMs.request.Metadata.stddev
+kafka.network.RequestMetrics.LocalTimeMs.request.Metadata.median
+kafka.network.RequestMetrics.LocalTimeMs.request.Metadata.98percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.Metadata.95percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.Metadata.999percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.Metadata.99percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.Metadata.75percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Produce.max
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Produce.mean
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Produce.min
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Produce.stddev
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Produce.median
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Produce.98percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Produce.95percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Produce.999percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Produce.99percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Produce.75percentile
+kafka.server.DelayedOperationPurgatory.NumDelayedOperations.delayedOperation.Fetch
+kafka.network.RequestMetrics.TotalTimeMs.request.UpdateMetadata.max
+kafka.network.RequestMetrics.TotalTimeMs.request.UpdateMetadata.mean
+kafka.network.RequestMetrics.TotalTimeMs.request.UpdateMetadata.min
+kafka.network.RequestMetrics.TotalTimeMs.request.UpdateMetadata.stddev
+kafka.network.RequestMetrics.TotalTimeMs.request.UpdateMetadata.median
+kafka.network.RequestMetrics.TotalTimeMs.request.UpdateMetadata.98percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.UpdateMetadata.95percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.UpdateMetadata.999percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.UpdateMetadata.99percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.UpdateMetadata.75percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.OffsetFetch.max
+kafka.network.RequestMetrics.RemoteTimeMs.request.OffsetFetch.mean
+kafka.network.RequestMetrics.RemoteTimeMs.request.OffsetFetch.min
+kafka.network.RequestMetrics.RemoteTimeMs.request.OffsetFetch.stddev
+kafka.network.RequestMetrics.RemoteTimeMs.request.OffsetFetch.median
+kafka.network.RequestMetrics.RemoteTimeMs.request.OffsetFetch.98percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.OffsetFetch.95percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.OffsetFetch.999percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.OffsetFetch.99percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.OffsetFetch.75percentile
+kafka.network.SocketServer.NetworkProcessorAvgIdlePercent.count
+kafka.network.SocketServer.NetworkProcessorAvgIdlePercent.1MinuteRate
+kafka.network.SocketServer.NetworkProcessorAvgIdlePercent.meanRate
+kafka.network.SocketServer.NetworkProcessorAvgIdlePercent.5MinuteRate
+kafka.network.SocketServer.NetworkProcessorAvgIdlePercent.15MinuteRate
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.OffsetCommit.max
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.OffsetCommit.mean
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.OffsetCommit.min
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.OffsetCommit.stddev
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.OffsetCommit.median
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.OffsetCommit.98percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.OffsetCommit.95percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.OffsetCommit.999percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.OffsetCommit.99percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.OffsetCommit.75percentile
+kafka.network.RequestMetrics.RequestsPerSec.request.FetchConsumer.count
+kafka.network.RequestMetrics.RequestsPerSec.request.FetchConsumer.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.FetchConsumer.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.FetchConsumer.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.FetchConsumer.15MinuteRate
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.ControlledShutdown.max
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.ControlledShutdown.mean
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.ControlledShutdown.min
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.ControlledShutdown.stddev
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.ControlledShutdown.median
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.ControlledShutdown.98percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.ControlledShutdown.95percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.ControlledShutdown.999percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.ControlledShutdown.99percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.ControlledShutdown.75percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.FetchFollower.max
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.FetchFollower.mean
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.FetchFollower.min
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.FetchFollower.stddev
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.FetchFollower.median
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.FetchFollower.98percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.FetchFollower.95percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.FetchFollower.999percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.FetchFollower.99percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.FetchFollower.75percentile
+kafka.network.RequestMetrics.RequestsPerSec.request.FetchFollower.count
+kafka.network.RequestMetrics.RequestsPerSec.request.FetchFollower.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.FetchFollower.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.FetchFollower.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.FetchFollower.15MinuteRate
+kafka.network.SocketServer.ResponsesBeingSent
+kafka.server.BrokerTopicMetrics.BytesOutPerSec.count
+kafka.server.BrokerTopicMetrics.BytesOutPerSec.1MinuteRate
+kafka.server.BrokerTopicMetrics.BytesOutPerSec.meanRate
+kafka.server.BrokerTopicMetrics.BytesOutPerSec.5MinuteRate
+kafka.server.BrokerTopicMetrics.BytesOutPerSec.15MinuteRate
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Fetch.max
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Fetch.mean
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Fetch.min
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Fetch.stddev
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Fetch.median
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Fetch.98percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Fetch.95percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Fetch.999percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Fetch.99percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Fetch.75percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.Heartbeat.max
+kafka.network.RequestMetrics.TotalTimeMs.request.Heartbeat.mean
+kafka.network.RequestMetrics.TotalTimeMs.request.Heartbeat.min
+kafka.network.RequestMetrics.TotalTimeMs.request.Heartbeat.stddev
+kafka.network.RequestMetrics.TotalTimeMs.request.Heartbeat.median
+kafka.network.RequestMetrics.TotalTimeMs.request.Heartbeat.98percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.Heartbeat.95percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.Heartbeat.999percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.Heartbeat.99percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.Heartbeat.75percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.ConsumerMetadata.max
+kafka.network.RequestMetrics.RemoteTimeMs.request.ConsumerMetadata.mean
+kafka.network.RequestMetrics.RemoteTimeMs.request.ConsumerMetadata.min
+kafka.network.RequestMetrics.RemoteTimeMs.request.ConsumerMetadata.stddev
+kafka.network.RequestMetrics.RemoteTimeMs.request.ConsumerMetadata.median
+kafka.network.RequestMetrics.RemoteTimeMs.request.ConsumerMetadata.98percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.ConsumerMetadata.95percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.ConsumerMetadata.999percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.ConsumerMetadata.99percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.ConsumerMetadata.75percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.JoinGroup.max
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.JoinGroup.mean
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.JoinGroup.min
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.JoinGroup.stddev
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.JoinGroup.median
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.JoinGroup.98percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.JoinGroup.95percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.JoinGroup.999percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.JoinGroup.99percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.JoinGroup.75percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.LeaderAndIsr.max
+kafka.network.RequestMetrics.LocalTimeMs.request.LeaderAndIsr.mean
+kafka.network.RequestMetrics.LocalTimeMs.request.LeaderAndIsr.min
+kafka.network.RequestMetrics.LocalTimeMs.request.LeaderAndIsr.stddev
+kafka.network.RequestMetrics.LocalTimeMs.request.LeaderAndIsr.median
+kafka.network.RequestMetrics.LocalTimeMs.request.LeaderAndIsr.98percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.LeaderAndIsr.95percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.LeaderAndIsr.999percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.LeaderAndIsr.99percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.LeaderAndIsr.75percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Metadata.max
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Metadata.mean
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Metadata.min
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Metadata.stddev
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Metadata.median
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Metadata.98percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Metadata.95percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Metadata.999percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Metadata.99percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Metadata.75percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Produce.max
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Produce.mean
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Produce.min
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Produce.stddev
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Produce.median
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Produce.98percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Produce.95percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Produce.999percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Produce.99percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Produce.75percentile
+kafka.network.RequestMetrics.RequestsPerSec.request.StopReplica.count
+kafka.network.RequestMetrics.RequestsPerSec.request.StopReplica.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.StopReplica.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.StopReplica.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.StopReplica.15MinuteRate
+kafka.network.RequestMetrics.RemoteTimeMs.request.Heartbeat.max
+kafka.network.RequestMetrics.RemoteTimeMs.request.Heartbeat.mean
+kafka.network.RequestMetrics.RemoteTimeMs.request.Heartbeat.min
+kafka.network.RequestMetrics.RemoteTimeMs.request.Heartbeat.stddev
+kafka.network.RequestMetrics.RemoteTimeMs.request.Heartbeat.median
+kafka.network.RequestMetrics.RemoteTimeMs.request.Heartbeat.98percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.Heartbeat.95percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.Heartbeat.999percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.Heartbeat.99percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.Heartbeat.75percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.ControlledShutdown.max
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.ControlledShutdown.mean
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.ControlledShutdown.min
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.ControlledShutdown.stddev
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.ControlledShutdown.median
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.ControlledShutdown.98percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.ControlledShutdown.95percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.ControlledShutdown.999percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.ControlledShutdown.99percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.ControlledShutdown.75percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.Produce.max
+kafka.network.RequestMetrics.LocalTimeMs.request.Produce.mean
+kafka.network.RequestMetrics.LocalTimeMs.request.Produce.min
+kafka.network.RequestMetrics.LocalTimeMs.request.Produce.stddev
+kafka.network.RequestMetrics.LocalTimeMs.request.Produce.median
+kafka.network.RequestMetrics.LocalTimeMs.request.Produce.98percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.Produce.95percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.Produce.999percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.Produce.99percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.Produce.75percentile
+kafka.server.BrokerTopicMetrics.FailedFetchRequestsPerSec.count
+kafka.server.BrokerTopicMetrics.FailedFetchRequestsPerSec.1MinuteRate
+kafka.server.BrokerTopicMetrics.FailedFetchRequestsPerSec.meanRate
+kafka.server.BrokerTopicMetrics.FailedFetchRequestsPerSec.5MinuteRate
+kafka.server.BrokerTopicMetrics.FailedFetchRequestsPerSec.15MinuteRate
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.UpdateMetadata.max
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.UpdateMetadata.mean
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.UpdateMetadata.min
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.UpdateMetadata.stddev
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.UpdateMetadata.median
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.UpdateMetadata.98percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.UpdateMetadata.95percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.UpdateMetadata.999percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.UpdateMetadata.99percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.UpdateMetadata.75percentile
+kafka.network.RequestMetrics.RequestsPerSec.request.UpdateMetadata.count
+kafka.network.RequestMetrics.RequestsPerSec.request.UpdateMetadata.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.UpdateMetadata.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.UpdateMetadata.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.UpdateMetadata.15MinuteRate
+kafka.server.DelayedOperationPurgatory.NumDelayedOperations.delayedOperation.Rebalance
+kafka.server.BrokerTopicMetrics.FailedProduceRequestsPerSec.count
+kafka.server.BrokerTopicMetrics.FailedProduceRequestsPerSec.1MinuteRate
+kafka.server.BrokerTopicMetrics.FailedProduceRequestsPerSec.meanRate
+kafka.server.BrokerTopicMetrics.FailedProduceRequestsPerSec.5MinuteRate
+kafka.server.BrokerTopicMetrics.FailedProduceRequestsPerSec.15MinuteRate
+kafka.server.ReplicaManager.PartitionCount
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.StopReplica.max
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.StopReplica.mean
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.StopReplica.min
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.StopReplica.stddev
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.StopReplica.median
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.StopReplica.98percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.StopReplica.95percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.StopReplica.999percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.StopReplica.99percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.StopReplica.75percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.FetchConsumer.max
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.FetchConsumer.mean
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.FetchConsumer.min
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.FetchConsumer.stddev
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.FetchConsumer.median
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.FetchConsumer.98percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.FetchConsumer.95percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.FetchConsumer.999percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.FetchConsumer.99percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.FetchConsumer.75percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.OffsetFetch.max
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.OffsetFetch.mean
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.OffsetFetch.min
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.OffsetFetch.stddev
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.OffsetFetch.median
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.OffsetFetch.98percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.OffsetFetch.95percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.OffsetFetch.999percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.OffsetFetch.99percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.OffsetFetch.75percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Fetch.max
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Fetch.mean
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Fetch.min
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Fetch.stddev
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Fetch.median
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Fetch.98percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Fetch.95percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Fetch.999percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Fetch.99percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Fetch.75percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.FetchFollower.max
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.FetchFollower.mean
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.FetchFollower.min
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.FetchFollower.stddev
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.FetchFollower.median
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.FetchFollower.98percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.FetchFollower.95percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.FetchFollower.999percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.FetchFollower.99percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.FetchFollower.75percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Offsets.max
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Offsets.mean
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Offsets.min
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Offsets.stddev
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Offsets.median
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Offsets.98percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Offsets.95percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Offsets.999percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Offsets.99percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Offsets.75percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.StopReplica.max
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.StopReplica.mean
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.StopReplica.min
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.StopReplica.stddev
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.StopReplica.median
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.StopReplica.98percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.StopReplica.95percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.StopReplica.999percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.StopReplica.99percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.StopReplica.75percentile
+kafka.network.RequestChannel.ResponseQueueSize.processor.1
+kafka.network.RequestMetrics.TotalTimeMs.request.ConsumerMetadata.max
+kafka.network.RequestMetrics.TotalTimeMs.request.ConsumerMetadata.mean
+kafka.network.RequestMetrics.TotalTimeMs.request.ConsumerMetadata.min
+kafka.network.RequestMetrics.TotalTimeMs.request.ConsumerMetadata.stddev
+kafka.network.RequestMetrics.TotalTimeMs.request.ConsumerMetadata.median
+kafka.network.RequestMetrics.TotalTimeMs.request.ConsumerMetadata.98percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.ConsumerMetadata.95percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.ConsumerMetadata.999percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.ConsumerMetadata.99percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.ConsumerMetadata.75percentile
+kafka.network.RequestChannel.ResponseQueueSize.processor.2
+kafka.network.RequestChannel.ResponseQueueSize.processor.0
+kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec.count
+kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec.1MinuteRate
+kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec.meanRate
+kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec.5MinuteRate
+kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec.15MinuteRate
+kafka.network.RequestMetrics.TotalTimeMs.request.FetchConsumer.max
+kafka.network.RequestMetrics.TotalTimeMs.request.FetchConsumer.mean
+kafka.network.RequestMetrics.TotalTimeMs.request.FetchConsumer.min
+kafka.network.RequestMetrics.TotalTimeMs.request.FetchConsumer.stddev
+kafka.network.RequestMetrics.TotalTimeMs.request.FetchConsumer.median
+kafka.network.RequestMetrics.TotalTimeMs.request.FetchConsumer.98percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.FetchConsumer.95percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.FetchConsumer.999percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.FetchConsumer.99percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.FetchConsumer.75percentile
+kafka.server.OffsetManager.NumOffsets
+kafka.network.RequestMetrics.LocalTimeMs.request.JoinGroup.max
+kafka.network.RequestMetrics.LocalTimeMs.request.JoinGroup.mean
+kafka.network.RequestMetrics.LocalTimeMs.request.JoinGroup.min
+kafka.network.RequestMetrics.LocalTimeMs.request.JoinGroup.stddev
+kafka.network.RequestMetrics.LocalTimeMs.request.JoinGroup.median
+kafka.network.RequestMetrics.LocalTimeMs.request.JoinGroup.98percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.JoinGroup.95percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.JoinGroup.999percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.JoinGroup.99percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.JoinGroup.75percentile
+kafka.network.RequestChannel.RequestQueueSize
+kafka.network.RequestMetrics.TotalTimeMs.request.OffsetCommit.max
+kafka.network.RequestMetrics.TotalTimeMs.request.OffsetCommit.mean
+kafka.network.RequestMetrics.TotalTimeMs.request.OffsetCommit.min
+kafka.network.RequestMetrics.TotalTimeMs.request.OffsetCommit.stddev
+kafka.network.RequestMetrics.TotalTimeMs.request.OffsetCommit.median
+kafka.network.RequestMetrics.TotalTimeMs.request.OffsetCommit.98percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.OffsetCommit.95percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.OffsetCommit.999percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.OffsetCommit.99percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.OffsetCommit.75percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.StopReplica.max
+kafka.network.RequestMetrics.LocalTimeMs.request.StopReplica.mean
+kafka.network.RequestMetrics.LocalTimeMs.request.StopReplica.min
+kafka.network.RequestMetrics.LocalTimeMs.request.StopReplica.stddev
+kafka.network.RequestMetrics.LocalTimeMs.request.StopReplica.median
+kafka.network.RequestMetrics.LocalTimeMs.request.StopReplica.98percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.StopReplica.95percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.StopReplica.999percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.StopReplica.99percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.StopReplica.75percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Heartbeat.max
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Heartbeat.mean
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Heartbeat.min
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Heartbeat.stddev
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Heartbeat.median
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Heartbeat.98percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Heartbeat.95percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Heartbeat.999percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Heartbeat.99percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Heartbeat.75percentile
+kafka.log.Log.LogStartOffset.partition.0.topic.ambari_kafka_service_check
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Metadata.max
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Metadata.mean
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Metadata.min
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Metadata.stddev
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Metadata.median
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Metadata.98percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Metadata.95percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Metadata.999percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Metadata.99percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Metadata.75percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.StopReplica.max
+kafka.network.RequestMetrics.TotalTimeMs.request.StopReplica.mean
+kafka.network.RequestMetrics.TotalTimeMs.request.StopReplica.min
+kafka.network.RequestMetrics.TotalTimeMs.request.StopReplica.stddev
+kafka.network.RequestMetrics.TotalTimeMs.request.StopReplica.median
+kafka.network.RequestMetrics.TotalTimeMs.request.StopReplica.98percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.StopReplica.95percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.StopReplica.999percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.StopReplica.99percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.StopReplica.75percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.FetchConsumer.max
+kafka.network.RequestMetrics.RemoteTimeMs.request.FetchConsumer.mean
+kafka.network.RequestMetrics.RemoteTimeMs.request.FetchConsumer.min
+kafka.network.RequestMetrics.RemoteTimeMs.request.FetchConsumer.stddev
+kafka.network.RequestMetrics.RemoteTimeMs.request.FetchConsumer.median
+kafka.network.RequestMetrics.RemoteTimeMs.request.FetchConsumer.98percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.FetchConsumer.95percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.FetchConsumer.999percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.FetchConsumer.99percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.FetchConsumer.75percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.Offsets.max
+kafka.network.RequestMetrics.LocalTimeMs.request.Offsets.mean
+kafka.network.RequestMetrics.LocalTimeMs.request.Offsets.min
+kafka.network.RequestMetrics.LocalTimeMs.request.Offsets.stddev
+kafka.network.RequestMetrics.LocalTimeMs.request.Offsets.median
+kafka.network.RequestMetrics.LocalTimeMs.request.Offsets.98percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.Offsets.95percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.Offsets.999percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.Offsets.99percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.Offsets.75percentile
+kafka.server.BrokerTopicMetrics.BytesRejectedPerSec.count
+kafka.server.BrokerTopicMetrics.BytesRejectedPerSec.1MinuteRate
+kafka.server.BrokerTopicMetrics.BytesRejectedPerSec.meanRate
+kafka.server.BrokerTopicMetrics.BytesRejectedPerSec.5MinuteRate
+kafka.server.BrokerTopicMetrics.BytesRejectedPerSec.15MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.ConsumerMetadata.count
+kafka.network.RequestMetrics.RequestsPerSec.request.ConsumerMetadata.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.ConsumerMetadata.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.ConsumerMetadata.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.ConsumerMetadata.15MinuteRate
+kafka.network.RequestMetrics.RemoteTimeMs.request.UpdateMetadata.max
+kafka.network.RequestMetrics.RemoteTimeMs.request.UpdateMetadata.mean
+kafka.network.RequestMetrics.RemoteTimeMs.request.UpdateMetadata.min
+kafka.network.RequestMetrics.RemoteTimeMs.request.UpdateMetadata.stddev
+kafka.network.RequestMetrics.RemoteTimeMs.request.UpdateMetadata.median
+kafka.network.RequestMetrics.RemoteTimeMs.request.UpdateMetadata.98percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.UpdateMetadata.95percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.UpdateMetadata.999percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.UpdateMetadata.99percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.UpdateMetadata.75percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.ConsumerMetadata.max
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.ConsumerMetadata.mean
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.ConsumerMetadata.min
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.ConsumerMetadata.stddev
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.ConsumerMetadata.median
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.ConsumerMetadata.98percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.ConsumerMetadata.95percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.ConsumerMetadata.999percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.ConsumerMetadata.99percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.ConsumerMetadata.75percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.FetchConsumer.max
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.FetchConsumer.mean
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.FetchConsumer.min
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.FetchConsumer.stddev
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.FetchConsumer.median
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.FetchConsumer.98percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.FetchConsumer.95percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.FetchConsumer.999percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.FetchConsumer.99percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.FetchConsumer.75percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.UpdateMetadata.max
+kafka.network.RequestMetrics.LocalTimeMs.request.UpdateMetadata.mean
+kafka.network.RequestMetrics.LocalTimeMs.request.UpdateMetadata.min
+kafka.network.RequestMetrics.LocalTimeMs.request.UpdateMetadata.stddev
+kafka.network.RequestMetrics.LocalTimeMs.request.UpdateMetadata.median
+kafka.network.RequestMetrics.LocalTimeMs.request.UpdateMetadata.98percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.UpdateMetadata.95percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.UpdateMetadata.999percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.UpdateMetadata.99percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.UpdateMetadata.75percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Heartbeat.max
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Heartbeat.mean
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Heartbeat.min
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Heartbeat.stddev
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Heartbeat.median
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Heartbeat.98percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Heartbeat.95percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Heartbeat.999percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Heartbeat.99percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Heartbeat.75percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.OffsetCommit.max
+kafka.network.RequestMetrics.LocalTimeMs.request.OffsetCommit.mean
+kafka.network.RequestMetrics.LocalTimeMs.request.OffsetCommit.min
+kafka.network.RequestMetrics.LocalTimeMs.request.OffsetCommit.stddev
+kafka.network.RequestMetrics.LocalTimeMs.request.OffsetCommit.median
+kafka.network.RequestMetrics.LocalTimeMs.request.OffsetCommit.98percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.OffsetCommit.95percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.OffsetCommit.999percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.OffsetCommit.99percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.OffsetCommit.75percentile
+kafka.controller.KafkaController.OfflinePartitionsCount
+kafka.network.RequestMetrics.TotalTimeMs.request.LeaderAndIsr.max
+kafka.network.RequestMetrics.TotalTimeMs.request.LeaderAndIsr.mean
+kafka.network.RequestMetrics.TotalTimeMs.request.LeaderAndIsr.min
+kafka.network.RequestMetrics.TotalTimeMs.request.LeaderAndIsr.stddev
+kafka.network.RequestMetrics.TotalTimeMs.request.LeaderAndIsr.median
+kafka.network.RequestMetrics.TotalTimeMs.request.LeaderAndIsr.98percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.LeaderAndIsr.95percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.LeaderAndIsr.999percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.LeaderAndIsr.99percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.LeaderAndIsr.75percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetFetch.max
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetFetch.mean
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetFetch.min
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetFetch.stddev
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetFetch.median
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetFetch.98percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetFetch.95percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetFetch.999percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetFetch.99percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetFetch.75percentile
+kafka.server.DelayedOperationPurgatory.NumDelayedOperations.delayedOperation.Heartbeat
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetCommit.max
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetCommit.mean
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetCommit.min
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetCommit.stddev
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetCommit.median
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetCommit.98percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetCommit.95percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetCommit.999percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetCommit.99percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetCommit.75percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Offsets.max
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Offsets.mean
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Offsets.min
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Offsets.stddev
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Offsets.median
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Offsets.98percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Offsets.95percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Offsets.999percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Offsets.99percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Offsets.75percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.JoinGroup.max
+kafka.network.RequestMetrics.TotalTimeMs.request.JoinGroup.mean
+kafka.network.RequestMetrics.TotalTimeMs.request.JoinGroup.min
+kafka.network.RequestMetrics.TotalTimeMs.request.JoinGroup.stddev
+kafka.network.RequestMetrics.TotalTimeMs.request.JoinGroup.median
+kafka.network.RequestMetrics.TotalTimeMs.request.JoinGroup.98percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.JoinGroup.95percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.JoinGroup.999percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.JoinGroup.99percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.JoinGroup.75percentile
+kafka.server.BrokerTopicMetrics.TotalProduceRequestsPerSec.count
+kafka.server.BrokerTopicMetrics.TotalProduceRequestsPerSec.1MinuteRate
+kafka.server.BrokerTopicMetrics.TotalProduceRequestsPerSec.meanRate
+kafka.server.BrokerTopicMetrics.TotalProduceRequestsPerSec.5MinuteRate
+kafka.server.BrokerTopicMetrics.TotalProduceRequestsPerSec.15MinuteRate
+kafka.network.SocketServer.IdlePercent.networkProcessor.0.count
+kafka.network.SocketServer.IdlePercent.networkProcessor.0.1MinuteRate
+kafka.network.SocketServer.IdlePercent.networkProcessor.0.meanRate
+kafka.network.SocketServer.IdlePercent.networkProcessor.0.5MinuteRate
+kafka.network.SocketServer.IdlePercent.networkProcessor.0.15MinuteRate
+kafka.network.SocketServer.IdlePercent.networkProcessor.1.count
+kafka.network.SocketServer.IdlePercent.networkProcessor.1.1MinuteRate
+kafka.network.SocketServer.IdlePercent.networkProcessor.1.meanRate
+kafka.network.SocketServer.IdlePercent.networkProcessor.1.5MinuteRate
+kafka.network.SocketServer.IdlePercent.networkProcessor.1.15MinuteRate
+kafka.network.SocketServer.IdlePercent.networkProcessor.2.count
+kafka.network.SocketServer.IdlePercent.networkProcessor.2.1MinuteRate
+kafka.network.SocketServer.IdlePercent.networkProcessor.2.meanRate
+kafka.network.SocketServer.IdlePercent.networkProcessor.2.5MinuteRate
+kafka.network.SocketServer.IdlePercent.networkProcessor.2.15MinuteRate
+kafka.network.RequestMetrics.LocalTimeMs.request.FetchFollower.max
+kafka.network.RequestMetrics.LocalTimeMs.request.FetchFollower.mean
+kafka.network.RequestMetrics.LocalTimeMs.request.FetchFollower.min
+kafka.network.RequestMetrics.LocalTimeMs.request.FetchFollower.stddev
+kafka.network.RequestMetrics.LocalTimeMs.request.FetchFollower.median
+kafka.network.RequestMetrics.LocalTimeMs.request.FetchFollower.98percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.FetchFollower.95percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.FetchFollower.999percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.FetchFollower.99percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.FetchFollower.75percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Fetch.max
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Fetch.mean
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Fetch.min
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Fetch.stddev
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Fetch.median
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Fetch.98percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Fetch.95percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Fetch.999percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Fetch.99percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.Fetch.75percentile
+kafka.server.KafkaServer.BrokerState
+kafka.server.ReplicaManager.IsrShrinksPerSec.count
+kafka.server.ReplicaManager.IsrShrinksPerSec.1MinuteRate
+kafka.server.ReplicaManager.IsrShrinksPerSec.meanRate
+kafka.server.ReplicaManager.IsrShrinksPerSec.5MinuteRate
+kafka.server.ReplicaManager.IsrShrinksPerSec.15MinuteRate
+kafka.server.ReplicaFetcherManager.MinFetchRate.clientId.Replica
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.count
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.1MinuteRate
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.meanRate
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.5MinuteRate
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.15MinuteRate
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.max
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.mean
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.min
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.stddev
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.median
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.98percentile
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.95percentile
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.999percentile
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.99percentile
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.75percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.FetchConsumer.max
+kafka.network.RequestMetrics.LocalTimeMs.request.FetchConsumer.mean
+kafka.network.RequestMetrics.LocalTimeMs.request.FetchConsumer.min
+kafka.network.RequestMetrics.LocalTimeMs.request.FetchConsumer.stddev
+kafka.network.RequestMetrics.LocalTimeMs.request.FetchConsumer.median
+kafka.network.RequestMetrics.LocalTimeMs.request.FetchConsumer.98percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.FetchConsumer.95percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.FetchConsumer.999percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.FetchConsumer.99percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.FetchConsumer.75percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.OffsetFetch.max
+kafka.network.RequestMetrics.TotalTimeMs.request.OffsetFetch.mean
+kafka.network.RequestMetrics.TotalTimeMs.request.OffsetFetch.min
+kafka.network.RequestMetrics.TotalTimeMs.request.OffsetFetch.stddev
+kafka.network.RequestMetrics.TotalTimeMs.request.OffsetFetch.median
+kafka.network.RequestMetrics.TotalTimeMs.request.OffsetFetch.98percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.OffsetFetch.95percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.OffsetFetch.999percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.OffsetFetch.99percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.OffsetFetch.75percentile
+kafka.log.Log.LogEndOffset.partition.0.topic.ambari_kafka_service_check
+kafka.server.DelayedOperationPurgatory.PurgatorySize.delayedOperation.Rebalance
+kafka.network.RequestMetrics.RequestsPerSec.request.Offsets.count
+kafka.network.RequestMetrics.RequestsPerSec.request.Offsets.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Offsets.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Offsets.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Offsets.15MinuteRate
+kafka.network.RequestMetrics.TotalTimeMs.request.Offsets.max
+kafka.network.RequestMetrics.TotalTimeMs.request.Offsets.mean
+kafka.network.RequestMetrics.TotalTimeMs.request.Offsets.min
+kafka.network.RequestMetrics.TotalTimeMs.request.Offsets.stddev
+kafka.network.RequestMetrics.TotalTimeMs.request.Offsets.median
+kafka.network.RequestMetrics.TotalTimeMs.request.Offsets.98percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.Offsets.95percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.Offsets.999percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.Offsets.99percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.Offsets.75percentile
+kafka.server.BrokerTopicMetrics.MessagesInPerSec.count
+kafka.server.BrokerTopicMetrics.MessagesInPerSec.1MinuteRate
+kafka.server.BrokerTopicMetrics.MessagesInPerSec.meanRate
+kafka.server.BrokerTopicMetrics.MessagesInPerSec.5MinuteRate
+kafka.server.BrokerTopicMetrics.MessagesInPerSec.15MinuteRate
+kafka.server.BrokerTopicMetrics.TotalFetchRequestsPerSec.count
+kafka.server.BrokerTopicMetrics.TotalFetchRequestsPerSec.1MinuteRate
+kafka.server.BrokerTopicMetrics.TotalFetchRequestsPerSec.meanRate
+kafka.server.BrokerTopicMetrics.TotalFetchRequestsPerSec.5MinuteRate
+kafka.server.BrokerTopicMetrics.TotalFetchRequestsPerSec.15MinuteRate
+kafka.network.RequestMetrics.LocalTimeMs.request.Heartbeat.max
+kafka.network.RequestMetrics.LocalTimeMs.request.Heartbeat.mean
+kafka.network.RequestMetrics.LocalTimeMs.request.Heartbeat.min
+kafka.network.RequestMetrics.LocalTimeMs.request.Heartbeat.stddev
+kafka.network.RequestMetrics.LocalTimeMs.request.Heartbeat.median
+kafka.network.RequestMetrics.LocalTimeMs.request.Heartbeat.98percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.Heartbeat.95percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.Heartbeat.999percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.Heartbeat.99percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.Heartbeat.75percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.LeaderAndIsr.max
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.LeaderAndIsr.mean
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.LeaderAndIsr.min
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.LeaderAndIsr.stddev
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.LeaderAndIsr.median
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.LeaderAndIsr.98percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.LeaderAndIsr.95percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.LeaderAndIsr.999percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.LeaderAndIsr.99percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.LeaderAndIsr.75percentile
+kafka.network.RequestMetrics.RequestsPerSec.request.ControlledShutdown.count
+kafka.network.RequestMetrics.RequestsPerSec.request.ControlledShutdown.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.ControlledShutdown.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.ControlledShutdown.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.ControlledShutdown.15MinuteRate
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.JoinGroup.max
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.JoinGroup.mean
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.JoinGroup.min
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.JoinGroup.stddev
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.JoinGroup.median
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.JoinGroup.98percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.JoinGroup.95percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.JoinGroup.999percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.JoinGroup.99percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.JoinGroup.75percentile
+kafka.server.DelayedOperationPurgatory.PurgatorySize.delayedOperation.JoinGroup
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.FetchFollower.max
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.FetchFollower.mean
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.FetchFollower.min
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.FetchFollower.stddev
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.FetchFollower.median
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.FetchFollower.98percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.FetchFollower.95percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.FetchFollower.999percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.FetchFollower.99percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.FetchFollower.75percentile
+kafka.server.OffsetManager.NumGroups
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.OffsetFetch.max
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.OffsetFetch.mean
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.OffsetFetch.min
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.OffsetFetch.stddev
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.OffsetFetch.median
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.OffsetFetch.98percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.OffsetFetch.95percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.OffsetFetch.999percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.OffsetFetch.99percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.OffsetFetch.75percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.Offsets.max
+kafka.network.RequestMetrics.RemoteTimeMs.request.Offsets.mean
+kafka.network.RequestMetrics.RemoteTimeMs.request.Offsets.min
+kafka.network.RequestMetrics.RemoteTimeMs.request.Offsets.stddev
+kafka.network.RequestMetrics.RemoteTimeMs.request.Offsets.median
+kafka.network.RequestMetrics.RemoteTimeMs.request.Offsets.98percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.Offsets.95percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.Offsets.999percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.Offsets.99percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.Offsets.75percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.ConsumerMetadata.max
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.ConsumerMetadata.mean
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.ConsumerMetadata.min
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.ConsumerMetadata.stddev
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.ConsumerMetadata.median
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.ConsumerMetadata.98percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.ConsumerMetadata.95percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.ConsumerMetadata.999percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.ConsumerMetadata.99percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.ConsumerMetadata.75percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.JoinGroup.max
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.JoinGroup.mean
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.JoinGroup.min
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.JoinGroup.stddev
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.JoinGroup.median
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.JoinGroup.98percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.JoinGroup.95percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.JoinGroup.999percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.JoinGroup.99percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.JoinGroup.75percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.ControlledShutdown.max
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.ControlledShutdown.mean
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.ControlledShutdown.min
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.ControlledShutdown.stddev
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.ControlledShutdown.median
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.ControlledShutdown.98percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.ControlledShutdown.95percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.ControlledShutdown.999percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.ControlledShutdown.99percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.ControlledShutdown.75percentile
+kafka.server.ReplicaManager.UnderReplicatedPartitions
+kafka.network.RequestMetrics.RequestsPerSec.request.Produce.count
+kafka.network.RequestMetrics.RequestsPerSec.request.Produce.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Produce.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Produce.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Produce.15MinuteRate
+kafka.network.RequestMetrics.RemoteTimeMs.request.FetchFollower.max
+kafka.network.RequestMetrics.RemoteTimeMs.request.FetchFollower.mean
+kafka.network.RequestMetrics.RemoteTimeMs.request.FetchFollower.min
+kafka.network.RequestMetrics.RemoteTimeMs.request.FetchFollower.stddev
+kafka.network.RequestMetrics.RemoteTimeMs.request.FetchFollower.median
+kafka.network.RequestMetrics.RemoteTimeMs.request.FetchFollower.98percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.FetchFollower.95percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.FetchFollower.999percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.FetchFollower.99percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.FetchFollower.75percentile
+kafka.controller.KafkaController.ActiveControllerCount
+kafka.log.Log.NumLogSegments.partition.0.topic.ambari_kafka_service_check
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.StopReplica.max
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.StopReplica.mean
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.StopReplica.min
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.StopReplica.stddev
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.StopReplica.median
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.StopReplica.98percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.StopReplica.95percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.StopReplica.999percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.StopReplica.99percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.StopReplica.75percentile
+kafka.network.RequestMetrics.RequestsPerSec.request.JoinGroup.count
+kafka.network.RequestMetrics.RequestsPerSec.request.JoinGroup.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.JoinGroup.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.JoinGroup.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.JoinGroup.15MinuteRate
+kafka.controller.KafkaController.PreferredReplicaImbalanceCount
+kafka.server.BrokerTopicMetrics.BytesInPerSec.count
+kafka.server.BrokerTopicMetrics.BytesInPerSec.1MinuteRate
+kafka.server.BrokerTopicMetrics.BytesInPerSec.meanRate
+kafka.server.BrokerTopicMetrics.BytesInPerSec.5MinuteRate
+kafka.server.BrokerTopicMetrics.BytesInPerSec.15MinuteRate
+kafka.server.DelayedOperationPurgatory.PurgatorySize.delayedOperation.Produce
+kafka.network.RequestMetrics.RemoteTimeMs.request.StopReplica.max
+kafka.network.RequestMetrics.RemoteTimeMs.request.StopReplica.mean
+kafka.network.RequestMetrics.RemoteTimeMs.request.StopReplica.min
+kafka.network.RequestMetrics.RemoteTimeMs.request.StopReplica.stddev
+kafka.network.RequestMetrics.RemoteTimeMs.request.StopReplica.median
+kafka.network.RequestMetrics.RemoteTimeMs.request.StopReplica.98percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.StopReplica.95percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.StopReplica.999percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.StopReplica.99percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.StopReplica.75percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.Produce.max
+kafka.network.RequestMetrics.TotalTimeMs.request.Produce.mean
+kafka.network.RequestMetrics.TotalTimeMs.request.Produce.min
+kafka.network.RequestMetrics.TotalTimeMs.request.Produce.stddev
+kafka.network.RequestMetrics.TotalTimeMs.request.Produce.median
+kafka.network.RequestMetrics.TotalTimeMs.request.Produce.98percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.Produce.95percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.Produce.999percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.Produce.99percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.Produce.75percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.FetchFollower.max
+kafka.network.RequestMetrics.TotalTimeMs.request.FetchFollower.mean
+kafka.network.RequestMetrics.TotalTimeMs.request.FetchFollower.min
+kafka.network.RequestMetrics.TotalTimeMs.request.FetchFollower.stddev
+kafka.network.RequestMetrics.TotalTimeMs.request.FetchFollower.median
+kafka.network.RequestMetrics.TotalTimeMs.request.FetchFollower.98percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.FetchFollower.95percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.FetchFollower.999percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.FetchFollower.99percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.FetchFollower.75percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.LeaderAndIsr.max
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.LeaderAndIsr.mean
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.LeaderAndIsr.min
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.LeaderAndIsr.stddev
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.LeaderAndIsr.median
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.LeaderAndIsr.98percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.LeaderAndIsr.95percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.LeaderAndIsr.999percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.LeaderAndIsr.99percentile
+kafka.network.RequestMetrics.RequestQueueTimeMs.request.LeaderAndIsr.75percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.Fetch.max
+kafka.network.RequestMetrics.RemoteTimeMs.request.Fetch.mean
+kafka.network.RequestMetrics.RemoteTimeMs.request.Fetch.min
+kafka.network.RequestMetrics.RemoteTimeMs.request.Fetch.stddev
+kafka.network.RequestMetrics.RemoteTimeMs.request.Fetch.median
+kafka.network.RequestMetrics.RemoteTimeMs.request.Fetch.98percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.Fetch.95percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.Fetch.999percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.Fetch.99percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.Fetch.75percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.ConsumerMetadata.max
+kafka.network.RequestMetrics.LocalTimeMs.request.ConsumerMetadata.mean
+kafka.network.RequestMetrics.LocalTimeMs.request.ConsumerMetadata.min
+kafka.network.RequestMetrics.LocalTimeMs.request.ConsumerMetadata.stddev
+kafka.network.RequestMetrics.LocalTimeMs.request.ConsumerMetadata.median
+kafka.network.RequestMetrics.LocalTimeMs.request.ConsumerMetadata.98percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.ConsumerMetadata.95percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.ConsumerMetadata.999percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.ConsumerMetadata.99percentile
+kafka.network.RequestMetrics.LocalTimeMs.request.ConsumerMetadata.75percentile
+kafka.log.Log.Size.partition.0.topic.ambari_kafka_service_check
+kafka.network.RequestMetrics.RequestsPerSec.request.Metadata.count
+kafka.network.RequestMetrics.RequestsPerSec.request.Metadata.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Metadata.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Metadata.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Metadata.15MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.OffsetCommit.count
+kafka.network.RequestMetrics.RequestsPerSec.request.OffsetCommit.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.OffsetCommit.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.OffsetCommit.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.OffsetCommit.15MinuteRate
+kafka.network.RequestMetrics.RemoteTimeMs.request.ControlledShutdown.max
+kafka.network.RequestMetrics.RemoteTimeMs.request.ControlledShutdown.mean
+kafka.network.RequestMetrics.RemoteTimeMs.request.ControlledShutdown.min
+kafka.network.RequestMetrics.RemoteTimeMs.request.ControlledShutdown.stddev
+kafka.network.RequestMetrics.RemoteTimeMs.request.ControlledShutdown.median
+kafka.network.RequestMetrics.RemoteTimeMs.request.ControlledShutdown.98percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.ControlledShutdown.95percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.ControlledShutdown.999percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.ControlledShutdown.99percentile
+kafka.network.RequestMetrics.RemoteTimeMs.request.ControlledShutdown.75percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.Metadata.max
+kafka.network.RequestMetrics.TotalTimeMs.request.Metadata.mean
+kafka.network.RequestMetrics.TotalTimeMs.request.Metadata.min
+kafka.network.RequestMetrics.TotalTimeMs.request.Metadata.stddev
+kafka.network.RequestMetrics.TotalTimeMs.request.Metadata.median
+kafka.network.RequestMetrics.TotalTimeMs.request.Metadata.98percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.Metadata.95percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.Metadata.999percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.Metadata.99percentile
+kafka.network.RequestMetrics.TotalTimeMs.request.Metadata.75percentile
+kafka.network.RequestMetrics.RequestsPerSec.request.Heartbeat.count
+kafka.network.RequestMetrics.RequestsPerSec.request.Heartbeat.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Heartbeat.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Heartbeat.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Heartbeat.15MinuteRate
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/NIMBUS.dat
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/NIMBUS.dat b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/NIMBUS.dat
new file mode 100644
index 0000000..47a76f7
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/NIMBUS.dat
@@ -0,0 +1,7 @@
+Supervisors
+Topologies
+Total Slots
+Used Slots
+Free Slots
+Total Executors
+Total Tasks
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/AMS-HBASE.dat
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/AMS-HBASE.dat b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/AMS-HBASE.dat
new file mode 100644
index 0000000..91b524b
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/AMS-HBASE.dat
@@ -0,0 +1,26 @@
+dfs.FSNamesystem.MissingReplOneBlocks
+dfs.FSNamesystem.TransactionsSinceLastCheckpoint
+dfs.FSNamesystem.MillisSinceLastLoadedEdits
+dfs.FSNamesystem.SnapshottableDirectories
+master.Master.QueueCallTime_median
+dfs.FSNamesystem.LastCheckpointTime
+dfs.FSNamesystem.TotalFiles
+dfs.FSNamesystem.ExpiredHeartbeats
+dfs.FSNamesystem.PostponedMisreplicatedBlocks
+dfs.FSNamesystem.LastWrittenTransactionId
+jvm.JvmMetrics.MemHeapCommittedM
+dfs.FSNamesystem.Snapshots
+dfs.FSNamesystem.TransactionsSinceLastLogRoll
+master.Server.averageLoad
+jvm.JvmMetrics.MemHeapUsedM
+master.AssignmentManger.ritCount
+dfs.FSNamesystem.PendingDataNodeMessageCount
+dfs.FSNamesystem.StaleDataNodes
+|hostname|
+regionserver.Server.totalRequestCount
+regionserver.Server.regionCount
+regionserver.Server.blockCountHitPercen
+regionserver.Server.compactionQueueLength
+regionserver.Server.storeFileCount
+master.Server.averageLoad
+|startTime|endTime|

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/DATANODE.dat
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/DATANODE.dat b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/DATANODE.dat
new file mode 100644
index 0000000..74c121d
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/DATANODE.dat
@@ -0,0 +1,4 @@
+FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumFailedVolumes
+FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.DfsUsed
+FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity
+||
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/FLUME_HANDLER.dat
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/FLUME_HANDLER.dat b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/FLUME_HANDLER.dat
new file mode 100644
index 0000000..b8e5e92
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/FLUME_HANDLER.dat
@@ -0,0 +1,63 @@
+ChannelSize._rate._min._sum
+ChannelSize._rate._sum._max
+ChannelSize._rate._min._max
+ChannelSize._rate._avg._min
+ChannelSize._rate._avg._avg
+ChannelSize._rate._max
+ChannelSize._rate._max._min
+ChannelSize._rate._max._avg
+ChannelSize._rate._avg._sum
+ChannelSize._rate._max._sum
+ChannelSize._rate._sum
+ChannelSize._rate._sum._min
+ChannelSize._rate._sum._avg
+ChannelSize._rate._min._avg
+ChannelSize._rate._min._min
+ChannelSize._rate._avg._max
+ChannelSize._rate._max._max
+ChannelSize._rate._avg
+ChannelSize._rate._min
+ChannelSize._rate._sum._sum
+|startTime|endTime|
+EventPutSuccessCount._rate._avg._sum
+EventPutSuccessCount._rate._max._sum
+EventPutSuccessCount._rate._sum._sum
+EventPutSuccessCount._rate._max._max
+EventPutSuccessCount._rate._min._avg
+EventPutSuccessCount._rate._min._min
+EventPutSuccessCount._rate._avg._max
+EventPutSuccessCount._rate._sum._min
+EventPutSuccessCount._rate._sum._avg
+EventPutSuccessCount._rate._min._sum
+EventPutSuccessCount._rate._max
+EventPutSuccessCount._rate._max._avg
+EventPutSuccessCount._rate._avg._avg
+EventPutSuccessCount._rate._max._min
+EventPutSuccessCount._rate._avg._min
+EventPutSuccessCount._rate._avg
+EventPutSuccessCount._rate._min
+EventPutSuccessCount._rate._sum._max
+EventPutSuccessCount._rate._min._max
+EventPutSuccessCount._rate._sum
+|startTime|endTime|
+EventTakeSuccessCount._rate._min._sum
+EventTakeSuccessCount._rate._sum._max
+EventTakeSuccessCount._rate._max
+EventTakeSuccessCount._rate._min._max
+EventTakeSuccessCount._rate._max._max
+EventTakeSuccessCount._rate._min._avg
+EventTakeSuccessCount._rate._min._min
+EventTakeSuccessCount._rate._sum._sum
+EventTakeSuccessCount._rate._sum
+EventTakeSuccessCount._rate._sum._min
+EventTakeSuccessCount._rate._sum._avg
+EventTakeSuccessCount._rate._max._sum
+EventTakeSuccessCount._rate._min
+EventTakeSuccessCount._rate._avg
+EventTakeSuccessCount._rate._avg._min
+EventTakeSuccessCount._rate._avg._avg
+EventTakeSuccessCount._rate._avg._max
+EventTakeSuccessCount._rate._max._avg
+EventTakeSuccessCount._rate._max._min
+EventTakeSuccessCount._rate._avg._sum
+|startTime|endTime|


[35/50] [abbrv] ambari git commit: AMBARI-13409. AMS Load Simulator updates. (Aravindan Vijayan via swagle)

Posted by nc...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/loadsimulator/jmeter.properties
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/loadsimulator/jmeter.properties b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/loadsimulator/jmeter.properties
new file mode 100644
index 0000000..9fcf7ca
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/loadsimulator/jmeter.properties
@@ -0,0 +1,1172 @@
+################################################################################
+# Apache JMeter Property file
+################################################################################
+
+##   Licensed to the Apache Software Foundation (ASF) under one or more
+##   contributor license agreements.  See the NOTICE file distributed with
+##   this work for additional information regarding copyright ownership.
+##   The ASF licenses this file to You under the Apache License, Version 2.0
+##   (the "License"); you may not use this file except in compliance with
+##   the License.  You may obtain a copy of the License at
+## 
+##       http://www.apache.org/licenses/LICENSE-2.0
+## 
+##   Unless required by applicable law or agreed to in writing, software
+##   distributed under the License is distributed on an "AS IS" BASIS,
+##   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+##   See the License for the specific language governing permissions and
+##   limitations under the License.
+
+################################################################################
+#
+#                      THIS FILE SHOULD NOT BE MODIFIED
+#
+# This avoids having to re-apply the modifications when upgrading JMeter
+# Instead only user.properties should be modified:
+# 1/ copy the property you want to modify to user.properties from jmeter.properties
+# 2/ Change its value there
+#
+################################################################################
+
+#Preferred GUI language. Comment out to use the JVM default locale's language.
+#language=en
+
+# Additional locale(s) to add to the displayed list.
+# The current default list is: en, fr, de, no, es, tr, ja, zh_CN, zh_TW, pl, pt_BR
+# [see JMeterMenuBar#makeLanguageMenu()]
+# The entries are a comma-separated list of language names
+#locales.add=zu
+
+# Netscape HTTP Cookie file
+cookies=cookies
+
+#---------------------------------------------------------------------------
+# File format configuration for JMX and JTL files
+#---------------------------------------------------------------------------
+
+# Properties:
+# file_format          - affects both JMX and JTL files
+# file_format.testplan - affects JMX files only
+# file_format.testlog  - affects JTL files only
+#
+# Possible values are:
+# 2.1 - initial format using XStream
+# 2.2 - updated format using XStream, with shorter names
+
+# N.B. format 2.0 (Avalon) is no longer supported
+
+#---------------------------------------------------------------------------
+# XML Parser
+#---------------------------------------------------------------------------
+
+# XML Reader(Parser) - Must implement SAX 2 specs
+xml.parser=org.apache.xerces.parsers.SAXParser
+
+# Path to a Properties file containing Namespace mapping in the form
+# prefix=Namespace
+# Example:
+# ns=http://biz.aol.com/schema/2006-12-18
+#xpath.namespace.config=
+
+#---------------------------------------------------------------------------
+# SSL configuration
+#---------------------------------------------------------------------------
+
+## SSL System properties are now in system.properties
+
+# JMeter no longer converts javax.xxx property entries in this file into System properties.
+# These must now be defined in the system.properties file or on the command-line.
+# The system.properties file gives more flexibility.
+
+# By default, SSL session contexts are now created per-thread, rather than being shared.
+# The original behaviour can be enabled by setting the JMeter property:
+#https.sessioncontext.shared=true
+
+# Default HTTPS protocol level:
+#https.default.protocol=TLS
+# This may need to be changed here (or in user.properties) to:
+#https.default.protocol=SSLv3
+
+# List of protocols to enable. You may have to select only a subset if you find issues with the target server.
+# This is needed when the server does not support socket version negotiation, which can lead to:
+# javax.net.ssl.SSLPeerUnverifiedException: peer not authenticated
+# java.net.SocketException: Connection reset
+# see https://issues.apache.org/bugzilla/show_bug.cgi?id=54759
+#https.socket.protocols=SSLv2Hello SSLv3 TLSv1
+
+# Control if we allow reuse of cached SSL context between iterations
+# set the value to 'false' to reset the SSL context each iteration
+#https.use.cached.ssl.context=true
+
+# Start and end index to be used with keystores with many entries
+# The default is to use entry 0, i.e. the first
+#https.keyStoreStartIndex=0
+#https.keyStoreEndIndex=0
+
+#---------------------------------------------------------------------------
+# Look and Feel configuration
+#---------------------------------------------------------------------------
+
+#Classname of the Swing default UI
+#
+# The LAF classnames that are available are now displayed as ToolTip text
+# when hovering over the Options/Look and Feel selection list.
+#
+# You can either use a full class name, as shown above,
+# or one of the strings "System" or "CrossPlatform" which means
+#  JMeter will use the corresponding string returned by UIManager.get<name>LookAndFeelClassName()
+
+# LAF can be overridden by os.name (lowercased, spaces replaced by '_')
+# Sample os.name LAF:
+#jmeter.laf.windows_xp=javax.swing.plaf.metal.MetalLookAndFeel
+
+# Failing that, the OS family = os.name, but only up to first space:
+# Sample OS family LAF:
+#jmeter.laf.windows=com.sun.java.swing.plaf.windows.WindowsLookAndFeel
+
+# Mac apparently looks better with the System LAF
+jmeter.laf.mac=System
+
+# Failing that, the JMeter default laf can be defined:
+#jmeter.laf=System
+
+# If none of the above jmeter.laf properties are defined, JMeter uses the CrossPlatform LAF.
+# This is because the CrossPlatform LAF generally looks better than the System LAF.
+# See https://issues.apache.org/bugzilla/show_bug.cgi?id=52026 for details
+# N.B. the laf can be defined in user.properties.
+
+# LoggerPanel display
+# default to false
+#jmeter.loggerpanel.display=false
+
+# Enable LogViewer Panel to receive log events even if closed
+# Enabled since 2.12
+# Note this has some impact on performance, but as GUI mode must
+# not be used for load testing, this is acceptable
+#jmeter.loggerpanel.enable_when_closed=true
+
+# Error/Fatal Log count display
+# defaults to true
+#jmeter.errorscounter.display=true
+
+# Max characters kept in LoggerPanel, defaults to 80000 chars
+# 0 means no limit
+#jmeter.loggerpanel.maxlength=80000
+
+# Toolbar display
+# default:
+#jmeter.toolbar.display=true
+# Toolbar icon definitions
+#jmeter.toolbar.icons=org/apache/jmeter/images/toolbar/icons-toolbar.properties
+# Toolbar list
+#jmeter.toolbar=new,open,close,save,save_as_testplan,|,cut,copy,paste,|,expand,collapse,toggle,|,test_start,test_stop,test_shutdown,|,test_start_remote_all,test_stop_remote_all,test_shutdown_remote_all,|,test_clear,test_clear_all,|,search,search_reset,|,function_helper,help
+# Toolbar icons default size: 22x22. Available sizes are: 22x22, 32x32, 48x48
+#jmeter.toolbar.icons.size=22x22
+
+# Icon definitions
+# default:
+#jmeter.icons=org/apache/jmeter/images/icon.properties
+# alternate:
+#jmeter.icons=org/apache/jmeter/images/icon_1.properties
+
+#Components to not display in JMeter GUI (GUI class name or static label)
+# These elements are deprecated: HTML Parameter Mask,HTTP User Parameter Modifier, Webservice (SOAP) Request
+not_in_menu=org.apache.jmeter.protocol.http.modifier.gui.ParamModifierGui, HTTP User Parameter Modifier, org.apache.jmeter.protocol.http.control.gui.WebServiceSamplerGui
+
+# Number of items in undo history
+# Feature is disabled by default (0)
+# Set it to a number > 0 (25 can be a good default)
+# The bigger it is, the more memory it consumes
+#undo.history.size=0
+
+#---------------------------------------------------------------------------
+# Remote hosts and RMI configuration
+#---------------------------------------------------------------------------
+
+# Remote Hosts - comma delimited
+remote_hosts=127.0.0.1
+#remote_hosts=localhost:1099,localhost:2010
+
+# RMI port to be used by the server (must start rmiregistry with same port)
+#server_port=1099
+
+# To change the port to (say) 1234:
+# On the server(s)
+# - set server_port=1234
+# - start rmiregistry with port 1234
+# On Windows this can be done by:
+# SET SERVER_PORT=1234
+# JMETER-SERVER
+#
+# On Unix:
+# SERVER_PORT=1234 jmeter-server
+#
+# On the client:
+# - set remote_hosts=server:1234
+
+# Parameter that controls the RMI port used by the RemoteSampleListenerImpl (The Controller)
+# Default value is 0 which means port is randomly assigned
+# You may need to open Firewall port on the Controller machine
+#client.rmi.localport=0
+
+# When distributed test is starting, there may be several attempts to initialize
+# remote engines. By default, only a single try is made. Increase the following
+# property to make it retry additional times
+#client.tries=1
+
+# If there are initialization retries, the following property sets the delay between attempts
+#client.retries_delay=5000
+
+# When all initialization tries have been made, the test will fail if some remote engines failed
+# Set the following property to true to ignore failed nodes and proceed with the test
+#client.continue_on_fail=false
+
+# To change the default port (1099) used to access the server:
+#server.rmi.port=1234
+
+# To use a specific port for the JMeter server engine, define
+# the following property before starting the server:
+#server.rmi.localport=4000
+
+# From JMeter 2.3.1, the jmeter server creates the RMI registry as part of the server process.
+# To stop the server creating the RMI registry:
+#server.rmi.create=false
+
+# From JMeter 2.3.1, define the following property to cause JMeter to exit after the first test
+#server.exitaftertest=true
+
+# Prefix used by IncludeController when building file name
+#includecontroller.prefix=
+
+#---------------------------------------------------------------------------
+#         Logging Configuration
+#---------------------------------------------------------------------------
+
+# Note: JMeter uses Avalon (Excalibur) LogKit
+
+# Logging Format
+# see http://excalibur.apache.org/apidocs/org/apache/log/format/PatternFormatter.html
+
+#
+# Default format:
+#log_format=%{time:yyyy/MM/dd HH:mm:ss} %5.5{priority} - %{category}: %{message} %{throwable}
+# \n is automatically added to the end of the string
+#
+# Predefined formats in the JMeter LoggingManager:
+#log_format_type=default
+#log_format_type=thread_prefix
+#log_format_type=thread_suffix
+# default is as above
+# thread_prefix adds the thread name as a prefix to the category
+# thread_suffix adds the thread name as a suffix to the category
+# Note that thread name is not included by default, as it requires extra processing.
+#
+# To change the logging format, define either log_format_type or log_format
+# If both are defined, the type takes precedence
+# Note that these properties cannot be defined using the -J or -D JMeter
+# command-line flags, as the format will have already been determined by then
+# However, they can be defined as JVM properties
+
+#Logging levels for the logging categories in JMeter.  Correct values are FATAL_ERROR, ERROR, WARN, INFO, and DEBUG
+# To set the log level for a package or individual class, use:
+# log_level.[package_name].[classname]=[PRIORITY_LEVEL]
+# But omit "org.apache" from the package name.  The classname is optional.  Further examples below.
+
+log_level.jmeter=WARN
+log_level.jmeter.junit=WARN
+log_level.jmeter.control=WARN
+log_level.jmeter.testbeans=WARN
+log_level.jmeter.engine=WARN
+log_level.jmeter.threads=WARN
+log_level.jmeter.gui=WARN
+log_level.jmeter.testelement=WARN
+log_level.jmeter.util=WARN
+log_level.jmeter.protocol.http=WARN
+# For CookieManager, AuthManager etc:
+#log_level.jmeter.protocol.http.control=DEBUG
+#log_level.jmeter.protocol.ftp=WARN
+#log_level.jmeter.protocol.jdbc=DEBUG
+#log_level.jmeter.protocol.java=WARN
+#log_level.jmeter.testelements.property=DEBUG
+log_level.jorphan=WARN
+	
+
+#Log file for log messages.
+# You can specify a different log file for different categories via:
+# log_file.[category]=[filename]
+# category is equivalent to the package/class names described above
+
+# Combined log file (for jmeter and jorphan)
+log_file=/var/log/jmeter.log
+# To redirect logging to standard output, try the following:
+# (it will probably report an error, but output will be to stdout)
+#log_file=
+
+# Or define separate logs if required:
+#log_file.jorphan=jorphan.log
+#log_file.jmeter=jmeter.log
+
+# If the filename contains  paired single-quotes, then the name is processed
+# as a SimpleDateFormat format applied to the current date, for example:
+#log_file='jmeter_'yyyyMMddHHmmss'.tmp'
+
+# N.B. When JMeter starts, it sets the system property:
+#    org.apache.commons.logging.Log
+# to
+#    org.apache.commons.logging.impl.LogKitLogger
+# if not already set. This causes Apache and Commons HttpClient to use the same logging as JMeter
+
+# Further logging configuration
+# Excalibur logging provides the facility to configure logging using
+# configuration files written in XML. This allows for such features as
+# log file rotation which are not supported directly by JMeter.
+#
+# If such a file is specified, it will be applied to the current logging
+# hierarchy when that has been created.
+# 
+#log_config=logkit.xml
+
+#---------------------------------------------------------------------------
+# HTTP Java configuration
+#---------------------------------------------------------------------------
+
+# Number of connection retries performed by HTTP Java sampler before giving up
+#http.java.sampler.retries=10
+# 0 now means don't retry connection (in 2.3 and before it meant no tries at all!)
+
+#---------------------------------------------------------------------------
+# Commons HTTPClient configuration
+#---------------------------------------------------------------------------
+
+# define a properties file for overriding Commons HttpClient parameters
+# See: http://hc.apache.org/httpclient-3.x/preference-api.html
+# Uncomment this line if you put anything in httpclient.parameters file
+#httpclient.parameters.file=httpclient.parameters
+
+
+# define a properties file for overriding Apache HttpClient parameters
+# See: TBA
+# Uncomment this line if you put anything in hc.parameters file
+#hc.parameters.file=hc.parameters
+
+# Following properties apply to both Commons and Apache HttpClient
+
+# set the socket timeout (or use the parameter http.socket.timeout) 
+# for AJP Sampler and HttpClient3 implementation.
+# Note for HttpClient3 implementation it is better to use GUI to set timeout 
+# or use http.socket.timeout in httpclient.parameters
+# Value is in milliseconds
+#httpclient.timeout=0
+# 0 == no timeout
+
+# Set the http version (defaults to 1.1)
+#httpclient.version=1.0 (or use the parameter http.protocol.version)
+
+# Define characters per second > 0 to emulate slow connections
+#httpclient.socket.http.cps=0
+#httpclient.socket.https.cps=0
+
+#Enable loopback protocol
+#httpclient.loopback=true
+
+# Define the local host address to be used for multi-homed hosts
+#httpclient.localaddress=1.2.3.4
+
+# AuthManager Kerberos configuration
+# Name of application module used in jaas.conf
+#kerberos_jaas_application=JMeter  
+
+# Should ports be stripped from urls before constructing SPNs
+# for spnego authentication
+#kerberos.spnego.strip_port=true
+
+#         Sample logging levels for Commons HttpClient
+#
+# Commons HttpClient Logging information can be found at:
+# http://hc.apache.org/httpclient-3.x/logging.html
+
+# Note that full category names are used, i.e. must include the org.apache.
+# Info level produces no output:
+#log_level.org.apache.commons.httpclient=debug
+# Might be useful:
+#log_level.org.apache.commons.httpclient.Authenticator=trace 
+
+# Show headers only
+#log_level.httpclient.wire.header=debug
+
+# Full wire debug produces a lot of output; consider using separate file:
+#log_level.httpclient.wire=debug
+#log_file.httpclient=httpclient.log
+
+
+#         Apache Commons HttpClient logging examples
+#
+# Enable header wire + context logging - Best for Debugging
+#log_level.org.apache.http=DEBUG
+#log_level.org.apache.http.wire=ERROR
+
+# Enable full wire + context logging
+#log_level.org.apache.http=DEBUG
+
+# Enable context logging for connection management
+#log_level.org.apache.http.impl.conn=DEBUG
+
+# Enable context logging for connection management / request execution
+#log_level.org.apache.http.impl.conn=DEBUG
+#log_level.org.apache.http.impl.client=DEBUG
+#log_level.org.apache.http.client=DEBUG
+
+#---------------------------------------------------------------------------
+# Apache HttpComponents HTTPClient configuration (HTTPClient4)
+#---------------------------------------------------------------------------
+
+# Number of retries to attempt (default 0)
+#httpclient4.retrycount=0
+
+# Idle connection timeout (ms) to apply if the server does not send Keep-Alive headers
+#httpclient4.idletimeout=0
+# Note: this is currently an experimental fix
+
+#---------------------------------------------------------------------------
+# Apache HttpComponents HTTPClient configuration (HTTPClient 3.1)
+#---------------------------------------------------------------------------
+
+# Number of retries to attempt (default 0)
+#httpclient3.retrycount=0
+
+#---------------------------------------------------------------------------
+# HTTP Cache Manager configuration
+#---------------------------------------------------------------------------
+#
+# Space or comma separated list of methods that can be cached
+#cacheable_methods=GET
+# N.B. This property is currently a temporary solution for Bug 56162
+
+# Since 2.12, JMeter no longer creates a Sample Result with a 204 response
+# code for a resource found in the cache, which is in line with what browsers do.
+#cache_manager.cached_resource_mode=RETURN_NO_SAMPLE
+
+# You can choose between 3 modes:
+# RETURN_NO_SAMPLE (default)
+# RETURN_200_CACHE
+# RETURN_CUSTOM_STATUS
+
+# Those modes have the following behaviours:
+# RETURN_NO_SAMPLE : this mode returns no Sample Result; it has no additional configuration
+# RETURN_200_CACHE : this mode will return a Sample Result with response code 200 and response message "(ex cache)"; you can modify the response message by setting
+# RETURN_200_CACHE.message=(ex cache)
+# RETURN_CUSTOM_STATUS : this mode lets you select which response code and message you want to return; if you use this mode you need to set these properties:
+# RETURN_CUSTOM_STATUS.code=
+# RETURN_CUSTOM_STATUS.message=
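+#
+# A minimal illustration of the custom-status mode; the code and message
+# values below are assumptions for the example, not shipped defaults:
+#cache_manager.cached_resource_mode=RETURN_CUSTOM_STATUS
+#RETURN_CUSTOM_STATUS.code=304
+#RETURN_CUSTOM_STATUS.message=Not Modified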
+
+#---------------------------------------------------------------------------
+# Results file configuration
+#---------------------------------------------------------------------------
+
+# This section helps determine how result data will be saved.
+# The commented out values are the defaults.
+
+# legitimate values: xml, csv, db.  Only xml and csv are currently supported.
+#jmeter.save.saveservice.output_format=csv
+
+
+# true when field should be saved; false otherwise
+
+# assertion_results_failure_message only affects CSV output
+#jmeter.save.saveservice.assertion_results_failure_message=false
+#
+# legitimate values: none, first, all
+#jmeter.save.saveservice.assertion_results=none
+#
+jmeter.save.saveservice.data_type=csv
+jmeter.save.saveservice.label=true
+jmeter.save.saveservice.response_code=true
+# response_data is not currently supported for CSV output
+#jmeter.save.saveservice.response_data=false
+# Save ResponseData for failed samples
+jmeter.save.saveservice.response_data.on_error=false
+jmeter.save.saveservice.response_message=false
+jmeter.save.saveservice.successful=false
+jmeter.save.saveservice.thread_name=false
+jmeter.save.saveservice.time=true
+jmeter.save.saveservice.subresults=false
+jmeter.save.saveservice.assertions=false
+jmeter.save.saveservice.latency=true
+jmeter.save.saveservice.connect_time=true
+#jmeter.save.saveservice.samplerData=false
+#jmeter.save.saveservice.responseHeaders=false
+#jmeter.save.saveservice.requestHeaders=false
+#jmeter.save.saveservice.encoding=false
+jmeter.save.saveservice.bytes=false
+#jmeter.save.saveservice.url=false
+#jmeter.save.saveservice.filename=false
+jmeter.save.saveservice.hostname=false
+jmeter.save.saveservice.thread_counts=false
+jmeter.save.saveservice.sample_count=false
+#jmeter.save.saveservice.idle_time=false
+
+# Timestamp format - this only affects CSV output files
+# legitimate values: none, ms, or a format suitable for SimpleDateFormat
+#jmeter.save.saveservice.timestamp_format=ms
+jmeter.save.saveservice.timestamp_format=HH:mm:ss
+
+# For use with Comma-separated value (CSV) files or other formats
+# where the fields' values are separated by specified delimiters.
+# Default:
+#jmeter.save.saveservice.default_delimiter=,
+# For TAB, since JMeter 2.3 one can use:
+jmeter.save.saveservice.default_delimiter=;
+
+# Only applies to CSV format files:
+jmeter.save.saveservice.print_field_names=true
+
+# Optional list of JMeter variable names whose values are to be saved in the result data files.
+# Use commas to separate the names. For example:
+#sample_variables=SESSION_ID,REFERENCE
+# N.B. The current implementation saves the values in XML as attributes,
+# so the names must be valid XML names.
+# Versions of JMeter after 2.3.2 send the variable to all servers
+# to ensure that the correct data is available at the client.
+
+# Optional xml processing instruction for line 2 of the file:
+#jmeter.save.saveservice.xml_pi=<?xml-stylesheet type="text/xsl" href="../extras/jmeter-results-detail-report_21.xsl"?>
+
+# Prefix used to identify filenames that are relative to the current base
+#jmeter.save.saveservice.base_prefix=~/
+
+# AutoFlush on each line written in XML or CSV output
+# Setting this to true will result in less test result data loss in case of a crash,
+# but with an impact on performance, particularly for intensive tests (low or no pauses)
+# Since JMeter 2.10, this is false by default
+jmeter.save.saveservice.autoflush=true
+
+#---------------------------------------------------------------------------
+# Settings that affect SampleResults
+#---------------------------------------------------------------------------
+
+# Save the start time stamp instead of the end
+# This also affects the timestamp stored in result files
+sampleresult.timestamp.start=true
+
+# Whether to use System.nanoTime() - otherwise only use System.currentTimeMillis()
+#sampleresult.useNanoTime=true
+
+# Use a background thread to calculate the nanoTime offset
+# Set this to <= 0 to disable the background thread
+#sampleresult.nanoThreadSleep=5000
+
+#---------------------------------------------------------------------------
+# Upgrade property
+#---------------------------------------------------------------------------
+
+# File that holds a record of name changes for backward compatibility issues
+upgrade_properties=/bin/upgrade.properties
+
+#---------------------------------------------------------------------------
+# JMeter Test Script recorder configuration
+#
+# N.B. The element was originally called the Proxy recorder, which is why the
+# properties have the prefix "proxy".
+#---------------------------------------------------------------------------
+
+# If the recorder detects a gap of at least 5s (default) between HTTP requests,
+# it assumes that the user has clicked a new URL
+#proxy.pause=5000
+
+# Add numeric prefix to Sampler names (default true)
+#proxy.number.requests=true
+
+# List of URL patterns that will be added to URL Patterns to exclude
+# Separate multiple lines with ;
+#proxy.excludes.suggested=.*\\.(bmp|css|js|gif|ico|jpe?g|png|swf|woff)
+
+# Change the default HTTP Sampler (currently HttpClient4)
+# Java:
+#jmeter.httpsampler=HTTPSampler
+#or
+#jmeter.httpsampler=Java
+#
+# Apache HTTPClient:
+#jmeter.httpsampler=HTTPSampler2
+#or
+#jmeter.httpsampler=HttpClient3.1
+#
+# HttpClient4.x
+#jmeter.httpsampler=HttpClient4
+
+# By default JMeter tries to be more lenient with RFC2616 redirects and allows
+# relative paths.
+# If you want to test strict conformance, set this value to true
+# When the property is true, JMeter follows http://tools.ietf.org/html/rfc3986#section-5.2
+#jmeter.httpclient.strict_rfc2616=false
+
+# Default content-type include filter to use
+#proxy.content_type_include=text/html|text/plain|text/xml
+# Default content-type exclude filter to use
+#proxy.content_type_exclude=image/.*|text/css|application/.*
+
+# Default headers to remove from Header Manager elements
+# (Cookie and Authorization are always removed)
+#proxy.headers.remove=If-Modified-Since,If-None-Match,Host
+
+# Binary content-type handling
+# These content-types will be handled by saving the request in a file:
+#proxy.binary.types=application/x-amf,application/x-java-serialized-object
+# The files will be saved in this directory:
+#proxy.binary.directory=user.dir
+# The files will be created with this file filesuffix:
+#proxy.binary.filesuffix=.binary
+
+#---------------------------------------------------------------------------
+# Test Script Recorder certificate configuration
+#---------------------------------------------------------------------------
+
+#proxy.cert.directory=<JMeter bin directory>
+#proxy.cert.file=proxyserver.jks
+#proxy.cert.type=JKS
+#proxy.cert.keystorepass=password
+#proxy.cert.keypassword=password
+#proxy.cert.factory=SunX509
+# define this property if you wish to use your own keystore
+#proxy.cert.alias=<none>
+# The default validity for certificates created by JMeter
+#proxy.cert.validity=7
+# Use dynamic key generation (if supported by JMeter/JVM)
+# If false, will revert to using a single key with no certificate
+#proxy.cert.dynamic_keys=true
+
+#---------------------------------------------------------------------------
+# Test Script Recorder miscellaneous configuration
+#---------------------------------------------------------------------------
+
+# Whether to attempt disabling of samples that resulted from redirects
+# where the generated samples use auto-redirection
+#proxy.redirect.disabling=true
+
+# SSL configuration
+#proxy.ssl.protocol=TLS
+
+#---------------------------------------------------------------------------
+# JMeter Proxy configuration
+#---------------------------------------------------------------------------
+# use command-line flags for user-name and password
+#http.proxyDomain=NTLM domain, if required by HTTPClient sampler
+
+#---------------------------------------------------------------------------
+# HTTPSampleResponse Parser configuration
+#---------------------------------------------------------------------------
+
+# Space-separated list of parser groups
+HTTPResponse.parsers=htmlParser wmlParser
+# for each parser, there should be a parser.types and a parser.className property
+
+#---------------------------------------------------------------------------
+# HTML Parser configuration
+#---------------------------------------------------------------------------
+
+# Define the HTML parser to be used.
+# Default parser:
+# This new parser (since 2.10) should perform better than all others
+# see https://issues.apache.org/bugzilla/show_bug.cgi?id=55632
+#htmlParser.className=org.apache.jmeter.protocol.http.parser.LagartoBasedHtmlParser
+
+# Other parsers:
+# Default parser before 2.10
+#htmlParser.className=org.apache.jmeter.protocol.http.parser.HtmlParserHTMLParser
+#htmlParser.className=org.apache.jmeter.protocol.http.parser.JTidyHTMLParser
+# Note that Regexp extractor may detect references that have been commented out.
+# In many cases it will work OK, but you should be aware that it may generate 
+# additional references.
+#htmlParser.className=org.apache.jmeter.protocol.http.parser.RegexpHTMLParser
+# This parser is based on JSoup, it should be the most accurate but less performant
+# than LagartoBasedHtmlParser
+#htmlParser.className=org.apache.jmeter.protocol.http.parser.JsoupBasedHtmlParser
+
+#Used by HTTPSamplerBase to associate htmlParser with content types below 
+htmlParser.types=text/html application/xhtml+xml application/xml text/xml
+
+#---------------------------------------------------------------------------
+# WML Parser configuration
+#---------------------------------------------------------------------------
+
+wmlParser.className=org.apache.jmeter.protocol.http.parser.RegexpHTMLParser
+
+#Used by HTTPSamplerBase to associate wmlParser with content types below 
+wmlParser.types=text/vnd.wap.wml 
+
+#---------------------------------------------------------------------------
+# Remote batching configuration
+#---------------------------------------------------------------------------
+# How Sample sender implementations are configured:
+# - true (default) means client configuration will be used
+# - false means server configuration will be used
+#sample_sender_client_configured=true
+
+# Remote batching support
+# Since JMeter 2.9, default is MODE_STRIPPED_BATCH, which returns samples in
+# batch mode (every 100 samples or every minute by default)
+# Note also that MODE_STRIPPED_BATCH strips response data from SampleResult, so if you need it change to
+# another mode
+# Hold retains samples until end of test (may need lots of memory)
+# Batch returns samples in batches
+# Statistical returns sample summary statistics
+# hold_samples was originally defined as a separate property,
+# but can now also be defined using mode=Hold
+# mode can also be the class name of an implementation of org.apache.jmeter.samplers.SampleSender
+#mode=Standard
+#mode=Batch
+#mode=Hold
+#mode=Statistical
+#Set to true to key statistical samples on threadName rather than threadGroup
+#key_on_threadname=false
+#mode=Stripped
+#mode=StrippedBatch
+#mode=org.example.load.MySampleSender
+#
+#num_sample_threshold=100
+# Value is in milliseconds
+#time_threshold=60000
+#
+# Asynchronous sender; uses a queue and background worker process to return the samples
+#mode=Asynch
+# default queue size
+#asynch.batch.queue.size=100
+# Same as Asynch but strips response data from SampleResult
+#mode=StrippedAsynch
+#
+# DiskStore: as for Hold mode, but serialises the samples to disk, rather than saving in memory
+#mode=DiskStore
+# Same as DiskStore but strips response data from SampleResult
+#mode=StrippedDiskStore
+# Note: the mode is currently resolved on the client; 
+# other properties (e.g. time_threshold) are resolved on the server.
+
+# To set the Monitor Health Visualiser buffer size, enter the desired value
+# monitor.buffer.size=800
+
+#---------------------------------------------------------------------------
+# JDBC Request configuration
+#---------------------------------------------------------------------------
+
+# Max number of PreparedStatements per Connection for PreparedStatement cache
+#jdbcsampler.maxopenpreparedstatements=100
+
+# String used to indicate a null value
+#jdbcsampler.nullmarker=]NULL[
+
+#---------------------------------------------------------------------------
+# OS Process Sampler configuration
+#---------------------------------------------------------------------------
+# Polling to see if process has finished its work, used when a timeout is configured on sampler
+#os_sampler.poll_for_timeout=100
+
+#---------------------------------------------------------------------------
+# TCP Sampler configuration
+#---------------------------------------------------------------------------
+
+# The default handler class
+#tcp.handler=TCPClientImpl
+#
+# eolByte = byte value for end of line
+# set this to a value outside the range -128 to +127 to skip eol checking
+#tcp.eolByte=1000
+#
+# TCP Charset, used by org.apache.jmeter.protocol.tcp.sampler.TCPClientImpl
+# defaults to the platform default charset as returned by Charset.defaultCharset().name()
+#tcp.charset=
+#
+# status.prefix and suffix = strings that enclose the status response code
+#tcp.status.prefix=Status=
+#tcp.status.suffix=.
+#
+# status.properties = property file to convert codes to messages
+#tcp.status.properties=mytestfiles/tcpstatus.properties
+
+# The length prefix used by LengthPrefixedBinaryTCPClientImpl implementation
+# defaults to 2 bytes.
+#tcp.binarylength.prefix.length=2
+
+#---------------------------------------------------------------------------
+# Summariser - Generate Summary Results - configuration (mainly applies to non-GUI mode)
+#---------------------------------------------------------------------------
+#
+# Define the following property to automatically start a summariser with that name
+# (applies to non-GUI mode only)
+#summariser.name=summary
+#
+# interval between summaries (in seconds) default 30 seconds
+#summariser.interval=30
+#
+# Write messages to log file
+#summariser.log=true
+#
+# Write messages to System.out
+#summariser.out=true
+
+
+#---------------------------------------------------------------------------
+# Aggregate Report and Aggregate Graph - configuration
+#---------------------------------------------------------------------------
+#
+# Percentiles to display in reports
+# Can be float value between 0 and 100
+# First percentile to display, defaults to 90%
+#aggregate_rpt_pct1=90
+# Second percentile to display, defaults to 95%
+#aggregate_rpt_pct2=95
+# Third percentile to display, defaults to 99%
+#aggregate_rpt_pct3=99
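+#
+# For example, to show the 75th percentile as the first column instead
+# (75 is an illustrative value, not a default):
+#aggregate_rpt_pct1=75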
+
+#---------------------------------------------------------------------------
+# BackendListener - configuration
+#---------------------------------------------------------------------------
+#
+# Backend metrics sliding window size for Percentiles, Min, Max
+#backend_metrics_window=100
+
+#---------------------------------------------------------------------------
+# BeanShell configuration
+#---------------------------------------------------------------------------
+
+# BeanShell Server properties
+#
+# Define the port number as non-zero to start the http server on that port
+#beanshell.server.port=9000
+# The telnet server will be started on the next port
+
+#
+# Define the server initialisation file
+beanshell.server.file=../extras/startup.bsh
+
+#
+# Define a file to be processed at startup
+# This is processed using its own interpreter.
+#beanshell.init.file=
+
+#
+# Define the initialisation files for BeanShell Sampler, Function and other BeanShell elements
+# N.B. Beanshell test elements do not share interpreters.
+#      Each element in each thread has its own interpreter.
+#      This is retained between samples.
+#beanshell.sampler.init=BeanShellSampler.bshrc
+#beanshell.function.init=BeanShellFunction.bshrc
+#beanshell.assertion.init=BeanShellAssertion.bshrc
+#beanshell.listener.init=etc
+#beanshell.postprocessor.init=etc
+#beanshell.preprocessor.init=etc
+#beanshell.timer.init=etc
+
+# The file BeanShellListeners.bshrc contains sample definitions
+# of Test and Thread Listeners.
+
+#---------------------------------------------------------------------------
+# MailerModel configuration
+#---------------------------------------------------------------------------
+
+# Number of successful samples before a message is sent
+#mailer.successlimit=2
+#
+# Number of failed samples before a message is sent
+#mailer.failurelimit=2
+
+#---------------------------------------------------------------------------
+# CSVRead configuration
+#---------------------------------------------------------------------------
+
+# CSVRead delimiter setting (default ",")
+# Make sure that there are no trailing spaces or tabs after the delimiter
+# characters, or these will be included in the list of valid delimiters
+#csvread.delimiter=,
+#csvread.delimiter=;
+#csvread.delimiter=!
+#csvread.delimiter=~
+# The following line has a tab after the =
+#csvread.delimiter=	
+
+#---------------------------------------------------------------------------
+# __time() function configuration
+#
+# The properties below can be used to redefine the default formats
+#---------------------------------------------------------------------------
+#time.YMD=yyyyMMdd
+#time.HMS=HHmmss
+#time.YMDHMS=yyyyMMdd-HHmmss
+#time.USER1=
+#time.USER2=
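+#
+# For instance, a hypothetical user-defined ISO-8601-like format
+# (an illustrative value, not a default):
+#time.USER1=yyyy-MM-dd'T'HH:mm:ss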
+
+#---------------------------------------------------------------------------
+# CSV DataSet configuration
+#---------------------------------------------------------------------------
+
+# String to return at EOF (if recycle not used)
+#csvdataset.eofstring=<EOF>
+
+#---------------------------------------------------------------------------
+# LDAP Sampler configuration
+#---------------------------------------------------------------------------
+# Maximum number of search results returned by a search that will be sorted
+# to guarantee a stable ordering (if more results than this limit are returned
+# then no sorting is done). Set to 0 to turn off all sorting, in which case
+# "Equals" response assertions will be very likely to fail against search results.
+#
+#ldapsampler.max_sorted_results=1000
+ 
+# Number of characters to log for each of three sections (starting matching section, diff section,
+#   ending matching section where not all sections will appear for all diffs) diff display when an Equals
+#   assertion fails. So a value of 100 means a maximum of 300 characters of diff text will be displayed
+#   (+ a number of extra characters like "..." and "[[["/"]]]" which are used to decorate it).
+#assertion.equals_section_diff_len=100
+# text written out to log to signify start/end of diff delta
+#assertion.equals_diff_delta_start=[[[
+#assertion.equals_diff_delta_end=]]]
+
+#---------------------------------------------------------------------------
+# Miscellaneous configuration
+#---------------------------------------------------------------------------
+
+# If defined, then start the mirror server on the port
+#mirror.server.port=8081
+
+# ORO PatternCacheLRU size
+#oro.patterncache.size=1000
+
+#TestBeanGui
+#
+#propertyEditorSearchPath=null
+
+# Turn expert mode on/off: expert mode will show expert-mode beans and properties
+#jmeter.expertMode=true
+
+# Maximum redirects to follow in a single sequence (default 5)
+#httpsampler.max_redirects=5
+# Maximum frame/iframe nesting depth (default 5)
+#httpsampler.max_frame_depth=5
+# Maximum await termination timeout (secs) when concurrent download embedded resources (default 60)
+#httpsampler.await_termination_timeout=60
+# Revert to BUG 51939 behaviour (no separate container for embedded resources) by setting the following false:
+#httpsampler.separate.container=true
+
+# If this property is true and the download of embedded resources fails due to
+# missing resources or other reasons, the parent sample will not be marked as failed
+#httpsampler.ignore_failed_embedded_resources=false
+
+# The encoding to be used if none is provided (default ISO-8859-1)
+#sampleresult.default.encoding=ISO-8859-1
+
+# Network response size calculation method
+# Use real size: number of bytes for response body returned by the webserver
+# (i.e. the network bytes received for the response)
+# if set to false, the (uncompressed) response data size will be used (default before 2.5)
+# Include headers: add the headers size in real size
+#sampleresult.getbytes.body_real_size=true
+#sampleresult.getbytes.headers_size=true
+
+# CookieManager behaviour - should cookies with null/empty values be deleted?
+# Default is true. Use false to revert to original behaviour
+#CookieManager.delete_null_cookies=true
+
+# CookieManager behaviour - should variable cookies be allowed?
+# Default is true. Use false to revert to original behaviour
+#CookieManager.allow_variable_cookies=true
+
+# CookieManager behaviour - should Cookies be stored as variables?
+# Default is false
+#CookieManager.save.cookies=false
+
+# CookieManager behaviour - prefix to add to cookie name before storing it as a variable
+# Default is COOKIE_; to remove the prefix, define it as one or more spaces
+#CookieManager.name.prefix=
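+# e.g. a hypothetical prefix (illustrative only):
+#CookieManager.name.prefix=MYCOOKIE_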
+ 
+# CookieManager behaviour - check received cookies are valid before storing them?
+# Default is true. Use false to revert to previous behaviour
+#CookieManager.check.cookies=true
+
+# (2.0.3) JMeterThread behaviour has been changed to set the started flag before
+# the controllers are initialised. This is so controllers can access variables earlier. 
+# In case this causes problems, the previous behaviour can be restored by uncommenting
+# the following line.
+#jmeterthread.startearlier=false
+
+# (2.2.1) JMeterThread behaviour has changed so that PostProcessors are run in forward order
+# (as they appear in the test plan) rather than reverse order as previously.
+# Uncomment the following line to revert to the original behaviour
+#jmeterthread.reversePostProcessors=true
+
+# (2.2) StandardJMeterEngine behaviour has been changed to notify the listeners after
+# the running version is enabled. This is so they can access variables. 
+# In case this causes problems, the previous behaviour can be restored by uncommenting
+# the following line.
+#jmeterengine.startlistenerslater=false
+
+# Number of milliseconds to wait for a thread to stop
+#jmeterengine.threadstop.wait=5000
+
+#Whether to invoke System.exit(0) in server exit code after stopping RMI
+#jmeterengine.remote.system.exit=false
+
+# Whether to call System.exit(1) on failure to stop threads in non-GUI mode.
+# This only takes effect if the test was explicitly requested to stop.
+# If this is disabled, it may be necessary to kill the JVM externally
+#jmeterengine.stopfail.system.exit=true
+
+# Whether to force call System.exit(0) at end of test in non-GUI mode, even if
+# there were no failures and the test was not explicitly asked to stop.
+# Without this, the JVM may never exit if there are other threads spawned by
+# the test which never exit.
+#jmeterengine.force.system.exit=false
+
+# How long to pause (in ms) in the daemon thread before reporting that the JVM has failed to exit.
+# If the value is <= 0, JMeter does not start the daemon thread
+#jmeter.exit.check.pause=2000
+
+# If running non-GUI, then JMeter listens on the following port for a shutdown message.
+# To disable, set the port to 1000 or less.
+#jmeterengine.nongui.port=4445
+#
+# If the initial port is busy, keep trying until this port is reached
+# (to disable searching, set the value less than or equal to the .port property)
+#jmeterengine.nongui.maxport=4455
+
+# How often to check for shutdown during ramp-up (milliseconds)
+#jmeterthread.rampup.granularity=1000
+
+#Should JMeter expand the tree when loading a test plan?
+# default value is false since JMeter 2.7
+#onload.expandtree=false
+
+#JSyntaxTextArea configuration
+#jsyntaxtextarea.wrapstyleword=true
+#jsyntaxtextarea.linewrap=true
+#jsyntaxtextarea.codefolding=true
+# Set 0 to disable undo feature in JSyntaxTextArea
+#jsyntaxtextarea.maxundos=50
+
+# Set this to false to disable the use of JSyntaxTextArea for the Console Logger panel 
+#loggerpanel.usejsyntaxtext=true
+
+# Maximum size of HTML page that can be displayed; default=200 * 1024
+# Set to 0 to disable the size check and display the whole response
+#view.results.tree.max_size=204800
+
+# Order of Renderers in View Results Tree
+# Note that full class names should be used for non-JMeter-core renderers
+# For JMeter core renderers, class names start with . and are automatically
+# prefixed with org.apache.jmeter.visualizers
+view.results.tree.renderers_order=.RenderAsText,.RenderAsRegexp,.RenderAsCssJQuery,.RenderAsXPath,.RenderAsHTML,.RenderAsHTMLWithEmbedded,.RenderAsDocument,.RenderAsJSON,.RenderAsXML
+
+# Maximum size of Document that can be parsed by Tika engine; default=10 * 1024 * 1024 (10MB)
+# Set to 0 to disable the size check
+#document.max_size=0
+
+#JMS options
+# Enable the following property to stop JMS Point-to-Point Sampler from using
+# the properties java.naming.security.[principal|credentials] when creating the queue connection
+#JMSSampler.useSecurity.properties=false
+
+# Set the following value to true in order to skip the delete confirmation dialogue
+#confirm.delete.skip=false
+
+# Used by Webservice Sampler (SOAP)
+# Size of Document Cache
+#soap.document_cache=50
+
+# Used by JSR223 elements
+# Size of compiled scripts cache
+#jsr223.compiled_scripts_cache_size=100
+
+#---------------------------------------------------------------------------
+# Classpath configuration
+#---------------------------------------------------------------------------
+
+# List of paths (separated by ;) to search for additional JMeter plugin classes,
+# for example new GUI elements and samplers.
+# A path item can either be a jar file or a directory.
+# Any jar file in such a directory will be automatically included,
+# jar files in sub directories are ignored.
+# The given value is in addition to any jars found in the lib/ext directory.
+# Do not use this for utility or plugin dependency jars.
+#search_paths=/app1/lib;/app2/lib
+
+# List of paths that JMeter will search for utility and plugin dependency classes.
+# Use your platform path separator to separate multiple paths.
+# A path item can either be a jar file or a directory.
+# Any jar file in such a directory will be automatically included,
+# jar files in sub directories are ignored.
+# The given value is in addition to any jars found in the lib directory.
+# All entries will be added to the class path of the system class loader
+# and also to the path of the JMeter internal loader.
+# Paths with spaces may cause problems for the JVM
+#user.classpath=../classes;../lib;../app1/jar1.jar;../app2/jar2.jar
+
+# List of paths (separated by ;) that JMeter will search for utility
+# and plugin dependency classes.
+# A path item can either be a jar file or a directory.
+# Any jar file in such a directory will be automatically included,
+# jar files in sub directories are ignored.
+# The given value is in addition to any jars found in the lib directory
+# or given by the user.classpath property.
+# All entries will be added to the path of the JMeter internal loader only.
+# For plugin dependencies, plugin_dependency_paths should be preferred over
+# user.classpath.
+#plugin_dependency_paths=../dependencies/lib;../app1/jar1.jar;../app2/jar2.jar
+
+# Classpath finder
+# ================
+# The classpath finder currently needs to load every single JMeter class to find
+# the classes it needs.
+# For non-GUI mode, it's only necessary to scan for Function classes, but all classes
+# are still loaded.
+# All current Function classes include ".function." in their name,
+# and none include ".gui." in the name, so the number of unwanted classes loaded can be
+# reduced by checking for these. However, if a valid function class name does not match
+# these restrictions, it will not be loaded. If problems are encountered, then comment
+# or change the following properties:
+classfinder.functions.contain=.functions.
+classfinder.functions.notContain=.gui.
+
+#---------------------------------------------------------------------------
+# Additional property files to load
+#---------------------------------------------------------------------------
+
+# Should JMeter automatically load additional JMeter properties?
+# File name to look for (comment to disable)
+#user.properties=user.properties
+
+# Should JMeter automatically load additional system properties?
+# File name to look for (comment to disable)
+#system.properties=system.properties
+
+# Comma separated list of files that contain reference to templates and their description
+# Path must be relative to jmeter root folder
+#template.files=/bin/templates/templates.xml
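A note on the shutdown listener configured above: in non-GUI mode the engine waits for stop commands on jmeterengine.nongui.port. Below is a minimal client sketch, assuming the engine accepts the plain-text commands "StopTestNow" and "Shutdown" as single UDP datagrams on that port; the class name and defaults here are illustrative, not part of this patch.

    import java.net.DatagramPacket;
    import java.net.DatagramSocket;
    import java.net.InetAddress;
    import java.nio.charset.StandardCharsets;

    public class JMeterStopClient {
        public static void main(String[] args) throws Exception {
            // "StopTestNow" aborts immediately; "Shutdown" lets running samplers finish.
            String command = args.length > 0 ? args[0] : "StopTestNow";
            // Default matches the commented jmeterengine.nongui.port above.
            int port = args.length > 1 ? Integer.parseInt(args[1]) : 4445;
            byte[] payload = command.getBytes(StandardCharsets.US_ASCII);
            try (DatagramSocket socket = new DatagramSocket()) {
                socket.send(new DatagramPacket(payload, payload.length,
                        InetAddress.getByName("localhost"), port));
            }
        }
    }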

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/loadsimulator/saveservice.properties
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/loadsimulator/saveservice.properties b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/loadsimulator/saveservice.properties
new file mode 100644
index 0000000..bae1168
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/loadsimulator/saveservice.properties
@@ -0,0 +1,381 @@
+#---------------------------------------------------------
+#         SAVESERVICE PROPERTIES - JMETER INTERNAL USE ONLY
+#---------------------------------------------------------
+
+##   Licensed to the Apache Software Foundation (ASF) under one or more
+##   contributor license agreements.  See the NOTICE file distributed with
+##   this work for additional information regarding copyright ownership.
+##   The ASF licenses this file to You under the Apache License, Version 2.0
+##   (the "License"); you may not use this file except in compliance with
+##   the License.  You may obtain a copy of the License at
+##
+##       http://www.apache.org/licenses/LICENSE-2.0
+##
+##   Unless required by applicable law or agreed to in writing, software
+##   distributed under the License is distributed on an "AS IS" BASIS,
+##   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+##   See the License for the specific language governing permissions and
+##   limitations under the License.
+
+# This file is used to define how XStream (de-)serializes classnames
+# in JMX test plan files.
+
+#      FOR JMETER INTERNAL USE ONLY
+
+#---------------------------------------------------------
+
+# N.B. To ensure backward compatibility, please do NOT change or delete any entries
+
+# New entries can be added as necessary.
+#
+# Note that keys starting with an underscore are special,
+# and are not used as aliases.
+#
+# Please keep the entries in alphabetical order within the sections
+# to reduce the likelihood of duplicates
+#
+# version number of this file (automatically generated by SVN)
+_file_version=$Revision: 1656252 $
+#
+# Conversion version (for JMX output files)
+# Must be updated if the file has been changed since the previous release
+# Format is:
+# Save service version=JMeter version at which change occurred
+# 1.7 = 2.1.1
+# 1.8 = 2.1.2
+# (Some version updates were missed here...)
+# 2.0 = 2.3.1
+# 2.1 = 2.3.2
+# (Some version updates were missed here...)
+# 2.2 = 2.6
+# 2.3 = 2.7
+# 2.4 = 2.9
+# 2.5 = 2.10
+# 2.6 = 2.11
+# 2.7 = 2.12
+# 2.8 = 2.13
+_version=2.8
+#
+#
+# Character set encoding used to read and write JMeter XML files and CSV results
+#
+_file_encoding=UTF-8
+#
+#---------------------------------------------------------
+#
+# The following properties are used to create aliases
+# [Must all start with capital letter]
+#
+AccessLogSampler=org.apache.jmeter.protocol.http.sampler.AccessLogSampler
+AjpSampler=org.apache.jmeter.protocol.http.sampler.AjpSampler
+AjpSamplerGui=org.apache.jmeter.protocol.http.control.gui.AjpSamplerGui
+AnchorModifier=org.apache.jmeter.protocol.http.modifier.AnchorModifier
+AnchorModifierGui=org.apache.jmeter.protocol.http.modifier.gui.AnchorModifierGui
+Argument=org.apache.jmeter.config.Argument
+Arguments=org.apache.jmeter.config.Arguments
+ArgumentsPanel=org.apache.jmeter.config.gui.ArgumentsPanel
+AssertionGui=org.apache.jmeter.assertions.gui.AssertionGui
+AssertionVisualizer=org.apache.jmeter.visualizers.AssertionVisualizer
+AuthManager=org.apache.jmeter.protocol.http.control.AuthManager
+Authorization=org.apache.jmeter.protocol.http.control.Authorization
+AuthPanel=org.apache.jmeter.protocol.http.gui.AuthPanel
+BackendListener=org.apache.jmeter.visualizers.backend.BackendListener
+BackendListenerGui=org.apache.jmeter.visualizers.backend.BackendListenerGui
+BeanShellAssertion=org.apache.jmeter.assertions.BeanShellAssertion
+BeanShellAssertionGui=org.apache.jmeter.assertions.gui.BeanShellAssertionGui
+BeanShellListener=org.apache.jmeter.visualizers.BeanShellListener
+BeanShellPostProcessor=org.apache.jmeter.extractor.BeanShellPostProcessor
+BeanShellPreProcessor=org.apache.jmeter.modifiers.BeanShellPreProcessor
+BeanShellSampler=org.apache.jmeter.protocol.java.sampler.BeanShellSampler
+BeanShellSamplerGui=org.apache.jmeter.protocol.java.control.gui.BeanShellSamplerGui
+BeanShellTimer=org.apache.jmeter.timers.BeanShellTimer
+BSFAssertion=org.apache.jmeter.assertions.BSFAssertion
+BSFListener=org.apache.jmeter.visualizers.BSFListener
+BSFPreProcessor=org.apache.jmeter.modifiers.BSFPreProcessor
+BSFPostProcessor=org.apache.jmeter.extractor.BSFPostProcessor
+BSFSampler=org.apache.jmeter.protocol.java.sampler.BSFSampler
+BSFSamplerGui=org.apache.jmeter.protocol.java.control.gui.BSFSamplerGui
+BSFTimer=org.apache.jmeter.timers.BSFTimer
+CacheManager=org.apache.jmeter.protocol.http.control.CacheManager
+CacheManagerGui=org.apache.jmeter.protocol.http.gui.CacheManagerGui
+CompareAssertion=org.apache.jmeter.assertions.CompareAssertion
+ComparisonVisualizer=org.apache.jmeter.visualizers.ComparisonVisualizer
+ConfigTestElement=org.apache.jmeter.config.ConfigTestElement
+ConstantThroughputTimer=org.apache.jmeter.timers.ConstantThroughputTimer
+ConstantTimer=org.apache.jmeter.timers.ConstantTimer
+ConstantTimerGui=org.apache.jmeter.timers.gui.ConstantTimerGui
+Cookie=org.apache.jmeter.protocol.http.control.Cookie
+CookieManager=org.apache.jmeter.protocol.http.control.CookieManager
+CookiePanel=org.apache.jmeter.protocol.http.gui.CookiePanel
+CounterConfig=org.apache.jmeter.modifiers.CounterConfig
+CriticalSectionController=org.apache.jmeter.control.CriticalSectionController
+CriticalSectionControllerGui=org.apache.jmeter.control.gui.CriticalSectionControllerGui
+CounterConfigGui=org.apache.jmeter.modifiers.gui.CounterConfigGui
+CSVDataSet=org.apache.jmeter.config.CSVDataSet
+DebugPostProcessor=org.apache.jmeter.extractor.DebugPostProcessor
+DebugSampler=org.apache.jmeter.sampler.DebugSampler
+DistributionGraphVisualizer=org.apache.jmeter.visualizers.DistributionGraphVisualizer
+DNSCacheManager=org.apache.jmeter.protocol.http.control.DNSCacheManager
+DNSCachePanel=org.apache.jmeter.protocol.http.gui.DNSCachePanel
+DurationAssertion=org.apache.jmeter.assertions.DurationAssertion
+DurationAssertionGui=org.apache.jmeter.assertions.gui.DurationAssertionGui
+# Should really have been defined as floatProp to agree with other properties
+# No point changing this now
+FloatProperty=org.apache.jmeter.testelement.property.FloatProperty
+ForeachController=org.apache.jmeter.control.ForeachController
+ForeachControlPanel=org.apache.jmeter.control.gui.ForeachControlPanel
+FtpConfigGui=org.apache.jmeter.protocol.ftp.config.gui.FtpConfigGui
+FTPSampler=org.apache.jmeter.protocol.ftp.sampler.FTPSampler
+FtpTestSamplerGui=org.apache.jmeter.protocol.ftp.control.gui.FtpTestSamplerGui
+GaussianRandomTimer=org.apache.jmeter.timers.GaussianRandomTimer
+GaussianRandomTimerGui=org.apache.jmeter.timers.gui.GaussianRandomTimerGui
+GenericController=org.apache.jmeter.control.GenericController
+GraphAccumVisualizer=org.apache.jmeter.visualizers.GraphAccumVisualizer
+GraphVisualizer=org.apache.jmeter.visualizers.GraphVisualizer
+Header=org.apache.jmeter.protocol.http.control.Header
+HeaderManager=org.apache.jmeter.protocol.http.control.HeaderManager
+HeaderPanel=org.apache.jmeter.protocol.http.gui.HeaderPanel
+HTMLAssertion=org.apache.jmeter.assertions.HTMLAssertion
+HTMLAssertionGui=org.apache.jmeter.assertions.gui.HTMLAssertionGui
+HTTPArgument=org.apache.jmeter.protocol.http.util.HTTPArgument
+HTTPArgumentsPanel=org.apache.jmeter.protocol.http.gui.HTTPArgumentsPanel
+HTTPFileArg=org.apache.jmeter.protocol.http.util.HTTPFileArg
+HTTPFileArgs=org.apache.jmeter.protocol.http.util.HTTPFileArgs
+HttpDefaultsGui=org.apache.jmeter.protocol.http.config.gui.HttpDefaultsGui
+HtmlExtractor=org.apache.jmeter.extractor.HtmlExtractor
+HtmlExtractorGui=org.apache.jmeter.extractor.gui.HtmlExtractorGui
+# removed in r1039684, probably not released. Not present in r322831 or since.
+#HttpGenericSampler=org.apache.jmeter.protocol.http.sampler.HttpGenericSampler
+# removed in r1039684, probably not released. Not present in r322831 or since.
+#HttpGenericSamplerGui=org.apache.jmeter.protocol.http.control.gui.HttpGenericSamplerGui
+HttpMirrorControl=org.apache.jmeter.protocol.http.control.HttpMirrorControl
+HttpMirrorControlGui=org.apache.jmeter.protocol.http.control.gui.HttpMirrorControlGui
+# r397955 - removed test class. Keep as commented entry for info only.
+#HTTPNullSampler=org.apache.jmeter.protocol.http.sampler.HTTPNullSampler
+# Merge previous 2 HTTP samplers into one
+HTTPSampler_=org.apache.jmeter.protocol.http.sampler.HTTPSampler
+HTTPSampler2_=org.apache.jmeter.protocol.http.sampler.HTTPSampler2
+HTTPSamplerProxy,HTTPSampler,HTTPSampler2=org.apache.jmeter.protocol.http.sampler.HTTPSamplerProxy
+# Merge GUIs
+HttpTestSampleGui,HttpTestSampleGui2=org.apache.jmeter.protocol.http.control.gui.HttpTestSampleGui
+#HttpTestSampleGui2=org.apache.jmeter.protocol.http.control.gui.HttpTestSampleGui2
+IfController=org.apache.jmeter.control.IfController
+IfControllerPanel=org.apache.jmeter.control.gui.IfControllerPanel
+IncludeController=org.apache.jmeter.control.IncludeController
+IncludeControllerGui=org.apache.jmeter.control.gui.IncludeControllerGui
+InterleaveControl=org.apache.jmeter.control.InterleaveControl
+InterleaveControlGui=org.apache.jmeter.control.gui.InterleaveControlGui
+JavaConfig=org.apache.jmeter.protocol.java.config.JavaConfig
+JavaConfigGui=org.apache.jmeter.protocol.java.config.gui.JavaConfigGui
+JavaSampler=org.apache.jmeter.protocol.java.sampler.JavaSampler
+JavaTest=org.apache.jmeter.protocol.java.test.JavaTest
+JavaTestSamplerGui=org.apache.jmeter.protocol.java.control.gui.JavaTestSamplerGui
+JDBCDataSource=org.apache.jmeter.protocol.jdbc.config.DataSourceElement
+JDBCPostProcessor=org.apache.jmeter.protocol.jdbc.processor.JDBCPostProcessor
+JDBCPreProcessor=org.apache.jmeter.protocol.jdbc.processor.JDBCPreProcessor
+JDBCSampler=org.apache.jmeter.protocol.jdbc.sampler.JDBCSampler
+# Renamed to JMSSamplerGui; keep original entry for backwards compatibility
+JMSConfigGui=org.apache.jmeter.protocol.jms.control.gui.JMSConfigGui
+JMSProperties=org.apache.jmeter.protocol.jms.sampler.JMSProperties
+JMSProperty=org.apache.jmeter.protocol.jms.sampler.JMSProperty
+JMSPublisherGui=org.apache.jmeter.protocol.jms.control.gui.JMSPublisherGui
+JMSSampler=org.apache.jmeter.protocol.jms.sampler.JMSSampler
+JMSSamplerGui=org.apache.jmeter.protocol.jms.control.gui.JMSSamplerGui
+JMSSubscriberGui=org.apache.jmeter.protocol.jms.control.gui.JMSSubscriberGui
+# Removed in r545311 as Jndi no longer present; keep for compat.
+JndiDefaultsGui=org.apache.jmeter.protocol.jms.control.gui.JndiDefaultsGui
+JSR223Assertion=org.apache.jmeter.assertions.JSR223Assertion
+JSR223Listener=org.apache.jmeter.visualizers.JSR223Listener
+JSR223PostProcessor=org.apache.jmeter.extractor.JSR223PostProcessor
+JSR223PreProcessor=org.apache.jmeter.modifiers.JSR223PreProcessor
+JSR223Sampler=org.apache.jmeter.protocol.java.sampler.JSR223Sampler
+JSR223Timer=org.apache.jmeter.timers.JSR223Timer
+JUnitSampler=org.apache.jmeter.protocol.java.sampler.JUnitSampler
+JUnitTestSamplerGui=org.apache.jmeter.protocol.java.control.gui.JUnitTestSamplerGui
+KeystoreConfig=org.apache.jmeter.config.KeystoreConfig
+LDAPArgument=org.apache.jmeter.protocol.ldap.config.gui.LDAPArgument
+LDAPArguments=org.apache.jmeter.protocol.ldap.config.gui.LDAPArguments
+LDAPArgumentsPanel=org.apache.jmeter.protocol.ldap.config.gui.LDAPArgumentsPanel
+LdapConfigGui=org.apache.jmeter.protocol.ldap.config.gui.LdapConfigGui
+LdapExtConfigGui=org.apache.jmeter.protocol.ldap.config.gui.LdapExtConfigGui
+LDAPExtSampler=org.apache.jmeter.protocol.ldap.sampler.LDAPExtSampler
+LdapExtTestSamplerGui=org.apache.jmeter.protocol.ldap.control.gui.LdapExtTestSamplerGui
+LDAPSampler=org.apache.jmeter.protocol.ldap.sampler.LDAPSampler
+LdapTestSamplerGui=org.apache.jmeter.protocol.ldap.control.gui.LdapTestSamplerGui
+LogicControllerGui=org.apache.jmeter.control.gui.LogicControllerGui
+LoginConfig=org.apache.jmeter.config.LoginConfig
+LoginConfigGui=org.apache.jmeter.config.gui.LoginConfigGui
+LoopController=org.apache.jmeter.control.LoopController
+LoopControlPanel=org.apache.jmeter.control.gui.LoopControlPanel
+MailerModel=org.apache.jmeter.reporters.MailerModel
+MailerResultCollector=org.apache.jmeter.reporters.MailerResultCollector
+MailerVisualizer=org.apache.jmeter.visualizers.MailerVisualizer
+MailReaderSampler=org.apache.jmeter.protocol.mail.sampler.MailReaderSampler
+MailReaderSamplerGui=org.apache.jmeter.protocol.mail.sampler.gui.MailReaderSamplerGui
+MD5HexAssertion=org.apache.jmeter.assertions.MD5HexAssertion
+MD5HexAssertionGUI=org.apache.jmeter.assertions.gui.MD5HexAssertionGUI
+ModuleController=org.apache.jmeter.control.ModuleController
+ModuleControllerGui=org.apache.jmeter.control.gui.ModuleControllerGui
+MongoScriptSampler=org.apache.jmeter.protocol.mongodb.sampler.MongoScriptSampler
+MongoSourceElement=org.apache.jmeter.protocol.mongodb.config.MongoSourceElement
+MonitorHealthVisualizer=org.apache.jmeter.visualizers.MonitorHealthVisualizer
+NamePanel=org.apache.jmeter.gui.NamePanel
+ObsoleteGui=org.apache.jmeter.config.gui.ObsoleteGui
+OnceOnlyController=org.apache.jmeter.control.OnceOnlyController
+OnceOnlyControllerGui=org.apache.jmeter.control.gui.OnceOnlyControllerGui
+ParamMask=org.apache.jmeter.protocol.http.modifier.ParamMask
+ParamModifier=org.apache.jmeter.protocol.http.modifier.ParamModifier
+ParamModifierGui=org.apache.jmeter.protocol.http.modifier.gui.ParamModifierGui
+PoissonRandomTimer=org.apache.jmeter.timers.PoissonRandomTimer
+PoissonRandomTimerGui=org.apache.jmeter.timers.gui.PoissonRandomTimerGui
+PropertyControlGui=org.apache.jmeter.visualizers.PropertyControlGui
+ProxyControl=org.apache.jmeter.protocol.http.proxy.ProxyControl
+ProxyControlGui=org.apache.jmeter.protocol.http.proxy.gui.ProxyControlGui
+PublisherSampler=org.apache.jmeter.protocol.jms.sampler.PublisherSampler
+RandomControlGui=org.apache.jmeter.control.gui.RandomControlGui
+RandomController=org.apache.jmeter.control.RandomController
+RandomOrderController=org.apache.jmeter.control.RandomOrderController
+RandomOrderControllerGui=org.apache.jmeter.control.gui.RandomOrderControllerGui
+RandomVariableConfig=org.apache.jmeter.config.RandomVariableConfig
+RecordController=org.apache.jmeter.protocol.http.control.gui.RecordController
+RecordingController=org.apache.jmeter.protocol.http.control.RecordingController
+# removed in r1039684, class was deleted in r580452
+ReflectionThreadGroup=org.apache.jmeter.threads.ReflectionThreadGroup
+RegexExtractor=org.apache.jmeter.extractor.RegexExtractor
+RegexExtractorGui=org.apache.jmeter.extractor.gui.RegexExtractorGui
+RegExUserParameters=org.apache.jmeter.protocol.http.modifier.RegExUserParameters
+RegExUserParametersGui=org.apache.jmeter.protocol.http.modifier.gui.RegExUserParametersGui
+RemoteListenerWrapper=org.apache.jmeter.samplers.RemoteListenerWrapper
+RemoteSampleListenerWrapper=org.apache.jmeter.samplers.RemoteSampleListenerWrapper
+RemoteTestListenerWrapper=org.apache.jmeter.samplers.RemoteTestListenerWrapper
+RemoteThreadsListenerWrapper=org.apache.jmeter.threads.RemoteThreadsListenerWrapper
+ResponseAssertion=org.apache.jmeter.assertions.ResponseAssertion
+RespTimeGraphVisualizer=org.apache.jmeter.visualizers.RespTimeGraphVisualizer
+ResultAction=org.apache.jmeter.reporters.ResultAction
+ResultActionGui=org.apache.jmeter.reporters.gui.ResultActionGui
+ResultCollector=org.apache.jmeter.reporters.ResultCollector
+ResultSaver=org.apache.jmeter.reporters.ResultSaver
+ResultSaverGui=org.apache.jmeter.reporters.gui.ResultSaverGui
+RunTime=org.apache.jmeter.control.RunTime
+RunTimeGui=org.apache.jmeter.control.gui.RunTimeGui
+SampleSaveConfiguration=org.apache.jmeter.samplers.SampleSaveConfiguration
+SimpleConfigGui=org.apache.jmeter.config.gui.SimpleConfigGui
+SimpleDataWriter=org.apache.jmeter.visualizers.SimpleDataWriter
+SizeAssertion=org.apache.jmeter.assertions.SizeAssertion
+SizeAssertionGui=org.apache.jmeter.assertions.gui.SizeAssertionGui
+SMIMEAssertion=org.apache.jmeter.assertions.SMIMEAssertionTestElement
+SMIMEAssertionGui=org.apache.jmeter.assertions.gui.SMIMEAssertionGui
+SmtpSampler=org.apache.jmeter.protocol.smtp.sampler.SmtpSampler
+SmtpSamplerGui=org.apache.jmeter.protocol.smtp.sampler.gui.SmtpSamplerGui
+SoapSampler=org.apache.jmeter.protocol.http.sampler.SoapSampler
+SoapSamplerGui=org.apache.jmeter.protocol.http.control.gui.SoapSamplerGui
+SplineVisualizer=org.apache.jmeter.visualizers.SplineVisualizer
+# Originally deleted in r397955 as class is obsolete; needed for compat.
+SqlConfigGui=org.apache.jmeter.protocol.jdbc.config.gui.SqlConfigGui
+StatGraphVisualizer=org.apache.jmeter.visualizers.StatGraphVisualizer
+StatVisualizer=org.apache.jmeter.visualizers.StatVisualizer
+SubscriberSampler=org.apache.jmeter.protocol.jms.sampler.SubscriberSampler
+SubstitutionElement=org.apache.jmeter.assertions.SubstitutionElement
+Summariser=org.apache.jmeter.reporters.Summariser
+SummariserGui=org.apache.jmeter.reporters.gui.SummariserGui
+SummaryReport=org.apache.jmeter.visualizers.SummaryReport
+SwitchController=org.apache.jmeter.control.SwitchController
+SwitchControllerGui=org.apache.jmeter.control.gui.SwitchControllerGui
+SyncTimer=org.apache.jmeter.timers.SyncTimer
+SystemSampler=org.apache.jmeter.protocol.system.SystemSampler
+SystemSamplerGui=org.apache.jmeter.protocol.system.gui.SystemSamplerGui
+TableVisualizer=org.apache.jmeter.visualizers.TableVisualizer
+TCPConfigGui=org.apache.jmeter.protocol.tcp.config.gui.TCPConfigGui
+TCPSampler=org.apache.jmeter.protocol.tcp.sampler.TCPSampler
+TCPSamplerGui=org.apache.jmeter.protocol.tcp.control.gui.TCPSamplerGui
+TestAction=org.apache.jmeter.sampler.TestAction
+TestActionGui=org.apache.jmeter.sampler.gui.TestActionGui
+TestBeanGUI=org.apache.jmeter.testbeans.gui.TestBeanGUI
+TestFragmentController=org.apache.jmeter.control.TestFragmentController
+TestFragmentControllerGui=org.apache.jmeter.control.gui.TestFragmentControllerGui
+TestPlan=org.apache.jmeter.testelement.TestPlan
+TestPlanGui=org.apache.jmeter.control.gui.TestPlanGui
+ThreadGroup=org.apache.jmeter.threads.ThreadGroup
+ThreadGroupGui=org.apache.jmeter.threads.gui.ThreadGroupGui
+PostThreadGroup=org.apache.jmeter.threads.PostThreadGroup
+PostThreadGroupGui=org.apache.jmeter.threads.gui.PostThreadGroupGui
+SetupThreadGroup=org.apache.jmeter.threads.SetupThreadGroup
+SetupThreadGroupGui=org.apache.jmeter.threads.gui.SetupThreadGroupGui
+ThroughputController=org.apache.jmeter.control.ThroughputController
+ThroughputControllerGui=org.apache.jmeter.control.gui.ThroughputControllerGui
+TransactionController=org.apache.jmeter.control.TransactionController
+TransactionControllerGui=org.apache.jmeter.control.gui.TransactionControllerGui
+TransactionSampler=org.apache.jmeter.control.TransactionSampler
+UniformRandomTimer=org.apache.jmeter.timers.UniformRandomTimer
+UniformRandomTimerGui=org.apache.jmeter.timers.gui.UniformRandomTimerGui
+URLRewritingModifier=org.apache.jmeter.protocol.http.modifier.URLRewritingModifier
+URLRewritingModifierGui=org.apache.jmeter.protocol.http.modifier.gui.URLRewritingModifierGui
+UserParameterModifier=org.apache.jmeter.protocol.http.modifier.UserParameterModifier
+UserParameterModifierGui=org.apache.jmeter.protocol.http.modifier.gui.UserParameterModifierGui
+UserParameters=org.apache.jmeter.modifiers.UserParameters
+UserParametersGui=org.apache.jmeter.modifiers.gui.UserParametersGui
+ViewResultsFullVisualizer=org.apache.jmeter.visualizers.ViewResultsFullVisualizer
+WebServiceSampler=org.apache.jmeter.protocol.http.sampler.WebServiceSampler
+WebServiceSamplerGui=org.apache.jmeter.protocol.http.control.gui.WebServiceSamplerGui
+WhileController=org.apache.jmeter.control.WhileController
+WhileControllerGui=org.apache.jmeter.control.gui.WhileControllerGui
+WorkBench=org.apache.jmeter.testelement.WorkBench
+WorkBenchGui=org.apache.jmeter.control.gui.WorkBenchGui
+XMLAssertion=org.apache.jmeter.assertions.XMLAssertion
+XMLAssertionGui=org.apache.jmeter.assertions.gui.XMLAssertionGui
+XMLSchemaAssertion=org.apache.jmeter.assertions.XMLSchemaAssertion
+XMLSchemaAssertionGUI=org.apache.jmeter.assertions.gui.XMLSchemaAssertionGUI
+XPathAssertion=org.apache.jmeter.assertions.XPathAssertion
+XPathAssertionGui=org.apache.jmeter.assertions.gui.XPathAssertionGui
+XPathExtractor=org.apache.jmeter.extractor.XPathExtractor
+XPathExtractorGui=org.apache.jmeter.extractor.gui.XPathExtractorGui
+#
+# Properties - all start with lower case letter and end with Prop
+#
+boolProp=org.apache.jmeter.testelement.property.BooleanProperty
+collectionProp=org.apache.jmeter.testelement.property.CollectionProperty
+doubleProp=org.apache.jmeter.testelement.property.DoubleProperty
+elementProp=org.apache.jmeter.testelement.property.TestElementProperty
+# see above - already defined as FloatProperty
+#floatProp=org.apache.jmeter.testelement.property.FloatProperty
+intProp=org.apache.jmeter.testelement.property.IntegerProperty
+longProp=org.apache.jmeter.testelement.property.LongProperty
+mapProp=org.apache.jmeter.testelement.property.MapProperty
+objProp=org.apache.jmeter.testelement.property.ObjectProperty
+stringProp=org.apache.jmeter.testelement.property.StringProperty
+#
+# Other - must start with a lower case letter (and not end with Prop)
+# (otherwise they could clash with the initial set of aliases)
+#
+hashTree=org.apache.jorphan.collections.ListedHashTree
+jmeterTestPlan=org.apache.jmeter.save.ScriptWrapper
+sample=org.apache.jmeter.samplers.SampleResult
+httpSample=org.apache.jmeter.protocol.http.sampler.HTTPSampleResult
+statSample=org.apache.jmeter.samplers.StatisticalSampleResult
+testResults=org.apache.jmeter.save.TestResultWrapper
+assertionResult=org.apache.jmeter.assertions.AssertionResult
+monitorStats=org.apache.jmeter.visualizers.MonitorStats
+sampleEvent=org.apache.jmeter.samplers.SampleEvent
+#
+# Converters to register.  Must start line with '_'
+# If the converter is a collection of subitems, set equal to "collection"
+# If the converter needs to know the class mappings but is not a collection of
+#      subitems, set it equal to "mapping"
+_org.apache.jmeter.protocol.http.sampler.HTTPSamplerBaseConverter=collection
+_org.apache.jmeter.protocol.http.util.HTTPResultConverter=collection
+_org.apache.jmeter.save.converters.BooleanPropertyConverter=
+_org.apache.jmeter.save.converters.IntegerPropertyConverter=
+_org.apache.jmeter.save.converters.LongPropertyConverter=
+_org.apache.jmeter.save.converters.MultiPropertyConverter=collection
+_org.apache.jmeter.save.converters.SampleEventConverter=
+_org.apache.jmeter.save.converters.SampleResultConverter=collection
+_org.apache.jmeter.save.converters.SampleSaveConfigurationConverter=collection
+_org.apache.jmeter.save.converters.StringPropertyConverter=
+_org.apache.jmeter.save.converters.HashTreeConverter=collection
+_org.apache.jmeter.save.converters.TestElementConverter=collection
+_org.apache.jmeter.save.converters.TestElementPropertyConverter=collection
+_org.apache.jmeter.save.converters.TestResultWrapperConverter=collection
+_org.apache.jmeter.save.ScriptWrapperConverter=mapping
+#
+#	Remember to update the _version entry
+#
\ No newline at end of file
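To make the alias table concrete: XStream writes each alias (the left-hand side) as the XML element name in the saved .jmx file and maps it back to the listed class on load. A hand-written, abridged fragment for illustration:

    <ThreadGroup guiclass="ThreadGroupGui" testclass="ThreadGroup" testname="Thread Group" enabled="true">
      <stringProp name="ThreadGroup.on_sample_error">continue</stringProp>
      <elementProp name="ThreadGroup.main_controller" elementType="LoopController"
                   guiclass="LoopControlPanel" testclass="LoopController">
        <boolProp name="LoopController.continue_forever">false</boolProp>
        <stringProp name="LoopController.loops">1</stringProp>
      </elementProp>
    </ThreadGroup>

Here ThreadGroup, elementProp, boolProp and stringProp all resolve through the mappings above, which is why the file warns that existing entries must never be renamed or removed.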

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/AMS-HBASE.dat
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/AMS-HBASE.dat b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/AMS-HBASE.dat
new file mode 100644
index 0000000..63ac9f3
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/AMS-HBASE.dat
@@ -0,0 +1,18 @@
+dfs.FSNamesystem.MissingReplOneBlocks
+dfs.FSNamesystem.TransactionsSinceLastCheckpoint
+dfs.FSNamesystem.MillisSinceLastLoadedEdits
+dfs.FSNamesystem.SnapshottableDirectories
+master.Master.QueueCallTime_median
+dfs.FSNamesystem.LastCheckpointTime
+dfs.FSNamesystem.TotalFiles
+dfs.FSNamesystem.ExpiredHeartbeats
+dfs.FSNamesystem.PostponedMisreplicatedBlocks
+dfs.FSNamesystem.LastWrittenTransactionId
+jvm.JvmMetrics.MemHeapCommittedM
+dfs.FSNamesystem.Snapshots
+dfs.FSNamesystem.TransactionsSinceLastLogRoll
+master.Server.averageLoad
+jvm.JvmMetrics.MemHeapUsedM
+master.AssignmentManger.ritCount
+dfs.FSNamesystem.PendingDataNodeMessageCount
+dfs.FSNamesystem.StaleDataNodes
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/FLUME_HANDLER.dat
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/FLUME_HANDLER.dat b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/FLUME_HANDLER.dat
new file mode 100644
index 0000000..bd5852f
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/metrics_def/FLUME_HANDLER.dat
@@ -0,0 +1,40 @@
+ChannelSize._rate._min._sum
+ChannelSize._rate._sum._max
+ChannelSize._rate._min._max
+ChannelSize._rate._avg._min
+ChannelSize._rate._avg._avg
+ChannelSize._rate._max
+ChannelSize._rate._max._min
+ChannelSize._rate._max._avg
+ChannelSize._rate._avg._sum
+ChannelSize._rate._max._sum
+ChannelSize._rate._sum
+ChannelSize._rate._sum._min
+ChannelSize._rate._sum._avg
+ChannelSize._rate._min._avg
+ChannelSize._rate._min._min
+ChannelSize._rate._avg._max
+ChannelSize._rate._max._max
+ChannelSize._rate._avg
+ChannelSize._rate._min
+ChannelSize._rate._sum._sum
+EventPutSuccessCount._rate._avg._sum
+EventPutSuccessCount._rate._max._sum
+EventPutSuccessCount._rate._sum._sum
+EventPutSuccessCount._rate._max._max
+EventPutSuccessCount._rate._min._avg
+EventPutSuccessCount._rate._min._min
+EventPutSuccessCount._rate._avg._max
+EventPutSuccessCount._rate._sum._min
+EventPutSuccessCount._rate._sum._avg
+EventPutSuccessCount._rate._min._sum
+EventPutSuccessCount._rate._max
+EventPutSuccessCount._rate._max._avg
+EventPutSuccessCount._rate._avg._avg
+EventPutSuccessCount._rate._max._min
+EventPutSuccessCount._rate._avg._min
+EventPutSuccessCount._rate._avg
+EventPutSuccessCount._rate._min
+EventPutSuccessCount._rate._sum._max
+EventPutSuccessCount._rate._min._max
+EventPutSuccessCount._rate._sum
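Each line in these .dat files is a fully qualified metric name: a base Flume counter plus aggregate suffixes such as ._rate, ._avg, ._min, ._max, ._sum. As a purely illustrative example (host, port and timestamps are assumptions, not taken from this patch), one such series could be read back from the Metrics Collector's timeline endpoint:

    GET http://<collector-host>:6188/ws/v1/timeline/metrics?metricNames=ChannelSize._rate._avg&appId=FLUME_HANDLER&startTime=1445500000000&endTime=1445503600000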


[02/50] [abbrv] ambari git commit: AMBARI-13500. "Hadoop Group" is absent on Customize Services -> Misc tab

Posted by nc...@apache.org.
AMBARI-13500. "Hadoop Group" is absent on Customize Services -> Misc tab


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2ff2d2fb
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2ff2d2fb
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2ff2d2fb

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 2ff2d2fb3b9f583b35b95aa15bbf18dfa1214c6c
Parents: b60b3af
Author: Alex Antonenko <hi...@gmail.com>
Authored: Wed Oct 21 19:00:13 2015 +0300
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Wed Oct 21 20:02:34 2015 +0300

----------------------------------------------------------------------
 .../resources/stacks/BIGTOP/0.8/configuration/cluster-env.xml    | 4 ++--
 .../resources/stacks/HDP/2.0.6/configuration/cluster-env.xml     | 2 +-
 .../resources/stacks/HDPWIN/2.1/configuration/cluster-env.xml    | 2 +-
 .../stacks/HDP/0.2/services/HDFS/configuration/hadoop-env.xml    | 4 ++--
 .../HDP/0.2/services/HDFS/configuration/hadoop-env.xml           | 4 ++--
 5 files changed, 8 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2ff2d2fb/ambari-server/src/main/resources/stacks/BIGTOP/0.8/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/configuration/cluster-env.xml
index b935a1a..c87df1b 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/configuration/cluster-env.xml
@@ -53,9 +53,9 @@
     </property>
     <property>
         <name>user_group</name>
-        <display-name>User Group</display-name>
+        <display-name>Hadoop Group</display-name>
         <value>hadoop</value>
         <property-type>GROUP</property-type>
         <description>Hadoop user group.</description>
     </property>
-</configuration>
\ No newline at end of file
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/2ff2d2fb/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index c66bc12..805aa29 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -56,7 +56,7 @@
     </property>
     <property>
         <name>user_group</name>
-        <display-name>User Group</display-name>
+        <display-name>Hadoop Group</display-name>
         <value>hadoop</value>
         <property-type>GROUP</property-type>
         <description>Hadoop user group.</description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/2ff2d2fb/ambari-server/src/main/resources/stacks/HDPWIN/2.1/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/configuration/cluster-env.xml
index 211795d..a4d8acc 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/configuration/cluster-env.xml
@@ -77,7 +77,7 @@
   </property>
   <property>
     <name>user_group</name>
-    <display-name>User Group</display-name>
+    <display-name>Hadoop Group</display-name>
     <value>hadoop</value>
     <property-type>GROUP</property-type>
     <description>Hadoop user group.</description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/2ff2d2fb/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/configuration/hadoop-env.xml
index a074faf..26f90c8 100644
--- a/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/configuration/hadoop-env.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/configuration/hadoop-env.xml
@@ -101,7 +101,7 @@
   </property>
   <property>
     <name>user_group</name>
-    <display-name>User Group</display-name>
+    <display-name>Hadoop Group</display-name>
     <value>hadoop</value>
     <description>Proxy user group.</description>
   </property>
@@ -224,4 +224,4 @@ export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-a
     </value>
   </property>
   
-</configuration>
\ No newline at end of file
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/2ff2d2fb/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/hadoop-env.xml
index f094233..4677e57 100644
--- a/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/hadoop-env.xml
+++ b/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/hadoop-env.xml
@@ -100,7 +100,7 @@
   </property>
   <property>
     <name>user_group</name>
-    <display-name>User Group</display-name>
+    <display-name>Hadoop Group</display-name>
     <value>hadoop</value>
     <description>Proxy user group.</description>
   </property>
@@ -223,4 +223,4 @@ export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-a
     </value>
   </property>
   
-</configuration>
\ No newline at end of file
+</configuration>
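For reference, the complete definition in the stack cluster-env.xml files after this change reads:

    <property>
        <name>user_group</name>
        <display-name>Hadoop Group</display-name>
        <value>hadoop</value>
        <property-type>GROUP</property-type>
        <description>Hadoop user group.</description>
    </property>

(The two test-stack hadoop-env.xml files carry the same rename but keep their own description, "Proxy user group.")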


[18/50] [abbrv] ambari git commit: AMBARI-13522. Override button for Flume is absent after first overriding (UI). (akovalenko)

Posted by nc...@apache.org.
AMBARI-13522. Override button for Flume is absent after first overriding (UI). (akovalenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1f702fbf
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1f702fbf
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1f702fbf

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 1f702fbf922a64b81efc2c70496afa8dba8ae3ab
Parents: 96fecce
Author: Aleksandr Kovalenko <ak...@hortonworks.com>
Authored: Thu Oct 22 15:17:38 2015 +0300
Committer: Aleksandr Kovalenko <ak...@hortonworks.com>
Committed: Thu Oct 22 15:21:21 2015 +0300

----------------------------------------------------------------------
 ambari-web/app/mixins/common/configs/configs_loader.js | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/1f702fbf/ambari-web/app/mixins/common/configs/configs_loader.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/common/configs/configs_loader.js b/ambari-web/app/mixins/common/configs/configs_loader.js
index 4e64fb3..fdcd171 100644
--- a/ambari-web/app/mixins/common/configs/configs_loader.js
+++ b/ambari-web/app/mixins/common/configs/configs_loader.js
@@ -98,6 +98,7 @@ App.ConfigsLoader = Em.Mixin.create(App.GroupsMappingMixin, {
   loadCurrentVersions: function() {
     this.set('isCompareMode', false);
     this.set('versionLoaded', false);
+    this.set('selectedVersion', this.get('currentDefaultVersion'));
     this.trackRequest(App.ajax.send({
       name: 'service.serviceConfigVersions.get.current',
       sender: this,


[46/50] [abbrv] ambari git commit: AMBARI-13524 - Change a Request In Progress To Skip Errors Automatically (jonathanhurley)

Posted by nc...@apache.org.
AMBARI-13524 - Change a Request In Progress To Skip Errors Automatically (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/502dc186
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/502dc186
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/502dc186

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 502dc1862d974eb4c072864db0981a11bdfe4f66
Parents: a61a83f
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Thu Oct 22 08:41:27 2015 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Fri Oct 23 08:04:09 2015 -0400

----------------------------------------------------------------------
 .../internal/UpgradeResourceProvider.java       | 147 ++++++++++++++-----
 .../server/orm/dao/HostRoleCommandDAO.java      |  67 +++++++++
 .../orm/entities/HostRoleCommandEntity.java     |   4 +-
 .../server/orm/dao/HostRoleCommandDAOTest.java  |  40 +++++
 4 files changed, 221 insertions(+), 37 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/502dc186/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 78c36f8..044c707 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -21,6 +21,7 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOOKS_FOL
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_PACKAGE_FOLDER;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.VERSION;
 
+import java.text.MessageFormat;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -102,10 +103,12 @@ import org.apache.ambari.server.state.stack.upgrade.TaskWrapper;
 import org.apache.ambari.server.state.stack.upgrade.UpdateStackGrouping;
 import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostServerActionEvent;
+import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.collect.Lists;
 import com.google.gson.Gson;
 import com.google.inject.Inject;
 import com.google.inject.Provider;
@@ -387,57 +390,68 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     // !!! above check ensures only one
     final Map<String, Object> propertyMap = requestMaps.iterator().next();
 
-    String requestId = (String) propertyMap.get(UPGRADE_REQUEST_ID);
-    if (null == requestId) {
+    String requestIdProperty = (String) propertyMap.get(UPGRADE_REQUEST_ID);
+    if (null == requestIdProperty) {
       throw new IllegalArgumentException(String.format("%s is required", UPGRADE_REQUEST_ID));
     }
 
-    String requestStatus = (String) propertyMap.get(UPGRADE_REQUEST_STATUS);
-    if (null == requestStatus) {
-      throw new IllegalArgumentException(String.format("%s is required", UPGRADE_REQUEST_STATUS));
+    long requestId = Long.parseLong(requestIdProperty);
+    UpgradeEntity upgradeEntity = s_upgradeDAO.findUpgradeByRequestId(requestId);
+    if( null == upgradeEntity){
+      String exceptionMessage = MessageFormat.format("The upgrade with request ID {0} was not found", requestIdProperty);
+      throw new NoSuchParentResourceException(exceptionMessage);
     }
 
-    HostRoleStatus status = HostRoleStatus.valueOf(requestStatus);
-    if (status != HostRoleStatus.ABORTED && status != HostRoleStatus.PENDING) {
-      throw new IllegalArgumentException(String.format("Cannot set status %s, only %s is allowed",
-          status, EnumSet.of(HostRoleStatus.ABORTED, HostRoleStatus.PENDING)));
-    }
+    // the properties which are allowed to be updated; the request must include
+    // at least 1
+    List<String> updatableProperties = Lists.newArrayList(UPGRADE_REQUEST_STATUS,
+        UPGRADE_SKIP_FAILURES, UPGRADE_SKIP_SC_FAILURES);
 
-    String reason = (String) propertyMap.get(UPGRADE_ABORT_REASON);
-    if (null == reason) {
-      reason = String.format(DEFAULT_REASON_TEMPLATE, requestId);
-    }
+    boolean isRequiredPropertyInRequest = CollectionUtils.containsAny(updatableProperties,
+        propertyMap.keySet());
 
-    ActionManager actionManager = getManagementController().getActionManager();
-    List<org.apache.ambari.server.actionmanager.Request> requests = actionManager.getRequests(
-        Collections.singletonList(Long.valueOf(requestId)));
-
-    org.apache.ambari.server.actionmanager.Request internalRequest = requests.get(0);
+    if (!isRequiredPropertyInRequest) {
+      String exceptionMessage = MessageFormat.format(
+          "At least one of the following properties is required in the request: {0}",
+          StringUtils.join(updatableProperties, ", "));
+      throw new IllegalArgumentException(exceptionMessage);
+    }
 
-    HostRoleStatus internalStatus = CalculatedStatus.statusFromStages(
-        internalRequest.getStages()).getStatus();
+    String requestStatus = (String) propertyMap.get(UPGRADE_REQUEST_STATUS);
+    String skipFailuresRequestProperty = (String) propertyMap.get(UPGRADE_SKIP_FAILURES);
+    String skipServiceCheckFailuresRequestProperty = (String) propertyMap.get(UPGRADE_SKIP_SC_FAILURES);
 
-    if (HostRoleStatus.PENDING == status && internalStatus != HostRoleStatus.ABORTED) {
-      throw new IllegalArgumentException(
-          String.format("Can only set status to %s when the upgrade is %s (currently %s)", status,
-              HostRoleStatus.ABORTED, internalStatus));
+    if (null != requestStatus) {
+      HostRoleStatus status = HostRoleStatus.valueOf(requestStatus);
+      setUpgradeRequestStatus(requestIdProperty, status, propertyMap);
     }
 
-    if (HostRoleStatus.ABORTED == status) {
-      if (!internalStatus.isCompletedState()) {
-        actionManager.cancelRequest(internalRequest.getRequestId(), reason);
+    // if either of the skip failure settings are in the request, then we need
+    // to iterate over the entire series of tasks anyway, so do them both at the
+    // same time
+    if (StringUtils.isNotEmpty(skipFailuresRequestProperty)
+        || StringUtils.isNotEmpty(skipServiceCheckFailuresRequestProperty)) {
+      // grab the current settings for both
+      boolean skipFailures = upgradeEntity.isComponentFailureAutoSkipped();
+      boolean skipServiceCheckFailures = upgradeEntity.isServiceCheckFailureAutoSkipped();
+
+      // update skipping failures on commands which are not SERVICE_CHECKs
+      if (null != skipFailuresRequestProperty) {
+        skipFailures = Boolean.parseBoolean(skipFailuresRequestProperty);
+        s_hostRoleCommandDAO.updateAutomaticSkipOnFailure(requestId, skipFailures);
       }
-    } else {
-      List<Long> taskIds = new ArrayList<Long>();
 
-      for (HostRoleCommand hrc : internalRequest.getCommands()) {
-        if (HostRoleStatus.ABORTED == hrc.getStatus()
-            || HostRoleStatus.TIMEDOUT == hrc.getStatus()) {
-          taskIds.add(hrc.getTaskId());
-        }
+      // if the service check failure skip is present, then update all role
+      // commands that are SERVICE_CHECKs
+      if (null != skipServiceCheckFailuresRequestProperty) {
+        skipServiceCheckFailures = Boolean.parseBoolean(skipServiceCheckFailuresRequestProperty);
+        s_hostRoleCommandDAO.updateAutomaticSkipServiceCheckFailure(requestId,
+            skipServiceCheckFailures);
       }
 
-      actionManager.resubmitTasks(taskIds);
+      upgradeEntity.setAutoSkipComponentFailures(skipFailures);
+      upgradeEntity.setAutoSkipServiceCheckFailures(skipServiceCheckFailures);
+      upgradeEntity = s_upgradeDAO.merge(upgradeEntity);
     }
 
     return getRequestStatus(null);
@@ -1449,4 +1463,65 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     parameters.put(KeyNames.REFRESH_CONFIG_TAGS_BEFORE_EXECUTION, "*");
     return parameters;
   }
+
+  /**
+   * Changes the status of the specified request for an upgrade. The valid
+   * values are:
+   * <ul>
+   * <li>{@link HostRoleStatus#ABORTED}</li>
+   * <li>{@link HostRoleStatus#PENDING}</li>
+   * </ul>
+   *
+   * @param requestId
+   *          the request to change the status for.
+   * @param status
+   *          the status to set
+   * @param propertyMap
+   *          the map of request properties (needed for things like abort reason
+   *          if present)
+   */
+  private void setUpgradeRequestStatus(String requestId, HostRoleStatus status,
+      Map<String, Object> propertyMap) {
+    if (status != HostRoleStatus.ABORTED && status != HostRoleStatus.PENDING) {
+      throw new IllegalArgumentException(String.format("Cannot set status %s, only %s is allowed",
+          status, EnumSet.of(HostRoleStatus.ABORTED, HostRoleStatus.PENDING)));
+    }
+
+    String reason = (String) propertyMap.get(UPGRADE_ABORT_REASON);
+    if (null == reason) {
+      reason = String.format(DEFAULT_REASON_TEMPLATE, requestId);
+    }
+
+    ActionManager actionManager = getManagementController().getActionManager();
+    List<org.apache.ambari.server.actionmanager.Request> requests = actionManager.getRequests(
+        Collections.singletonList(Long.valueOf(requestId)));
+
+    org.apache.ambari.server.actionmanager.Request internalRequest = requests.get(0);
+
+    HostRoleStatus internalStatus = CalculatedStatus.statusFromStages(
+        internalRequest.getStages()).getStatus();
+
+    if (HostRoleStatus.PENDING == status && internalStatus != HostRoleStatus.ABORTED) {
+      throw new IllegalArgumentException(
+          String.format("Can only set status to %s when the upgrade is %s (currently %s)", status,
+              HostRoleStatus.ABORTED, internalStatus));
+    }
+
+    if (HostRoleStatus.ABORTED == status) {
+      if (!internalStatus.isCompletedState()) {
+        actionManager.cancelRequest(internalRequest.getRequestId(), reason);
+      }
+    } else {
+      List<Long> taskIds = new ArrayList<Long>();
+
+      for (HostRoleCommand hrc : internalRequest.getCommands()) {
+        if (HostRoleStatus.ABORTED == hrc.getStatus()
+            || HostRoleStatus.TIMEDOUT == hrc.getStatus()) {
+          taskIds.add(hrc.getTaskId());
+        }
+      }
+
+      actionManager.resubmitTasks(taskIds);
+    }
+  }
 }
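In practice this lets an operator flip failure-skipping on a running upgrade with a single update call. A hypothetical request follows; the cluster name, request id and exact property keys are assumptions inferred from the UPGRADE_SKIP_FAILURES and UPGRADE_SKIP_SC_FAILURES constants, whose string values are not shown in this diff:

    PUT /api/v1/clusters/c1/upgrades/25
    {
      "Upgrade": {
        "skip_failures": "true",
        "skip_service_check_failures": "true"
      }
    }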

http://git-wip-us.apache.org/repos/asf/ambari/blob/502dc186/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAO.java
index 70e2940..14af03d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAO.java
@@ -32,6 +32,7 @@ import java.util.Map;
 import javax.persistence.EntityManager;
 import javax.persistence.TypedQuery;
 
+import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.orm.RequiresSession;
 import org.apache.ambari.server.orm.entities.HostEntity;
@@ -474,4 +475,70 @@ public class HostRoleCommandDAO {
 
     return map;
   }
+
+  /**
+   * Updates the {@link HostRoleCommandEntity#isFailureAutoSkipped()} flag for
+   * all commands which are not {@link RoleCommand#SERVICE_CHECK}.
+   * <p/>
+   * This will execute a JPQL {@code UPDATE} statement, bypassing the
+   * {@link EntityManager}. It does this because the number of
+   * {@link HostRoleCommandEntity}s could be in the tens of thousands. As a
+   * result, this will call {@link EntityManager#clear()} after the update to
+   * ensure that the updated entity state is reflected in future queries.
+   *
+   * @param requestId
+   *          the request ID of the commands to update
+   * @param automaticallySkipOnFailure
+   *          {@code true} to automatically skip failures, {@code false}
+   *          otherwise.
+   * @see HostRoleCommandDAO#updateAutomaticSkipServiceCheckFailure(long,
+   *      boolean)
+   */
+  @Transactional
+  public void updateAutomaticSkipOnFailure(long requestId, boolean automaticallySkipOnFailure) {
+    EntityManager entityManager = entityManagerProvider.get();
+
+    TypedQuery<HostRoleCommandEntity> query = entityManager.createNamedQuery(
+        "HostRoleCommandEntity.updateAutoSkipExcludeRoleCommand", HostRoleCommandEntity.class);
+
+    query.setParameter("requestId", requestId);
+    query.setParameter("roleCommand", RoleCommand.SERVICE_CHECK);
+    query.setParameter("autoSkipOnFailure", automaticallySkipOnFailure ? 1 : 0);
+    query.executeUpdate();
+
+    entityManager.clear();
+  }
+
+  /**
+   * Updates the {@link HostRoleCommandEntity#isFailureAutoSkipped()} flag for
+   * all commands which are of type {@link RoleCommand#SERVICE_CHECK}.
+   * <p/>
+   * This will execute a JPQL {@code UPDATE} statement, bypassing the
+   * {@link EntityManager}. It does this because the number of
+   * {@link HostRoleCommandEntity}s could be in the tens of thousands. As a
+   * result, this will call {@link EntityManager#clear()} after the update to
+   * ensure that the updated entity state is reflected in future queries.
+   *
+   * @param requestId
+   *          the request ID of the service check commands to update
+   * @param automaticallySkipOnFailure
+   *          {@code true} to automatically skip service check failures,
+   *          {@code false} otherwise.
+   * @see HostRoleCommandDAO#updateAutomaticSkipOnFailure(long, boolean)
+   */
+  @Transactional
+  public void updateAutomaticSkipServiceCheckFailure(long requestId,
+      boolean automaticallySkipOnFailure) {
+    EntityManager entityManager = entityManagerProvider.get();
+
+    TypedQuery<HostRoleCommandEntity> query = entityManager.createNamedQuery(
+        "HostRoleCommandEntity.updateAutoSkipForRoleCommand", HostRoleCommandEntity.class);
+
+    query.setParameter("requestId", requestId);
+    query.setParameter("roleCommand", RoleCommand.SERVICE_CHECK);
+    query.setParameter("autoSkipOnFailure", automaticallySkipOnFailure ? 1 : 0);
+    query.executeUpdate();
+
+    entityManager.clear();
+  }
 }
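The entityManager.clear() calls above are the important detail: a JPQL bulk UPDATE writes straight to the database and bypasses the persistence context, so any HostRoleCommandEntity already loaded keeps stale field values. A minimal sketch of the hazard (taskId and requestId are illustrative):

    // Loaded before the bulk update; now cached in the persistence context.
    HostRoleCommandEntity cached = entityManager.find(HostRoleCommandEntity.class, taskId);

    // Bulk JPQL update: writes to the database, does NOT touch 'cached'.
    entityManager.createQuery(
        "UPDATE HostRoleCommandEntity c SET c.autoSkipOnFailure = 1 WHERE c.requestId = :id")
        .setParameter("id", requestId)
        .executeUpdate();

    // Without clear(), a later find() may hand back the stale cached instance.
    entityManager.clear();
    HostRoleCommandEntity fresh = entityManager.find(HostRoleCommandEntity.class, taskId);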

http://git-wip-us.apache.org/repos/asf/ambari/blob/502dc186/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostRoleCommandEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostRoleCommandEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostRoleCommandEntity.java
index e0662fb..af71c40 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostRoleCommandEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostRoleCommandEntity.java
@@ -60,7 +60,9 @@ import org.apache.commons.lang.ArrayUtils;
     @NamedQuery(name = "HostRoleCommandEntity.findByHostId", query = "SELECT command FROM HostRoleCommandEntity command WHERE command.hostId=:hostId"),
     @NamedQuery(name = "HostRoleCommandEntity.findByHostRole", query = "SELECT command FROM HostRoleCommandEntity command WHERE command.hostEntity.hostName=:hostName AND command.requestId=:requestId AND command.stageId=:stageId AND command.role=:role ORDER BY command.taskId"),
     @NamedQuery(name = "HostRoleCommandEntity.findByHostRoleNullHost", query = "SELECT command FROM HostRoleCommandEntity command WHERE command.hostEntity IS NULL AND command.requestId=:requestId AND command.stageId=:stageId AND command.role=:role"),
-    @NamedQuery(name = "HostRoleCommandEntity.findByStatusBetweenStages", query = "SELECT command FROM HostRoleCommandEntity command WHERE command.requestId = :requestId AND command.stageId >= :minStageId AND command.stageId <= :maxStageId AND command.status = :status")
+    @NamedQuery(name = "HostRoleCommandEntity.findByStatusBetweenStages", query = "SELECT command FROM HostRoleCommandEntity command WHERE command.requestId = :requestId AND command.stageId >= :minStageId AND command.stageId <= :maxStageId AND command.status = :status"),
+    @NamedQuery(name = "HostRoleCommandEntity.updateAutoSkipExcludeRoleCommand", query = "UPDATE HostRoleCommandEntity command SET command.autoSkipOnFailure = :autoSkipOnFailure WHERE command.requestId = :requestId AND command.roleCommand <> :roleCommand"),
+    @NamedQuery(name = "HostRoleCommandEntity.updateAutoSkipForRoleCommand", query = "UPDATE HostRoleCommandEntity command SET command.autoSkipOnFailure = :autoSkipOnFailure WHERE command.requestId = :requestId AND command.roleCommand = :roleCommand")
 })
 public class HostRoleCommandEntity {
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/502dc186/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAOTest.java
index 1fded28..d7e9149 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAOTest.java
@@ -109,6 +109,46 @@ public class HostRoleCommandDAOTest {
   }
 
   /**
+   * Tests updating various commands to be skipped on failures automatically.
+   */
+  @Test
+  public void testUpdateAutoSkipOnFailures() {
+    OrmTestHelper helper = m_injector.getInstance(OrmTestHelper.class);
+    helper.createDefaultData();
+
+    Long requestId = Long.valueOf(100L);
+    ClusterEntity clusterEntity = m_clusterDAO.findByName("test_cluster1");
+
+    RequestEntity requestEntity = new RequestEntity();
+    requestEntity.setRequestId(requestId);
+    requestEntity.setClusterId(clusterEntity.getClusterId());
+    requestEntity.setStages(new ArrayList<StageEntity>());
+    m_requestDAO.create(requestEntity);
+
+    AtomicLong stageId = new AtomicLong(1);
+    HostEntity host = m_hostDAO.findByName("test_host1");
+    host.setHostRoleCommandEntities(new ArrayList<HostRoleCommandEntity>());
+
+    createStage(stageId.getAndIncrement(), 3, host, requestEntity, HostRoleStatus.PENDING, false);
+    createStage(stageId.getAndIncrement(), 2, host, requestEntity, HostRoleStatus.PENDING, false);
+    createStage(stageId.getAndIncrement(), 1, host, requestEntity, HostRoleStatus.PENDING, false);
+
+    List<HostRoleCommandEntity> tasks = m_hostRoleCommandDAO.findByRequest(requestId);
+    Assert.assertEquals(6, tasks.size());
+
+    for (HostRoleCommandEntity task : tasks) {
+      Assert.assertFalse(task.isFailureAutoSkipped());
+    }
+
+    m_hostRoleCommandDAO.updateAutomaticSkipOnFailure(requestId, true);
+    tasks = m_hostRoleCommandDAO.findByRequest(requestId);
+
+    for (HostRoleCommandEntity task : tasks) {
+      Assert.assertTrue(task.isFailureAutoSkipped());
+    }
+  }
+
+  /**
    * Creates a single stage with the specified number of commands.
    *
    * @param startStageId
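
The new test covers updateAutomaticSkipOnFailure(); a companion test for updateAutomaticSkipServiceCheckFailure() would follow the same shape. The sketch below is hypothetical (not taken from the patch) and assumes, as the fixture above suggests, that createStage() produces only non-SERVICE_CHECK commands, so the SERVICE_CHECK-only variant must leave their flags untouched:

  @Test
  public void testUpdateAutoSkipServiceCheckFailures() {
    // Same fixture as testUpdateAutoSkipOnFailures() above.
    OrmTestHelper helper = m_injector.getInstance(OrmTestHelper.class);
    helper.createDefaultData();

    Long requestId = Long.valueOf(100L);
    ClusterEntity clusterEntity = m_clusterDAO.findByName("test_cluster1");

    RequestEntity requestEntity = new RequestEntity();
    requestEntity.setRequestId(requestId);
    requestEntity.setClusterId(clusterEntity.getClusterId());
    requestEntity.setStages(new ArrayList<StageEntity>());
    m_requestDAO.create(requestEntity);

    HostEntity host = m_hostDAO.findByName("test_host1");
    host.setHostRoleCommandEntities(new ArrayList<HostRoleCommandEntity>());
    createStage(1, 3, host, requestEntity, HostRoleStatus.PENDING, false);

    // The SERVICE_CHECK-only bulk update must not touch regular commands,
    // assuming createStage() creates no SERVICE_CHECK tasks.
    m_hostRoleCommandDAO.updateAutomaticSkipServiceCheckFailure(requestId, true);
    for (HostRoleCommandEntity task : m_hostRoleCommandDAO.findByRequest(requestId)) {
      Assert.assertFalse(task.isFailureAutoSkipped());
    }
  }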


[17/50] [abbrv] ambari git commit: AMBARI-13519. Disk Usage widget displays not all metrics (onechiporenko)

Posted by nc...@apache.org.
AMBARI-13519. Disk Usage widget displays not all metrics (onechiporenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/96fecce1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/96fecce1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/96fecce1

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 96fecce1a350b1ba71c07438782883a3d17f509c
Parents: e652b35
Author: Oleg Nechiporenko <on...@apache.org>
Authored: Thu Oct 22 13:10:47 2015 +0300
Committer: Oleg Nechiporenko <on...@apache.org>
Committed: Thu Oct 22 13:18:36 2015 +0300

----------------------------------------------------------------------
 ambari-web/app/views/main/host/metrics/disk.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/96fecce1/ambari-web/app/views/main/host/metrics/disk.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/host/metrics/disk.js b/ambari-web/app/views/main/host/metrics/disk.js
index 4b8f40f..447fbd8 100644
--- a/ambari-web/app/views/main/host/metrics/disk.js
+++ b/ambari-web/app/views/main/host/metrics/disk.js
@@ -36,7 +36,7 @@ App.ChartHostMetricsDisk = App.ChartLinearTimeView.extend({
 
   loadGroup: {
     name: 'host.metrics.aggregated',
-    fields: ['metrics/disk/disk_total']
+    fields: ['metrics/disk/disk_total', 'metrics/disk/disk_free']
   },
 
   transformToSeries: function (jsonData) {


[16/50] [abbrv] ambari git commit: AMBARI-13407. Express Upgrade: Create other Upgrade Packs and Config Packs (dlysnichenko)

Posted by nc...@apache.org.
AMBARI-13407. Express Upgrade: Create other Upgrade Packs and Config Packs (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e652b358
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e652b358
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e652b358

Branch: refs/heads/branch-dev-patch-upgrade
Commit: e652b358748f8785622f5e6f50002f620a907678
Parents: 5eff797
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Thu Oct 22 11:34:18 2015 +0300
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Thu Oct 22 11:35:26 2015 +0300

----------------------------------------------------------------------
 .../server/checks/ClientRetryPropertyCheck.java |   2 +-
 .../HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml |  89 +++
 .../HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml | 680 +++++++++++++++++++
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml | 519 ++++++++++++++
 4 files changed, 1289 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e652b358/ambari-server/src/main/java/org/apache/ambari/server/checks/ClientRetryPropertyCheck.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/ClientRetryPropertyCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/ClientRetryPropertyCheck.java
index 368bcb8..257d575 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/ClientRetryPropertyCheck.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/ClientRetryPropertyCheck.java
@@ -37,7 +37,7 @@ import com.google.inject.Singleton;
  * client retry properties for HDFS, HIVE, and OOZIE are set.
  */
 @Singleton
-@UpgradeCheck(group = UpgradeCheckGroup.CLIENT_RETRY_PROPERTY, required = true)
+@UpgradeCheck(group = UpgradeCheckGroup.CLIENT_RETRY_PROPERTY, required = false)
 public class ClientRetryPropertyCheck extends AbstractCheckDescriptor {
 
   static final String HIVE_CLIENT_RETRY_MISSING_KEY = "hive.client.retry.missing.key";
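
The hunk above only flips required from true to false on the @UpgradeCheck annotation. Upgrade checks carry this kind of metadata as class-level annotations, which a registry can read reflectively to decide whether a check is forced into every upgrade's precheck list or consulted only when applicable. A loose, self-contained illustration of that reflective lookup, using a hypothetical annotation rather than Ambari's real @UpgradeCheck:

import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;

public class UpgradeCheckFlagDemo {

  // Hypothetical stand-in for the annotation; the real @UpgradeCheck also
  // carries a group attribute used for ordering.
  @Retention(RetentionPolicy.RUNTIME)
  @interface Check {
    boolean required() default false;
  }

  @Check(required = false)
  static class ClientRetryPropertyCheckStandIn { }

  public static void main(String[] args) {
    // A registry can consult the flag at runtime to classify the check.
    Check meta = ClientRetryPropertyCheckStandIn.class.getAnnotation(Check.class);
    System.out.println("required = " + meta.required());
  }
}

This is only a sketch of the mechanism implied by the annotation's usage, not the actual registry code.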

http://git-wip-us.apache.org/repos/asf/ambari/blob/e652b358/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
index c81b1ea..a23fe3d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
@@ -182,10 +182,99 @@
       <direction>UPGRADE</direction>   <!--  prevent config changes on downgrade -->
       <skippable>false</skippable>
 
+      <!--Changes for stack 2.2-->
       <!--YARN-->
       <execute-stage service="MAPREDUCE2" component="HISTORYSERVER">
         <task xsi:type="configure" id="hdp_2_2_0_0_historyserver_classpath"/>
       </execute-stage>
+
+
+      <!--Changes for stack 2.3-->
+      <!--HDFS-->
+      <execute-stage service="HDFS" component="NAMENODE">
+        <task xsi:type="configure" id="hdp_2_3_0_0_modify_hadoop_env"/>
+      </execute-stage>
+
+
+      <!--YARN-->
+      <execute-stage service="MAPREDUCE2" component="HISTORYSERVER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_mapreduce2_adjust_history_server"/>
+      </execute-stage>
+
+      <execute-stage service="YARN" component="APP_TIMELINE_SERVER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_yarn_ats_enable_recovery"/>
+      </execute-stage>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_yarn_rm_disable_node_labels"/>
+      </execute-stage>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_yarn_rm_clear_default_node_label_expression"/>
+      </execute-stage>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_yarn_rm_check_cs_root_def_capacity"/>
+      </execute-stage>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_yarn_rm_check_cs_root_max_capacity"/>
+      </execute-stage>
+
+
+      <!--HBASE-->
+      <execute-stage service="HBASE" component="HBASE_MASTER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hbase_master_set_global_memstore_size"/>
+      </execute-stage>
+
+      <execute-stage service="HBASE" component="HBASE_MASTER">
+        <task xsi:type="server_action" summary="Calculating HBase Properties" class="org.apache.ambari.server.serveraction.upgrades.HBaseConfigCalculation" />
+      </execute-stage>
+
+      <execute-stage service="HBASE" component="HBASE_MASTER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hbase_master_adjust_authorization_coprocessors"/>
+      </execute-stage>
+
+
+      <!--TEZ-->
+      <execute-stage service="TEZ" component="TEZ_CLIENT">
+        <task xsi:type="configure" id="hdp_2_3_0_0_tez_client_adjust_properties"/>
+      </execute-stage>
+
+
+      <!--HIVE-->
+      <execute-stage service="HIVE" component="HIVE_SERVER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_replace_auth_manager"/>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_SERVER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_configure_authentification"/>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="WEBHCAT_SERVER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_webhcat_server_update_env"/>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="WEBHCAT_SERVER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_webhcat_server_update_configuration_paths"/>
+      </execute-stage>
+
+
+      <!--OOZIE-->
+      <execute-stage service="OOZIE" component="OOZIE_SERVER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_oozie_remove_redundant_configurations"/>
+      </execute-stage>
+
+
+      <!--STORM-->
+      <execute-stage service="STORM" component="NIMBUS">
+        <task xsi:type="configure" id="hdp_2_3_0_0_nimbus_convert_nimbus_host_to_seeds"/>
+      </execute-stage>
+
+      <execute-stage service="STORM" component="NIMBUS">
+        <task xsi:type="configure" id="hdp_2_3_0_0_nimbus_update_env_vars"/>
+      </execute-stage>
+
     </group>
 
     <!-- After processing this group, will change the effective Stack of the UpgradeContext object. -->

http://git-wip-us.apache.org/repos/asf/ambari/blob/e652b358/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml
new file mode 100644
index 0000000..d71387a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml
@@ -0,0 +1,680 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+
+<upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+  <target>2.3.*.*</target>
+  <target-stack>HDP-2.3</target-stack>
+  <type>NON_ROLLING</type>
+  <downgrade-allowed>false</downgrade-allowed>
+  <prechecks>
+    <!--TODO: do we have any?-->
+  </prechecks>
+  <order>
+    <group xsi:type="cluster" name="PRE_CLUSTER" title="Prepare Upgrade">
+      <skippable>true</skippable>
+      <direction>UPGRADE</direction>
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues">
+        <task xsi:type="manual">
+          <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="SLIDER" component="SLIDER" title="Stop Long Running Applications on Slider">
+        <task xsi:type="manual">
+          <message>Before continuing, please stop all long-running applications deployed using Slider. E.g., su - yarn "/usr/hdp/current/slider-client/bin/slider stop &lt;app_name&gt;"</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="STORM" component="NIMBUS" title="Stop Storm Topologies">
+        <task xsi:type="manual">
+          <message>Before continuing, please deactivate and kill any currently running topologies.</message>
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="stop" name="Stop High-Level Daemons" title="Stop Daemons for High-Level Services">
+      <skippable>true</skippable>
+      <service-check>false</service-check>
+
+      <service name="FLUME">
+        <component>FLUME_HANDLER</component>
+      </service>
+
+      <service name="STORM">
+        <component>DRPC_SERVER</component>
+        <component>STORM_UI_SERVER</component>
+        <component>SUPERVISOR</component>
+        <component>STORM_REST_API</component>
+        <component>NIMBUS</component>
+      </service>
+
+      <service name="KNOX">
+        <component>KNOX_GATEWAY</component>
+      </service>
+
+      <service name="KAFKA">
+        <component>KAFKA_BROKER</component>
+      </service>
+
+      <service name="FALCON">
+        <component>FALCON_SERVER</component>
+      </service>
+
+      <service name="OOZIE">
+        <component>OOZIE_SERVER</component>
+      </service>
+
+      <service name="SPARK">
+        <component>SPARK_JOBHISTORYSERVER</component>
+      </service>
+
+      <service name="HIVE">
+        <component>WEBHCAT_SERVER</component>
+        <component>HIVE_SERVER</component>
+        <component>HIVE_METASTORE</component>
+      </service>
+
+      <service name="YARN">
+        <component>NODEMANAGER</component>
+        <component>RESOURCEMANAGER</component>
+        <component>APP_TIMELINE_SERVER</component>
+      </service>
+
+      <service name="MAPREDUCE2">
+        <component>HISTORYSERVER</component>
+      </service>
+    </group>
+
+    <group xsi:type="cluster" name="Backups" title="Take Backups">
+      <direction>UPGRADE</direction>
+      <skippable>true</skippable>
+      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Backup Oozie Database">
+        <task xsi:type="manual">
+          <message>Before continuing, please backup the Oozie Server database on {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_METASTORE" title="Backup Hive Metastore">
+        <task xsi:type="manual">
+          <message>Before continuing, please backup the Hive Metastore database located on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <!--TODO(dlysnichenko): This step is missing in 2.3->2.3+ RU pack -->
+      <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Backup Knox Data">
+        <task xsi:type="manual">
+          <message>Before continuing, please backup the Knox data. E.g., "cp -RL /etc/knox/data/security ~/knox_backup" on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HBASE" component="HBASE_MASTER" title="Snapshot HBASE">
+        <task xsi:type="execute" hosts="master">
+          <script>scripts/hbase_upgrade.py</script>
+          <function>take_snapshot</function>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HDFS" component="NAMENODE" title="Snapshot HDFS">
+        <task xsi:type="execute" hosts="master">
+          <script>scripts/namenode.py</script>
+          <function>prepare_non_rolling_upgrade</function>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Backup Ranger">
+        <task xsi:type="manual">
+          <message>Before continuing, please backup the Ranger Admin database and Ranger Audit database on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <!--TODO: Ranger KMS seems to be absent in the 2.2 stack, so it is not backed up-->
+    </group>
+
+    <group xsi:type="stop" name="Stop Low-Level Daemons" title="Stop Daemons for Low-Level Services">
+      <skippable>true</skippable>
+      <service-check>false</service-check>
+
+      <service name="HBASE">
+        <component>HBASE_REGIONSERVER</component>
+        <component>HBASE_MASTER</component>
+      </service>
+
+      <service name="HDFS">
+        <component>DATANODE</component>
+        <component>NAMENODE</component>
+        <component>SECONDARY_NAMENODE</component>
+        <component>ZKFC</component>
+        <component>JOURNALNODE</component>
+      </service>
+
+      <service name="RANGER">
+        <component>RANGER_USERSYNC</component>
+        <component>RANGER_ADMIN</component>
+      </service>
+
+      <service name="ZOOKEEPER">
+        <component>ZOOKEEPER_SERVER</component>
+      </service>
+    </group>
+
+    <group name="Marker for Downgrade" title="Marker for Downgrade">
+      <direction>UPGRADE</direction>
+      <!-- TODO, if the user attempts a downgrade before this step, they can simply abort. -->
+    </group>
+
+    <group xsi:type="cluster" name="Restore Backups" title="Restore Backups">
+      <direction>DOWNGRADE</direction>
+      <skippable>true</skippable>
+
+      <!-- If the user attempts a downgrade after this point, they will need to restore backups
+      before starting any of the services. -->
+
+      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Backup Oozie Database">
+        <task xsi:type="manual">
+          <message>Before continuing, please restore the Oozie Server database on {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_METASTORE" title="Backup Hive Metastore">
+        <task xsi:type="manual">
+          <message>Before continuing, please restore the Hive Metastore database located on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Backup Knox Data">
+        <task xsi:type="manual">
+          <message>Before continuing, please restore the Knox data. E.g., "cp -RL ~/knox_backup/* /etc/knox/data/security/" on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HBASE" component="HBASE_MASTER" title="Snapshot HBASE">
+        <task xsi:type="execute" hosts="master">
+          <script>scripts/hbase_upgrade.py</script>
+          <function>restore_snapshot</function>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HDFS" component="NAMENODE" title="Snapshot HDFS">
+        <task xsi:type="execute" hosts="master">
+          <script>scripts/namenode.py</script>
+          <function>restore_snapshot</function>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Backup Ranger">
+        <task xsi:type="manual">
+          <message>Before continuing, please restore the Ranger Admin database and Ranger Audit database on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <!--TODO: Ranger KMS seems to be absent in the 2.2 stack, so it is not backed up-->
+    </group>
+
+    <group xsi:type="cluster" name="Upgrade service configs" title="Upgrade service configs">
+      <direction>UPGRADE</direction>   <!--  prevent config changes on downgrade -->
+      <skippable>false</skippable>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger">
+        <task xsi:type="configure" id="hdp_2_3_0_0_update_ranger_env"/>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger">
+        <task xsi:type="configure" id="hdp_2_3_0_0_update_ranger_admin"/>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger">
+        <task xsi:type="server_action" summary="Calculating Ranger Properties" class="org.apache.ambari.server.serveraction.upgrades.RangerConfigCalculation" />
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger">
+        <task xsi:type="configure" id="hdp_2_3_0_0_update_ranger_usersync"/>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger">
+        <task xsi:type="configure" id="hdp_2_3_0_0_update_ranger_site"/>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger">
+        <task xsi:type="configure" id="hdp_2_3_0_0_update_ranger_usersync_properties"/>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger">
+        <task xsi:type="configure" id="hdp_2_3_0_0_update_ranger_delete_oracle_home"/>
+      </execute-stage>
+
+      <!--HDFS-->
+      <execute-stage service="HDFS" component="NAMENODE">
+        <task xsi:type="configure" id="hdp_2_3_0_0_modify_hadoop_env"/>
+      </execute-stage>
+
+      <execute-stage service="HDFS" component="NAMENODE">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hdfs_adjust_ranger_plugin"/>
+      </execute-stage>
+
+      <execute-stage service="HDFS" component="NAMENODE">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hdfs_transition_ranger_hdfs_policy"/>
+      </execute-stage>
+
+      <execute-stage service="HDFS" component="NAMENODE">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hdfs_transition_ranger_hdfs_audit"/>
+      </execute-stage>
+
+      <execute-stage service="HDFS" component="NAMENODE">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hdfs_transition_ranger_hdfs_security"/>
+      </execute-stage>
+
+      <execute-stage service="HDFS" component="NAMENODE">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hdfs_ranger_hdfs_delete_old_properties"/>
+      </execute-stage>
+
+      <!--YARN-->
+      <execute-stage service="MAPREDUCE2" component="HISTORYSERVER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_mapreduce2_adjust_history_server"/>
+      </execute-stage>
+
+      <execute-stage service="YARN" component="APP_TIMELINE_SERVER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_yarn_ats_enable_recovery"/>
+      </execute-stage>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_yarn_rm_disable_node_labels"/>
+      </execute-stage>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_yarn_rm_clear_default_node_label_expression"/>
+      </execute-stage>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_yarn_rm_check_cs_root_def_capacity"/>
+      </execute-stage>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_yarn_rm_check_cs_root_max_capacity"/>
+      </execute-stage>
+
+
+      <!--HBASE-->
+      <execute-stage service="HBASE" component="HBASE_MASTER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hbase_master_adjust_phoenix_scheduler_factory"/>
+      </execute-stage>
+
+      <execute-stage service="HBASE" component="HBASE_MASTER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hbase_master_adjust_phoenix_rpc_controller_factory"/>
+      </execute-stage>
+
+      <execute-stage service="HBASE" component="HBASE_MASTER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hbase_master_set_global_memstore_size"/>
+      </execute-stage>
+
+      <execute-stage service="HBASE" component="HBASE_MASTER">
+        <task xsi:type="server_action" summary="Calculating HBase Properties" class="org.apache.ambari.server.serveraction.upgrades.HBaseConfigCalculation" />
+      </execute-stage>
+
+      <execute-stage service="HBASE" component="HBASE_MASTER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hbase_master_adjust_phoenix_indexed_wal_edit_codec"/>
+      </execute-stage>
+
+      <execute-stage service="HBASE" component="HBASE_MASTER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hbase_master_adjust_authorization_coprocessors"/>
+      </execute-stage>
+
+      <execute-stage service="HBASE" component="HBASE_MASTER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hbase_master_transition_ranger_hbase_policy"/>
+      </execute-stage>
+
+      <execute-stage service="HBASE" component="HBASE_MASTER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hbase_master_transition_ranger_hbase_audit"/>
+      </execute-stage>
+
+      <execute-stage service="HBASE" component="HBASE_MASTER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hbase_master_copy_ranger_policies"/>
+      </execute-stage>
+
+      <execute-stage service="HBASE" component="HBASE_MASTER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hbase_master_delete_old_ranger_properties"/>
+      </execute-stage>
+
+
+      <!--TEZ-->
+      <execute-stage service="TEZ" component="TEZ_CLIENT">
+        <task xsi:type="configure" id="hdp_2_3_0_0_tez_client_adjust_properties"/>
+      </execute-stage>
+
+
+      <!--HIVE-->
+      <execute-stage service="HIVE" component="HIVE_SERVER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_replace_auth_manager"/>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_SERVER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_configure_authentification"/>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_SERVER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_configure_ranger_policy"/>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_SERVER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_configure_ranger_security"/>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_SERVER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_configure_ranger_audit"/>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_SERVER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_remove_deprecated_ranger_properties"/>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="WEBHCAT_SERVER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_webhcat_server_update_env"/>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="WEBHCAT_SERVER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_webhcat_server_update_configuration_paths"/>
+      </execute-stage>
+
+
+      <!--OOZIE-->
+      <execute-stage service="OOZIE" component="OOZIE_SERVER">
+        <task xsi:type="configure" id="hdp_2_3_0_0_oozie_remove_redundant_configurations"/>
+      </execute-stage>
+
+
+      <!--KNOX-->
+      <execute-stage service="KNOX" component="KNOX_GATEWAY">
+        <task xsi:type="configure" id="hdp_2_3_0_0_knox_configure_ranger_policy"/>
+      </execute-stage>
+
+      <execute-stage service="KNOX" component="KNOX_GATEWAY">
+        <task xsi:type="configure" id="hdp_2_3_0_0_knox_configure_ranger_knox_audit"/>
+      </execute-stage>
+
+      <execute-stage service="KNOX" component="KNOX_GATEWAY">
+        <task xsi:type="configure" id="hdp_2_3_0_0_knox_remove_deprecated_ranger_properties"/>
+      </execute-stage>
+
+      <!--STORM-->
+      <execute-stage service="STORM" component="NIMBUS">
+        <task xsi:type="configure" id="hdp_2_3_0_0_nimbus_convert_nimbus_host_to_seeds"/>
+      </execute-stage>
+
+      <execute-stage service="STORM" component="NIMBUS">
+        <task xsi:type="configure" id="hdp_2_3_0_0_nimbus_update_env_vars"/>
+      </execute-stage>
+
+      <execute-stage service="STORM" component="NIMBUS">
+        <task xsi:type="configure" id="hdp_2_3_0_0_nimbus_configure_ranger_policy"/>
+      </execute-stage>
+
+      <execute-stage service="STORM" component="NIMBUS">
+        <task xsi:type="configure" id="hdp_2_3_0_0_nimbus_configure_ranger_audit"/>
+      </execute-stage>
+
+      <execute-stage service="STORM" component="NIMBUS">
+        <task xsi:type="configure" id="hdp_2_3_0_0_nimbus_remove_deprecated_ranger_properties"/>
+      </execute-stage>
+
+    </group>
+
+
+    <!-- After processing this group, will change the effective Stack of the UpgradeContext object. -->
+    <group xsi:type="update-stack" name="UPDATE_DESIRED_STACK_ID" title="Update Desired Stack Id">
+      <execute-stage title="Update Desired Stack Id" service="" component="">
+        <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.UpdateDesiredStackAction">
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="cluster" name="ALL_HOST_OPS" title="Set Version On All Hosts">
+      <skippable>true</skippable>
+      <execute-stage title="Update stack to {{version}}">
+        <task xsi:type="execute">
+          <script>scripts/ru_set_all.py</script>
+          <function>actionexecute</function>
+        </task>
+      </execute-stage>
+    </group>
+
+    <!-- Now, restart all of the services. -->
+
+    <group xsi:type="restart" name="ZOOKEEPER" title="Zookeeper">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="ZOOKEEPER">
+        <service-check>false</service-check>
+        <component>ZOOKEEPER_SERVER</component>
+        <component>ZOOKEEPER_CLIENT</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="RANGER" title="Ranger">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="RANGER">
+        <component>RANGER_ADMIN</component>
+        <component>RANGER_USERSYNC</component>
+        <component>RANGER_KMS_SERVER</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="HDFS" title="HDFS">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="HDFS">
+        <component>JOURNALNODE</component>
+        <component>ZKFC</component>
+        <component>NAMENODE</component>
+        <component>SECONDARY_NAMENODE</component>
+        <component>DATANODE</component>
+        <component>HDFS_CLIENT</component>
+      </service>
+    </group>
+
+    <group xsi:type="cluster" name="HDFS_LEAFE_SAFEMODE" title="HDFS - Wait to leave Safemode">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <direction>UPGRADE</direction>
+
+      <execute-stage service="HDFS" component="NAMENODE" title="Wait to leave Safemode">
+        <task xsi:type="execute" hosts="master" summary="Wait for NameNode to leave Safemode">
+          <script>scripts/namenode.py</script>
+          <function>wait_for_safemode_off</function>
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="restart" name="MR and YARN" title="MR and YARN">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="MAPREDUCE2">
+        <component>HISTORYSERVER</component>
+        <component>MAPREDUCE2_CLIENT</component>
+      </service>
+      <service name="YARN">
+        <component>APP_TIMELINE_SERVER</component>
+        <component>RESOURCEMANAGER</component>
+        <component>NODEMANAGER</component>
+        <component>YARN_CLIENT</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="HBASE" title="HBASE">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="HBASE">
+        <component>HBASE_MASTER</component>
+        <component>HBASE_REGIONSERVER</component>
+        <component>HBASE_CLIENT</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="CLIENTS" title="Tez, Pig, Sqoop Clients">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+
+      <service name="TEZ">
+        <component>TEZ_CLIENT</component>
+      </service>
+
+      <service name="PIG">
+        <component>PIG</component>
+      </service>
+
+      <service name="SQOOP">
+        <component>SQOOP</component>
+      </service>
+    </group>
+
+    <group name="SERVICE_CHECK" title="All Service Checks" xsi:type="service-check">
+      <skippable>true</skippable>
+      <direction>UPGRADE</direction>
+      <priority>
+        <service>HDFS</service>
+        <service>YARN</service>
+        <service>MAPREDUCE2</service>
+        <service>HBASE</service>
+      </priority>
+    </group>
+
+    <group xsi:type="restart" name="HIVE" title="Hive">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="HIVE">
+        <component>HIVE_METASTORE</component>
+        <component>HIVE_SERVER</component>
+        <component>WEBHCAT_SERVER</component>
+        <component>HIVE_CLIENT</component>
+        <component>HCAT</component>
+      </service>
+    </group>
+
+    <!-- Upgrade Oozie DB only on Upgrade direction, and always create a new ShareLib. -->
+    <group name="Upgrade Oozie" title="Upgrade Oozie Database">
+      <direction>UPGRADE</direction>
+      <skippable>true</skippable>
+      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Upgrade Oozie Database">
+        <task xsi:type="execute" hosts="any" summary="Upgrading the database and creating a new sharelib">
+          <script>scripts/oozie_server_upgrade.py</script>
+          <function>upgrade_oozie_database_and_sharelib</function>
+        </task>
+      </execute-stage>
+    </group>
+
+    <!-- Only create the ShareLib folder during a Downgrade. -->
+    <group name="Downgrade Oozie" title="Downgrade Oozie ShareLib">
+      <direction>DOWNGRADE</direction>
+      <skippable>true</skippable>
+      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Downgrade Oozie ShareLib">
+        <task xsi:type="execute" hosts="any" summary="Upgrading the database and creating a new sharelib">
+          <script>scripts/oozie_server_upgrade.py</script>
+          <function>create_sharelib</function>
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="restart" name="OOZIE" title="Oozie">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="OOZIE">
+        <component>OOZIE_SERVER</component>
+        <component>OOZIE_CLIENT</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="FALCON" title="Falcon">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="FALCON">
+        <component>FALCON_SERVER</component>
+        <component>FALCON_CLIENT</component>        
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="KAFKA" title="Kafka">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="KAFKA">
+        <component>KAFKA_BROKER</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="KNOX" title="Knox">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="KNOX">
+        <component>KNOX_GATEWAY</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="STORM" title="Storm">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="STORM">
+        <component>NIMBUS</component>
+        <component>STORM_REST_API</component>
+        <component>SUPERVISOR</component>
+        <component>STORM_UI_SERVER</component>
+        <component>DRPC_SERVER</component>
+      </service>
+
+      <execute-stage service="STORM" component="DRPC_SERVER" title="Rebuild Storm Topology">
+        <task xsi:type="manual">
+          <message>Please rebuild your topology using the new Storm version dependencies and resubmit it using the newly created jar.</message>
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="restart" name="FLUME" title="Flume">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="FLUME">
+        <component>FLUME_HANDLER</component>
+      </service>
+    </group>
+
+    <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
+      <skippable>true</skippable>
+      <execute-stage title="Check Unhealthy Hosts" id="unhealthy-hosts">
+        <task xsi:type="manual">
+          <message>The following hosts were unhealthy and should be resolved before finalizing can be completed: {{hosts.unhealthy}}</message>
+        </task>
+      </execute-stage>
+      
+      <execute-stage title="Confirm Finalize">
+        <direction>UPGRADE</direction>
+        <task xsi:type="manual">
+          <message>Please confirm you are ready to finalize.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HDFS" component="NAMENODE" title="Execute HDFS Finalize">
+        <task xsi:type="execute" hosts="master">
+          <script>scripts/namenode.py</script>
+          <function>finalize_non_rolling_upgrade</function>
+        </task>
+      </execute-stage>
+
+      <execute-stage title="Save Cluster State" service="" component="">
+        <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FinalizeUpgradeAction">
+        </task>
+      </execute-stage>
+    </group>
+  </order>
+</upgrade>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/e652b358/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
new file mode 100644
index 0000000..e92a413
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
@@ -0,0 +1,519 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+
+<upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+  <target>2.3.*.*</target>
+  <target-stack>HDP-2.3</target-stack>
+  <type>NON_ROLLING</type>
+  <prechecks>
+    <!--TODO: do we have any?-->
+  </prechecks>
+  <order>
+    <group xsi:type="cluster" name="PRE_CLUSTER" title="Prepare Upgrade">
+      <skippable>true</skippable>
+      <direction>UPGRADE</direction>
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues">
+        <task xsi:type="manual">
+          <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="SLIDER" component="SLIDER" title="Stop Long Running Applications on Slider">
+        <task xsi:type="manual">
+          <message>Before continuing, please stop all long-running applications deployed using Slider. E.g., su - yarn "/usr/hdp/current/slider-client/bin/slider stop &lt;app_name&gt;"</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="STORM" component="NIMBUS" title="Stop Storm Topologies">
+        <task xsi:type="manual">
+          <message>Before continuing, please deactivate and kill any currently running topologies.</message>
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="stop" name="Stop High-Level Daemons" title="Stop Daemons for High-Level Services">
+      <skippable>true</skippable>
+      <service-check>false</service-check>
+
+      <service name="FLUME">
+        <component>FLUME_HANDLER</component>
+      </service>
+
+      <service name="STORM">
+        <component>DRPC_SERVER</component>
+        <component>STORM_UI_SERVER</component>
+        <component>SUPERVISOR</component>
+        <component>STORM_REST_API</component>
+        <component>NIMBUS</component>
+      </service>
+
+      <service name="KNOX">
+        <component>KNOX_GATEWAY</component>
+      </service>
+
+      <service name="KAFKA">
+        <component>KAFKA_BROKER</component>
+      </service>
+
+      <service name="FALCON">
+        <component>FALCON_SERVER</component>
+      </service>
+
+      <service name="OOZIE">
+        <component>OOZIE_SERVER</component>
+      </service>
+
+      <service name="SPARK">
+        <component>SPARK_JOBHISTORYSERVER</component>
+      </service>
+
+      <service name="HIVE">
+        <component>WEBHCAT_SERVER</component>
+        <component>HIVE_SERVER</component>
+        <component>HIVE_METASTORE</component>
+      </service>
+
+      <service name="YARN">
+        <component>NODEMANAGER</component>        
+        <component>RESOURCEMANAGER</component>
+        <component>APP_TIMELINE_SERVER</component>
+      </service>
+
+      <service name="MAPREDUCE2">
+        <component>HISTORYSERVER</component>
+      </service>
+    </group>
+
+    <group xsi:type="cluster" name="Backups" title="Take Backups">
+      <direction>UPGRADE</direction>
+      <skippable>true</skippable>
+      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Backup Oozie Database">
+        <task xsi:type="manual">
+          <message>Before continuing, please backup the Oozie Server database on {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_METASTORE" title="Backup Hive Metastore">
+        <task xsi:type="manual">
+          <message>Before continuing, please backup the Hive Metastore database located on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <!--TODO(dlysnichenko): This step is missing in 2.3->2.3+ RU pack -->
+      <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Backup Knox Data">
+        <task xsi:type="manual">
+          <message>Before continuing, please backup the Knox data. E.g., "cp -RL /etc/knox/data/security ~/knox_backup" on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HBASE" component="HBASE_MASTER" title="Snapshot HBASE">
+        <task xsi:type="execute" hosts="master">
+          <script>scripts/hbase_upgrade.py</script>
+          <function>take_snapshot</function>        <!-- TODO (Alejandro), this function used to be called just "snapshot" -->
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HDFS" component="NAMENODE" title="Snapshot HDFS">
+        <task xsi:type="execute" hosts="master">  <!-- TODO (Alejandro), this can be any NameNode, not just the active. -->
+          <script>scripts/namenode.py</script>
+          <function>prepare_non_rolling_upgrade</function>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Backup Ranger">
+        <task xsi:type="manual">
+          <message>Before continuing, please backup the Ranger Admin database and Ranger Audit database on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Pre Upgrade Ranger KMS">
+        <task xsi:type="manual">
+          <message>Before continuing, please backup Ranger KMS database</message>
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="stop" name="Stop Low-Level Daemons" title="Stop Daemons for Low-Level Services">
+      <skippable>true</skippable>
+      <service-check>false</service-check>
+
+      <service name="HBASE">
+        <component>HBASE_REGIONSERVER</component>
+        <component>HBASE_MASTER</component>
+        <component>PHOENIX_QUERY_SERVER</component>
+      </service>
+
+      <service name="HDFS">
+        <component>DATANODE</component>
+        <component>NAMENODE</component>
+        <component>SECONDARY_NAMENODE</component>   <!-- TODO (Alejandro), may not be present. -->
+        <component>ZKFC</component>                 <!-- TODO (Alejandro), may not be present. -->
+        <component>JOURNALNODE</component>          <!-- TODO (Alejandro), may not be present. -->
+        <component>NFS_GATEWAY</component>
+      </service>
+
+      <service name="RANGER">
+        <component>RANGER_USERSYNC</component>
+        <component>RANGER_ADMIN</component>
+        <component>RANGER_KMS_SERVER</component>
+      </service>
+
+      <service name="ZOOKEEPER">
+        <component>ZOOKEEPER_SERVER</component>
+      </service>
+    </group>
+
+    <group name="Marker for Downgrade" title="Marker for Downgrade">
+      <direction>UPGRADE</direction>
+      <!-- TODO (Alejandro), if the user attempts a downgrade before this step, they can simply abort. -->
+    </group>
+
+    <group xsi:type="cluster" name="Restore Backups" title="Restore Backups">
+      <direction>DOWNGRADE</direction>
+      <skippable>true</skippable>
+
+      <!-- If the user attempts a downgrade after this point, they will need to restore backups
+      before starting any of the services. -->
+
+      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Backup Oozie Database">
+        <task xsi:type="manual">
+          <message>Before continuing, please restore the Oozie Server database on {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_METASTORE" title="Backup Hive Metastore">
+        <task xsi:type="manual">
+          <message>Before continuing, please restore the Hive Metastore database located on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Backup Knox Data">
+        <task xsi:type="manual">
+          <message>Before continuing, please restore the Knox data. E.g., "cp -RL ~/knox_backup/* /etc/knox/data/security/" on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HBASE" component="HBASE_MASTER" title="Snapshot HBASE">
+        <task xsi:type="execute" hosts="master">
+          <script>scripts/hbase_upgrade.py</script>
+          <function>restore_snapshot</function>   <!-- TODO (Alejandro), this function name is new. -->
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HDFS" component="NAMENODE" title="Snapshot HDFS">
+        <task xsi:type="execute" hosts="master">  <!-- TODO (Alejandro), this can be any NameNode, not just the active. -->
+          <script>scripts/namenode.py</script>
+          <function>restore_snapshot</function>    <!-- TODO (Alejandro), this function doesn't exist yet. -->
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Backup Ranger">
+        <task xsi:type="manual">
+          <message>Before continuing, please restore the Ranger Admin database and Ranger Audit database on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+    </group>
+
+    <!-- After processing this group, will change the effective Stack of the UpgradeContext object. -->
+    <group xsi:type="update-stack" name="UPDATE_DESIRED_STACK_ID" title="Update Desired Stack Id">
+      <execute-stage title="Update Desired Stack Id" service="" component="">
+        <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.UpdateDesiredStackAction">
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="cluster" name="ALL_HOST_OPS" title="Set Version On All Hosts">
+      <skippable>true</skippable>
+      <execute-stage title="Update stack to {{version}}">
+        <task xsi:type="execute">
+          <script>scripts/ru_set_all.py</script>
+          <function>actionexecute</function>      
+        </task>
+      </execute-stage>
+    </group>
+
+    <!-- Now, restart all of the services. -->
+
+    <group xsi:type="restart" name="ZOOKEEPER" title="Zookeeper">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="ZOOKEEPER">
+        <service-check>false</service-check>        <!-- TODO (Alejandro), enable service-check once done testing -->
+        <component>ZOOKEEPER_SERVER</component>
+        <component>ZOOKEEPER_CLIENT</component>     
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="RANGER" title="Ranger">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="RANGER">
+        <component>RANGER_ADMIN</component>
+        <component>RANGER_USERSYNC</component>
+        <component>RANGER_KMS_SERVER</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="HDFS" title="HDFS">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="HDFS">
+        <component>JOURNALNODE</component>
+        <component>ZKFC</component>
+        <component>NAMENODE</component>
+        <component>SECONDARY_NAMENODE</component>
+        <component>DATANODE</component>
+        <component>HDFS_CLIENT</component>
+        <component>NFS_GATEWAY</component>
+      </service>
+    </group>
+
+    <group xsi:type="cluster" name="HDFS_LEAFE_SAFEMODE" title="HDFS - Wait to leave Safemode">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <direction>UPGRADE</direction>
+
+      <execute-stage service="HDFS" component="NAMENODE" title="Wait to leave Safemode">
+        <task xsi:type="execute" hosts="master" summary="Wait for NameNode to leave Safemode">
+          <script>scripts/namenode.py</script>
+          <function>wait_for_safemode_off</function>
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="restart" name="MR and YARN" title="MR and YARN">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="MAPREDUCE2">
+        <component>HISTORYSERVER</component>
+        <component>MAPREDUCE2_CLIENT</component>    
+      </service>
+      <service name="YARN">
+        <component>APP_TIMELINE_SERVER</component>
+        <component>RESOURCEMANAGER</component>
+        <component>NODEMANAGER</component>          
+        <component>YARN_CLIENT</component>          
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="HBASE" title="HBASE">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="HBASE">
+        <component>HBASE_MASTER</component>
+        <component>HBASE_REGIONSERVER</component>   
+        <component>HBASE_CLIENT</component>         
+        <component>PHOENIX_QUERY_SERVER</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="CLIENTS" title="Tez, Pig, Sqoop Clients">  
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+
+      <service name="TEZ">
+        <component>TEZ_CLIENT</component>
+      </service>
+
+      <service name="MAHOUT">
+        <component>MAHOUT</component>
+      </service>
+
+      <service name="PIG">
+        <component>PIG</component>
+      </service>
+
+      <service name="SQOOP">
+        <component>SQOOP</component>
+      </service>
+    </group>
+
+    <group name="SERVICE_CHECK" title="All Service Checks" xsi:type="service-check">
+      <skippable>true</skippable>
+      <direction>UPGRADE</direction>
+      <priority>
+        <service>HDFS</service>
+        <service>YARN</service>
+        <service>MAPREDUCE2</service>
+        <service>HBASE</service>
+      </priority>
+    </group>
+
+    <group xsi:type="restart" name="HIVE" title="Hive">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="HIVE">
+        <component>HIVE_METASTORE</component>
+        <component>HIVE_SERVER</component>
+        <component>WEBHCAT_SERVER</component>
+        <component>HIVE_CLIENT</component>          
+        <component>HCAT</component>                 
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="SPARK" title="Spark">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="SPARK">
+        <component>SPARK_JOBHISTORYSERVER</component>
+        <component>SPARK_CLIENT</component>         
+      </service>
+    </group>
+
+    <!-- Upgrade Oozie DB only on Upgrade direction, and always create a new ShareLib. -->
+    <group name="Upgrade Oozie" title="Upgrade Oozie Database">
+      <direction>UPGRADE</direction>
+      <skippable>true</skippable>
+      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Upgrade Oozie Database">
+        <task xsi:type="execute" hosts="any" summary="Upgrading the database and creating a new sharelib">
+          <script>scripts/oozie_server_upgrade.py</script>
+          <function>upgrade_oozie_database_and_sharelib</function>
+        </task>
+      </execute-stage>
+    </group>
+
+    <!-- Only create the ShareLib folder during a Downgrade. -->
+    <group name="Downgrade Oozie" title="Downgrade Oozie ShareLib">
+      <direction>DOWNGRADE</direction>
+      <skippable>true</skippable>
+      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Downgrade Oozie ShareLib">
+        <task xsi:type="execute" hosts="any" summary="Upgrading the database and creating a new sharelib">
+          <script>scripts/oozie_server_upgrade.py</script>
+          <function>create_sharelib</function>
+        </task>
+      </execute-stage>
+    </group>
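+
+    <!-- The <direction> element used in the two groups above restricts a
+         group (or stage) to a single orchestration direction; groups that
+         omit it run during both upgrade and downgrade. -->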
+
+    <group xsi:type="restart" name="OOZIE" title="Oozie">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="OOZIE">
+        <component>OOZIE_SERVER</component>
+        <component>OOZIE_CLIENT</component>         
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="FALCON" title="Falcon">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="FALCON">
+        <component>FALCON_SERVER</component>
+        <component>FALCON_CLIENT</component>        
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="KAFKA" title="Kafka">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="KAFKA">
+        <component>KAFKA_BROKER</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="KNOX" title="Knox">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="KNOX">
+        <component>KNOX_GATEWAY</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="STORM" title="Storm">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="STORM">
+        <component>NIMBUS</component>
+        <component>STORM_REST_API</component>
+        <component>SUPERVISOR</component>
+        <component>STORM_UI_SERVER</component>
+        <component>DRPC_SERVER</component>
+      </service>
+
+      <!-- TODO (Alejandro), does this work? -->
+      <execute-stage service="STORM" component="DRPC_SERVER" title="Rebuild Storm Topology">
+        <task xsi:type="manual">
+          <message>Please rebuild your topology using the new Storm version dependencies and resubmit it using the newly created jar.</message>
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="restart" name="ACCUMULO" title="Accumulo">
+      <skippable>true</skippable>
+      <service name="ACCUMULO">
+        <component>ACCUMULO_MASTER</component>
+        <component>ACCUMULO_TSERVER</component>
+        <component>ACCUMULO_MONITOR</component>
+        <component>ACCUMULO_GC</component>
+        <component>ACCUMULO_TRACER</component>
+        <component>ACCUMULO_CLIENT</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="SLIDER" title="Slider">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="SLIDER">
+        <component>SLIDER</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="FLUME" title="Flume">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="FLUME">
+        <component>FLUME_HANDLER</component>
+      </service>
+    </group>
+
+    <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
+      <skippable>true</skippable>
+      <execute-stage title="Check Unhealthy Hosts" id="unhealthy-hosts">
+        <task xsi:type="manual">
+          <message>The following hosts are unhealthy and their issues must be resolved before finalization can complete: {{hosts.unhealthy}}</message>
+        </task>
+      </execute-stage>
+      
+      <execute-stage title="Confirm Finalize">
+        <direction>UPGRADE</direction>
+        <task xsi:type="manual">
+          <message>Please confirm you are ready to finalize.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HDFS" component="NAMENODE" title="Execute HDFS Finalize">
+        <task xsi:type="execute" hosts="master">      <!-- TODO (Alejandro), what happens if there's no HA. -->
+          <script>scripts/namenode.py</script>
+          <function>finalize_non_rolling_upgrade</function>
+        </task>
+      </execute-stage>
+
+      <execute-stage title="Save Cluster State" service="" component="">
+        <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FinalizeUpgradeAction">
+        </task>
+      </execute-stage>
+    </group>
+  </order>
+</upgrade>
\ No newline at end of file


[22/50] [abbrv] ambari git commit: AMBARI-13508. Kerberos: editing or saving KDC admin creds POST fails

Posted by nc...@apache.org.
AMBARI-13508. Kerberos: editing or saving KDC admin creds POST fails


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/381c49f4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/381c49f4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/381c49f4

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 381c49f459815117e22d5d45ccf6b408d5760aa2
Parents: 306e44a
Author: Alex Antonenko <hi...@gmail.com>
Authored: Thu Oct 22 16:24:01 2015 +0300
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Thu Oct 22 16:24:01 2015 +0300

----------------------------------------------------------------------
 ambari-web/app/utils/credentials.js             | 22 ++++--
 .../kdc_credentials_controller_mixin_test.js    | 72 ++++++++++++++------
 2 files changed, 70 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/381c49f4/ambari-web/app/utils/credentials.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/credentials.js b/ambari-web/app/utils/credentials.js
index cc45288..cf06908 100644
--- a/ambari-web/app/utils/credentials.js
+++ b/ambari-web/app/utils/credentials.js
@@ -87,14 +87,20 @@ module.exports = {
   createOrUpdateCredentials: function(clusterName, alias, resource) {
     var self = this;
     var dfd = $.Deferred();
-    this.createCredentials(clusterName, alias, resource).then(function() {
-      dfd.resolve();
-    }, function() {
+    this.getCredential(clusterName, alias).then(function() {
+      // update previously stored credentials
       self.updateCredentials(clusterName, alias, resource).always(function() {
         var status = arguments[1];
         var result = arguments[2];
         dfd.resolve(status === "success", result);
       });
+    }, function() {
+      // create credentials if they do not exist
+      self.createCredentials(clusterName, alias, resource).always(function() {
+        var status = arguments[1];
+        var result = arguments[2];
+        dfd.resolve(status === "success", result);
+      });
     });
     return dfd.promise();
   },
@@ -105,6 +111,7 @@ module.exports = {
    * @member utils.credentials
    * @param {string} clusterName cluster name
    * @param {string} alias credential alias name e.g. "kdc.admin.credentials"
+   * @param {function} [callback] success callback to invoke; the credential is passed as its first argument
    * @returns {$.Deferred} promise object
    */
   getCredential: function(clusterName, alias, callback) {
@@ -116,14 +123,19 @@ module.exports = {
         alias: alias,
         callback: callback
       },
-      success: 'getCredentialSuccessCallback'
+      success: 'getCredentialSuccessCallback',
+      error: 'getCredentialErrorCallback'
     });
   },
 
   getCredentialSuccessCallback: function(data, opt, params) {
-    params.callback(Em.getWithDefault(data, 'Credential', null));
+    if (params.callback) {
+      params.callback(Em.getWithDefault(data, 'Credential', null));
+    }
   },
 
+  getCredentialErrorCallback: function() {},
+
   /**
    * Update credential by alias and cluster name
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/381c49f4/ambari-web/test/mixins/common/kdc_credentials_controller_mixin_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/mixins/common/kdc_credentials_controller_mixin_test.js b/ambari-web/test/mixins/common/kdc_credentials_controller_mixin_test.js
index 2553397..9f05cef 100644
--- a/ambari-web/test/mixins/common/kdc_credentials_controller_mixin_test.js
+++ b/ambari-web/test/mixins/common/kdc_credentials_controller_mixin_test.js
@@ -76,6 +76,12 @@ describe('App.KDCCredentialsControllerMixin', function() {
         value: value
       });
     };
+    var resolveWith = function(data) {
+      return $.Deferred().resolve(data).promise();
+    };
+    var rejectWith = function(data) {
+      return $.Deferred().reject(data).promise();
+    };
     [
       {
         configs: [
@@ -83,6 +89,9 @@ describe('App.KDCCredentialsControllerMixin', function() {
           createConfig('admin_principal', 'admin/admin'),
           createConfig('persist_credentials', 'true')
         ],
+        credentialsExists: false,
+        createCredentialFnCalled: true,
+        updateCredentialFnCalled: false,
         e: [
           'testName',
           'kdc.admin.credential',
@@ -92,7 +101,27 @@ describe('App.KDCCredentialsControllerMixin', function() {
             principal: 'admin/admin'
           }
         ],
-        message: 'Save Admin credentials checkbox checked, credentials should be saved as `persisted`'
+        message: 'Save Admin credentials checkbox checked, no stored credentials, should be created as `persisted`'
+      },
+      {
+        configs: [
+          createConfig('admin_password', 'admin'),
+          createConfig('admin_principal', 'admin/admin'),
+          createConfig('persist_credentials', 'true')
+        ],
+        credentialsExists: true,
+        createCredentialFnCalled: false,
+        updateCredentialFnCalled: true,
+        e: [
+          'testName',
+          'kdc.admin.credential',
+          {
+            type: 'persisted',
+            key: 'admin',
+            principal: 'admin/admin'
+          }
+        ],
+        message: 'Save Admin credentials checkbox checked, credentials already stored and should be updated as `persisted`'
       },
       {
         configs: [
@@ -100,6 +129,9 @@ describe('App.KDCCredentialsControllerMixin', function() {
           createConfig('admin_principal', 'admin/admin'),
           createConfig('persist_credentials', 'false')
         ],
+        credentialsExists: true,
+        createCredentialFnCalled: false,
+        updateCredentialFnCalled: true,
         e: [
           'testName',
           'kdc.admin.credential',
@@ -109,7 +141,7 @@ describe('App.KDCCredentialsControllerMixin', function() {
             principal: 'admin/admin'
           }
         ],
-        message: 'Save Admin credentials checkbox un-checked, credentials should be saved as `temporary`'
+        message: 'Save Admin credentials checkbox unchecked, credentials already stored and should be updated as `temporary`'
       },
       {
         configs: [
@@ -117,6 +149,9 @@ describe('App.KDCCredentialsControllerMixin', function() {
           createConfig('admin_principal', 'admin/admin'),
           createConfig('persist_credentials', 'false')
         ],
+        credentialsExists: false,
+        createCredentialFnCalled: true,
+        updateCredentialFnCalled: false,
         e: [
           'testName',
           'kdc.admin.credential',
@@ -126,24 +161,20 @@ describe('App.KDCCredentialsControllerMixin', function() {
             principal: 'admin/admin'
           }
         ],
-        credentialWasSaved: true,
-        message: 'Save Admin credentials checkbox checked, credential was saved, credentials should be saved as `temporary`, #updateKDCCredentials should be called'
+        message: 'Save Admin credentials checkbox unchecked, no stored credentials, should be created as `temporary`'
       }
     ].forEach(function(test) {
       it(test.message, function() {
         sinon.stub(App, 'get').withArgs('clusterName').returns('testName');
+        sinon.stub(credentialsUtils, 'getCredential', function(clusterName, alias) {
+          return test.credentialsExists ? resolveWith() : rejectWith();
+        });
         sinon.stub(credentialsUtils, 'createCredentials', function() {
-          if (test.credentialWasSaved) {
-            return $.Deferred().reject().promise();
-          } else {
-            return $.Deferred().resolve().promise();
-          }
+          return resolveWith();
+        });
+        sinon.stub(credentialsUtils, 'updateCredentials', function() {
+          return resolveWith();
         });
-        if (test.credentialWasSaved) {
-          sinon.stub(credentialsUtils, 'updateCredentials', function() {
-            return $.Deferred().resolve().promise();
-          });
-        }
 
         mixedObject.reopen({
           isStorePersisted: function() {
@@ -151,14 +182,17 @@ describe('App.KDCCredentialsControllerMixin', function() {
           }.property()
         });
         mixedObject.createKDCCredentials(test.configs);
-        assert.isTrue(credentialsUtils.createCredentials.calledOnce, 'credentialsUtils#createCredentials called');
-        assert.deepEqual(credentialsUtils.createCredentials.args[0], test.e, 'credentialsUtils#createCredentials called with correct arguments');
+        assert.equal(credentialsUtils.createCredentials.calledOnce, test.createCredentialFnCalled, 'credentialsUtils#createCredentials called');
+        if (test.createCredentialFnCalled) {
+          assert.deepEqual(credentialsUtils.createCredentials.args[0], test.e, 'credentialsUtils#createCredentials called with correct arguments');
+        }
         credentialsUtils.createCredentials.restore();
-        if (test.credentialWasSaved) {
-          assert.isTrue(credentialsUtils.updateCredentials.calledOnce, 'credentialUtils#updateCredentials called');
+        assert.equal(credentialsUtils.updateCredentials.calledOnce, test.updateCredentialFnCalled, 'credentialsUtils#updateCredentials called');
+        if (test.updateCredentialFnCalled) {
           assert.deepEqual(credentialsUtils.updateCredentials.args[0], test.e, 'credentialUtils#updateCredentials called with correct arguments');
-          credentialsUtils.updateCredentials.restore();
         }
+        credentialsUtils.updateCredentials.restore();
+        credentialsUtils.getCredential.restore();
         App.get.restore();
       });
     });


[42/50] [abbrv] ambari git commit: AMBARI-13536. Timezone: UX (onechiporenko)

Posted by nc...@apache.org.
AMBARI-13536. Timezone: UX (onechiporenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4e2f5f86
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4e2f5f86
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4e2f5f86

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 4e2f5f8607c8b52a87475aed15aededca53802fe
Parents: 4b66a82
Author: Oleg Nechiporenko <on...@apache.org>
Authored: Fri Oct 23 12:13:45 2015 +0300
Committer: Oleg Nechiporenko <on...@apache.org>
Committed: Fri Oct 23 12:24:00 2015 +0300

----------------------------------------------------------------------
 ambari-web/app/styles/application.less          |  5 ++
 ambari-web/app/styles/common.less               |  1 +
 .../app/templates/common/chart/linear_time.hbs  | 50 ++++++++++----------
 3 files changed, 32 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4e2f5f86/ambari-web/app/styles/application.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/application.less b/ambari-web/app/styles/application.less
index 3b13d1f..491e59c 100644
--- a/ambari-web/app/styles/application.less
+++ b/ambari-web/app/styles/application.less
@@ -2208,6 +2208,11 @@ a:focus {
 }
 
 /*start chart/style graphs*/
+.chart-wrapper {
+  .timezone {
+    font-size: @smaller-font-size;
+  }
+}
 .chart-container {
   cursor: pointer;
   cursor: -moz-zoom-in;

http://git-wip-us.apache.org/repos/asf/ambari/blob/4e2f5f86/ambari-web/app/styles/common.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/common.less b/ambari-web/app/styles/common.less
index a0252d3..7bdfbdb 100644
--- a/ambari-web/app/styles/common.less
+++ b/ambari-web/app/styles/common.less
@@ -160,6 +160,7 @@
 @top-nav-menu-dropdown-bg-color: #ffffff;
 @top-nav-menu-dropdown-text-color: #333333;
 @default-font-size: 14px;
+@smaller-font-size: 12px;
 
 .editable-list-container.well{
   padding: 10px;

http://git-wip-us.apache.org/repos/asf/ambari/blob/4e2f5f86/ambari-web/app/templates/common/chart/linear_time.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/common/chart/linear_time.hbs b/ambari-web/app/templates/common/chart/linear_time.hbs
index 60937de..575839e 100644
--- a/ambari-web/app/templates/common/chart/linear_time.hbs
+++ b/ambari-web/app/templates/common/chart/linear_time.hbs
@@ -15,28 +15,30 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 }}
-
-<div {{bindAttr class="view.isReady:hide:show :screensaver :no-borders :chart-container"}}></div>
-<div {{bindAttr class="view.isReady::hidden :time-label"}}>
-  {{view.parentView.currentTimeState.name}}
-  <a {{bindAttr class="view.isExportButtonHidden:hidden :corner-icon :pull-right"}} href="#" {{action toggleFormatsList target="view"}}>
-   {{t common.export}} <i class="icon-save"></i>
-  </a>
-  {{view view.exportMetricsMenuView}}
-</div>
-{{#if view.isTimePagingEnable}}
-  <div {{bindAttr class="view.leftArrowVisible:visibleArrow :arrow-left"}} {{action "switchTimeBack" target="view.parentView"}}></div>
-{{/if}}
-<div {{bindAttr id="view.containerId"}} {{bindAttr class="view.isReady:show:hide view.containerClass :chart-container"}}>
-  <div {{bindAttr id="view.yAxisId"}} {{bindAttr class="view.yAxisClass :chart-y-axis"}}></div>
-  <div {{bindAttr id="view.xAxisId"}} {{bindAttr class="view.xAxisClass :chart-x-axis"}}></div>
-  <div {{bindAttr id="view.legendId"}} {{bindAttr class="view.legendClass :chart-legend"}}></div>
-  <div {{bindAttr id="view.chartId"}} {{bindAttr class="view.chartClass :chart"}}></div>
-  <div {{bindAttr id="view.titleId"}} {{bindAttr class="view.titleClass view.isExportButtonHidden::has-data :chart-title"}}>{{view.title}}</div>
-</div>
-{{#if view.isTimePagingEnable}}
-  <div {{bindAttr class="view.rightArrowVisible:visibleArrow :arrow-right"}} {{action "switchTimeForward" "forward" target="view.parentView"}}></div>
-{{/if}}
-<div class="timezone mtl15 mll">
-  {{t common.timezone}}: <strong>{{App.router.userSettingsController.userSettings.timezone.label}}</strong>
+<div class="chart-wrapper">
+  <div {{bindAttr class="view.isReady:hide:show :screensaver :no-borders :chart-container"}}></div>
+  <div {{bindAttr class="view.isReady::hidden :time-label"}}>
+    {{view.parentView.currentTimeState.name}}
+    <a {{bindAttr class="view.isExportButtonHidden:hidden :corner-icon :pull-right"}}
+        href="#" {{action toggleFormatsList target="view"}}>
+      {{t common.export}} <i class="icon-save"></i>
+    </a>
+    {{view view.exportMetricsMenuView}}
+  </div>
+  {{#if view.isTimePagingEnable}}
+    <div {{bindAttr class="view.leftArrowVisible:visibleArrow :arrow-left"}} {{action "switchTimeBack" target="view.parentView"}}></div>
+  {{/if}}
+  <div {{bindAttr id="view.containerId"}} {{bindAttr class="view.isReady:show:hide view.containerClass :chart-container"}}>
+    <div {{bindAttr id="view.yAxisId"}} {{bindAttr class="view.yAxisClass :chart-y-axis"}}></div>
+    <div {{bindAttr id="view.xAxisId"}} {{bindAttr class="view.xAxisClass :chart-x-axis"}}></div>
+    <div {{bindAttr id="view.legendId"}} {{bindAttr class="view.legendClass :chart-legend"}}></div>
+    <div {{bindAttr id="view.chartId"}} {{bindAttr class="view.chartClass :chart"}}></div>
+    <div {{bindAttr id="view.titleId"}} {{bindAttr class="view.titleClass view.isExportButtonHidden::has-data :chart-title"}}>{{view.title}}</div>
+  </div>
+  {{#if view.isTimePagingEnable}}
+    <div {{bindAttr class="view.rightArrowVisible:visibleArrow :arrow-right"}} {{action "switchTimeForward" "forward" target="view.parentView"}}></div>
+  {{/if}}
+  <div class="timezone mtl15 mll">
+    <strong>{{t common.timezone}}</strong>: {{App.router.userSettingsController.userSettings.timezone.label}}
+  </div>
 </div>
\ No newline at end of file


[32/50] [abbrv] ambari git commit: AMBARI-13158: error message is not accurate when a directory field value has a space (jaoki)

Posted by nc...@apache.org.
AMBARI-13158: error message is not accurate when a directory field value has a space (jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/063e79f6
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/063e79f6
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/063e79f6

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 063e79f686f849ca6c93f7a0c63ebfb02e6b005f
Parents: 0294343
Author: Jun Aoki <ja...@apache.org>
Authored: Thu Oct 22 16:30:52 2015 -0700
Committer: Jun Aoki <ja...@apache.org>
Committed: Thu Oct 22 16:31:08 2015 -0700

----------------------------------------------------------------------
 ambari-web/app/models/configs/objects/service_config_property.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/063e79f6/ambari-web/app/models/configs/objects/service_config_property.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/configs/objects/service_config_property.js b/ambari-web/app/models/configs/objects/service_config_property.js
index d58731c..2c7aa87 100644
--- a/ambari-web/app/models/configs/objects/service_config_property.js
+++ b/ambari-web/app/models/configs/objects/service_config_property.js
@@ -356,7 +356,7 @@ App.ServiceConfigProperty = Em.Object.extend({
             }
           } else {
             if (!validator.isValidDir(value)) {
-              this.set('errorMessage', 'Must be a slash or drive at the start');
+              this.set('errorMessage', 'Must be a slash or drive at the start, and must not contain whitespace');
               isError = true;
             }
           }
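
The validator.isValidDir helper referenced above is not part of this diff, so the snippet below is a hypothetical reconstruction that matches the new error message: each entry must begin with a slash (or a Windows drive letter) and contain no whitespace. The comma-splitting is an assumption based on Hadoop's comma-separated directory-list properties; the real ambari-web validator may differ in detail.

// Hypothetical reconstruction, not the actual ambari-web validator.
function isValidDir(value) {
  return String(value).split(',').every(function (dir) {
    // must start with "/" or a drive letter, and contain no whitespace
    return /^(\/|[a-zA-Z]:[\\/])\S*$/.test(dir);
  });
}

console.log(isValidDir('/hadoop/hdfs/data'));    // true
console.log(isValidDir('/grid/0,/grid/1'));      // true
console.log(isValidDir('/hadoop/hdfs/da ta'));   // false: embedded space
console.log(isValidDir('hadoop/hdfs/data'));     // false: no leading slash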


[43/50] [abbrv] ambari git commit: AMBARI-13537 ATS & JHS HA: UI changes are needed for QuickLink for JHS points to ViP. (atkach)

Posted by nc...@apache.org.
AMBARI-13537 ATS & JHS HA: UI changes are needed for QuickLink for JHS points to ViP. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e7f77b6b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e7f77b6b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e7f77b6b

Branch: refs/heads/branch-dev-patch-upgrade
Commit: e7f77b6b25fb3b29ca7fff40994e5f7a79c0025d
Parents: 4e2f5f8
Author: Andrii Tkach <at...@hortonworks.com>
Authored: Fri Oct 23 12:48:31 2015 +0300
Committer: Andrii Tkach <at...@hortonworks.com>
Committed: Fri Oct 23 12:48:31 2015 +0300

----------------------------------------------------------------------
 ambari-web/app/models/quick_links.js                | 8 ++++----
 ambari-web/app/views/common/quick_view_link_view.js | 5 +++++
 2 files changed, 9 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e7f77b6b/ambari-web/app/models/quick_links.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/quick_links.js b/ambari-web/app/models/quick_links.js
index 8fe6be6..1aac608 100644
--- a/ambari-web/app/models/quick_links.js
+++ b/ambari-web/app/models/quick_links.js
@@ -229,7 +229,7 @@ App.QuickLinks.FIXTURES = [
     label:'JobHistory UI',
     url:'%@://%@:%@',
     service_id: 'MAPREDUCE2',
-    template:'%@://%@:%@',
+    template:'%@://%@',
     http_config: 'mapreduce.jobhistory.webapp.address',
     https_config: 'mapreduce.jobhistory.webapp.https.address',
     site: 'mapred-site',
@@ -241,7 +241,7 @@ App.QuickLinks.FIXTURES = [
     label:'JobHistory logs',
     url:'%@://%@:%@/logs',
     service_id: 'MAPREDUCE2',
-    template:'%@://%@:%@/logs',
+    template:'%@://%@/logs',
     http_config: 'mapreduce.jobhistory.webapp.address',
     https_config: 'mapreduce.jobhistory.webapp.https.address',
     site: 'mapred-site',
@@ -253,7 +253,7 @@ App.QuickLinks.FIXTURES = [
     label:'JobHistory JMX',
     url:'%@://%@:%@/jmx',
     service_id: 'MAPREDUCE2',
-    template:'%@://%@:%@/jmx',
+    template:'%@://%@/jmx',
     http_config: 'mapreduce.jobhistory.webapp.address',
     https_config: 'mapreduce.jobhistory.webapp.https.address',
     site: 'mapred-site',
@@ -265,7 +265,7 @@ App.QuickLinks.FIXTURES = [
     label:'Thread Stacks',
     url:'%@://%@:%@/stacks',
     service_id: 'MAPREDUCE2',
-    template:'%@://%@:%@/stacks',
+    template:'%@://%@/stacks',
     http_config: 'mapreduce.jobhistory.webapp.address',
     https_config: 'mapreduce.jobhistory.webapp.https.address',
     site: 'mapred-site',

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7f77b6b/ambari-web/app/views/common/quick_view_link_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/quick_view_link_view.js b/ambari-web/app/views/common/quick_view_link_view.js
index 5421e67..89b855e 100644
--- a/ambari-web/app/views/common/quick_view_link_view.js
+++ b/ambari-web/app/views/common/quick_view_link_view.js
@@ -143,10 +143,15 @@ App.QuickViewLinks = Em.View.extend({
 
       quickLinks = this.get('content.quickLinks').map(function (item) {
         var protocol = self.setProtocol(item.get('service_id'), self.get('configProperties'), self.ambariProperties(), item);
+        var siteConfigs = {};
+
         if (item.get('template')) {
           var port = item.get('http_config') && self.setPort(item, protocol);
           if (['FALCON', 'OOZIE', 'ATLAS'].contains(item.get('service_id'))) {
             item.set('url', item.get('template').fmt(protocol, hosts[0], port, App.router.get('loginName')));
+          } else if (item.get('service_id') === 'MAPREDUCE2') {
+            siteConfigs = self.get('configProperties').findProperty('type', item.get('site')).properties;
+            item.set('url', item.get('template').fmt(protocol, siteConfigs[item.get(protocol + '_config')]));
           } else {
             item.set('url', item.get('template').fmt(protocol, hosts[0], port));
           }
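
To see why the MAPREDUCE2 templates in quick_links.js lose their trailing ':%@' slot: the mapred-site values the view now substitutes (for example mapreduce.jobhistory.webapp.address) already contain a host:port pair, so the old three-slot template would have appended a second port. The fmt stand-in below only mimics Ember's String.fmt for illustration, and the address value is hypothetical.

// Minimal stand-in for Ember's String.fmt, for illustration only.
function fmt(template) {
  var args = Array.prototype.slice.call(arguments, 1), i = 0;
  return template.replace(/%@/g, function () { return args[i++]; });
}

var addr = 'jhs.example.com:19888'; // hypothetical mapred-site value

console.log(fmt('%@://%@:%@/jmx', 'http', addr, '19888'));
// "http://jhs.example.com:19888:19888/jmx" -- old template duplicates the port
console.log(fmt('%@://%@/jmx', 'http', addr));
// "http://jhs.example.com:19888/jmx"       -- new template uses the address as-is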


[36/50] [abbrv] ambari git commit: AMBARI-13409. AMS Load Simulator updates. (Aravindan Vijayan via swagle)

Posted by nc...@apache.org.
AMBARI-13409. AMS Load Simulator updates. (Aravindan Vijayan via swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b2f306d9
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b2f306d9
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b2f306d9

Branch: refs/heads/branch-dev-patch-upgrade
Commit: b2f306d95e41cc132a3d92a650d0faa03c44c844
Parents: 063e79f
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Thu Oct 22 17:03:47 2015 -0700
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Thu Oct 22 17:03:47 2015 -0700

----------------------------------------------------------------------
 .../ambari-metrics-timelineservice/pom.xml      |   49 +-
 .../metrics/loadsimulator/LoadRunner.java       |    4 +-
 .../loadsimulator/MetricsLoadSimulator.java     |   17 +
 .../metrics/loadsimulator/data/AppID.java       |    6 +-
 .../jmetertest/AMSJMeterLoadTest.java           |  202 +++
 .../loadsimulator/jmetertest/AppGetMetric.java  |   57 +
 .../jmetertest/GetMetricRequestInfo.java        |   61 +
 .../jmetertest/JmeterTestPlanTask.java          |  269 ++++
 .../loadsimulator/net/RestMetricsSender.java    |    2 +-
 .../src/main/resources/loadsimulator/README     |   65 +
 .../loadsimulator/ams-jmeter.properties         |   56 +
 .../resources/loadsimulator/amsJmeterGraph.jmx  |  104 ++
 .../resources/loadsimulator/jmeter.properties   | 1172 ++++++++++++++++++
 .../loadsimulator/saveservice.properties        |  381 ++++++
 .../main/resources/metrics_def/AMS-HBASE.dat    |   18 +
 .../resources/metrics_def/FLUME_HANDLER.dat     |   40 +
 .../main/resources/metrics_def/KAFKA_BROKER.dat | 1104 +++++++++++++++++
 .../src/main/resources/metrics_def/NIMBUS.dat   |    7 +
 .../main/resources/ui_metrics_def/AMS-HBASE.dat |   26 +
 .../main/resources/ui_metrics_def/DATANODE.dat  |    4 +
 .../resources/ui_metrics_def/FLUME_HANDLER.dat  |   63 +
 .../src/main/resources/ui_metrics_def/HBASE.dat |   47 +
 .../src/main/resources/ui_metrics_def/HOST.dat  |   79 ++
 .../resources/ui_metrics_def/KAFKA_BROKER.dat   |   16 +
 .../main/resources/ui_metrics_def/NAMENODE.dat  |   30 +
 .../main/resources/ui_metrics_def/NIMBUS.dat    |   28 +
 .../resources/ui_metrics_def/NODEMANAGER.dat    |   33 +
 .../ui_metrics_def/RESOURCEMANAGER.dat          |   11 +
 28 files changed, 3945 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/pom.xml b/ambari-metrics/ambari-metrics-timelineservice/pom.xml
index 6e04330..67c278f 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/pom.xml
+++ b/ambari-metrics/ambari-metrics-timelineservice/pom.xml
@@ -420,7 +420,54 @@
       <artifactId>commons-logging</artifactId>
       <version>1.1.1</version>
     </dependency>
-
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-math3</artifactId>
+      <version>3.4.1</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-pool2</artifactId>
+      <version>2.3</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.jmeter</groupId>
+      <artifactId>ApacheJMeter_core</artifactId>
+      <version>2.13</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-nop</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-math3</groupId>
+          <artifactId>commons-math3</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-pool2</groupId>
+          <artifactId>commons-pool2</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.jmeter</groupId>
+      <artifactId>ApacheJMeter_http</artifactId>
+      <version>2.13</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-nop</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-math3</groupId>
+          <artifactId>commons-math3</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-pool2</groupId>
+          <artifactId>commons-pool2</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
     <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
     <dependency>
       <groupId>org.apache.hadoop</groupId>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/LoadRunner.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/LoadRunner.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/LoadRunner.java
index e5da0a3..203a88b 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/LoadRunner.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/LoadRunner.java
@@ -82,13 +82,13 @@ public class LoadRunner {
 
     int startIndex = 0;
     if (createMaster) {
-      String simHost = hostName + ".0";
+      String simHost = hostName + "0";
       addMetricsWorkers(senderWorkers, simHost, metricsHost, MASTER_APPS);
       startIndex++;
     }
 
     for (int i = startIndex; i < threadCount; i++) {
-      String simHost = hostName + "." + i;
+      String simHost = hostName + i;
       addMetricsWorkers(senderWorkers, simHost, metricsHost, SLAVE_APPS);
     }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/MetricsLoadSimulator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/MetricsLoadSimulator.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/MetricsLoadSimulator.java
index a0c1bd2..09db9b5 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/MetricsLoadSimulator.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/MetricsLoadSimulator.java
@@ -51,6 +51,23 @@ public class MetricsLoadSimulator {
     loadRunner.start();
   }
 
+  /*
+   Alternate entry point to the test, invoked from ./jmetertest/AMSJMeterLoadTest.java.
+   */
+  public static void startTest(Map<String,String> mapArgs) {
+
+    LoadRunner loadRunner = new LoadRunner(
+            mapArgs.get("hostName"),
+            Integer.valueOf(mapArgs.get("numberOfHosts")),
+            mapArgs.get("metricsHostName"),
+            Integer.valueOf(mapArgs.get("collectInterval")),
+            Integer.valueOf(mapArgs.get("sendInterval")),
+            Boolean.valueOf(mapArgs.get("master"))
+    );
+
+    loadRunner.start();
+  }
+
   private static Map<String, String> parseArgs(String[] args) {
     Map<String, String> mapProps = new HashMap<String, String>();
     mapProps.put("hostName", "host");

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/data/AppID.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/data/AppID.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/data/AppID.java
index 4f58dc5..a130171 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/data/AppID.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/data/AppID.java
@@ -25,9 +25,11 @@ public enum AppID {
   DATANODE("datanode"),
   NODEMANAGER("nodemanager"),
   MASTER_HBASE("hbase"),
-  SLAVE_HBASE("hbase");
+  SLAVE_HBASE("hbase"),
+  NIMBUS("nimbus"),
+  KAFKA_BROKER("kafka_broker");
 
-  public static final AppID[] MASTER_APPS = {HOST, NAMENODE, RESOURCEMANAGER, MASTER_HBASE};
+  public static final AppID[] MASTER_APPS = {HOST, NAMENODE, RESOURCEMANAGER, MASTER_HBASE, KAFKA_BROKER, NIMBUS};
   public static final AppID[] SLAVE_APPS = {HOST, DATANODE, NODEMANAGER, SLAVE_HBASE};
 
   private String id;

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/jmetertest/AMSJMeterLoadTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/jmetertest/AMSJMeterLoadTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/jmetertest/AMSJMeterLoadTest.java
new file mode 100644
index 0000000..187c3f1
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/jmetertest/AMSJMeterLoadTest.java
@@ -0,0 +1,202 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.loadsimulator.jmetertest;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.loadsimulator.MetricsLoadSimulator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+
+public class AMSJMeterLoadTest {
+
+  private final static Logger LOG = LoggerFactory.getLogger(AMSJMeterLoadTest.class);
+  private static String PROPERTIES_FILE = "loadsimulator/ams-jmeter.properties";
+  private ScheduledExecutorService scheduledExecutorService = null;
+  private List<AppGetMetric> appGetMetrics;
+  private Properties amsJmeterProperties = null;
+
+  public AMSJMeterLoadTest(String testType, String userDefinedPropertiesFile) {
+
+    if (null == userDefinedPropertiesFile || userDefinedPropertiesFile.isEmpty()) {
+      this.amsJmeterProperties = readProperties(PROPERTIES_FILE);
+    } else {
+      this.amsJmeterProperties = readProperties(userDefinedPropertiesFile);
+    }
+
+    if ("U".equals(testType)) { //GET metrics simulator
+      int numInstances = Integer.valueOf(amsJmeterProperties.getProperty("num-ui-instances"));
+      this.scheduledExecutorService = Executors.newScheduledThreadPool(numInstances);
+      this.appGetMetrics = initializeGetMetricsPayload(amsJmeterProperties);
+      this.runTest(numInstances);
+    } else {                    //PUT Metrics simulator
+      Map<String, String> mapArgs = new HashMap<String, String>();
+      mapArgs.put("hostName", amsJmeterProperties.getProperty("host-prefix"));
+      mapArgs.put("numberOfHosts", amsJmeterProperties.getProperty("num-hosts"));
+      mapArgs.put("metricsHostName", amsJmeterProperties.getProperty("ams-host-port"));
+      mapArgs.put("collectInterval", amsJmeterProperties.getProperty("collection-interval"));
+      mapArgs.put("sendInterval", amsJmeterProperties.getProperty("send-interval"));
+      mapArgs.put("master", amsJmeterProperties.getProperty("create-master"));
+      MetricsLoadSimulator.startTest(mapArgs);
+    }
+  }
+
+  public static Properties readProperties(String propertiesFile) {
+    try {
+      Properties properties = new Properties();
+      InputStream inputStream = ClassLoader.getSystemResourceAsStream(propertiesFile);
+      if (inputStream == null) {
+        inputStream = new FileInputStream(propertiesFile);
+      }
+      properties.load(inputStream);
+      return properties;
+    } catch (IOException ioEx) {
+      LOG.error("Error reading properties file for jmeter");
+      return null;
+    }
+  }
+
+  private static List<GetMetricRequestInfo> readMetricsFromFile(String app) {
+    InputStream input = null;
+    List<GetMetricRequestInfo> metricList = new ArrayList<>();
+    String fileName = "ui_metrics_def/" + app + ".dat";
+
+    try {
+      input = ClassLoader.getSystemResourceAsStream(fileName);
+      BufferedReader reader = new BufferedReader(new InputStreamReader(input));
+      String line;
+      List<String> metrics = new ArrayList<>();
+      while ((line = reader.readLine()) != null) {
+
+        if (line.startsWith("|")) {
+          boolean needsTimestamps = line.contains("startTime");
+          boolean needsHost = line.contains("hostname");
+          metricList.add(new GetMetricRequestInfo(metrics, needsTimestamps, needsHost));
+          metrics.clear();
+        } else {
+          metrics.add(line);
+        }
+      }
+      return metricList;
+    } catch (IOException e) {
+      LOG.error("Cannot read file " + fileName + " for appID " + app, e);
+    } finally {
+      if (input != null) {
+        try {
+          input.close();
+        } catch (IOException ex) {
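+          // best effort: failures while closing the stream are deliberately ignored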
+        }
+      }
+    }
+    return null;
+  }
+
+  private static List<AppGetMetric> initializeGetMetricsPayload(Properties amsJmeterProperties) {
+
+    List<AppGetMetric> appGetMetrics = new ArrayList<AppGetMetric>();
+    String appsToTest = amsJmeterProperties.getProperty("apps-to-test");
+    String[] apps;
+
+    if (appsToTest != null && !appsToTest.isEmpty()) {
+      apps = StringUtils.split(appsToTest, ",");
+    } else {
+      apps = new String[JmeterTestPlanTask.ClientApp.values().length];
+      int ctr = 0;
+      for (JmeterTestPlanTask.ClientApp app : JmeterTestPlanTask.ClientApp.values())
+        apps[ctr++] = app.getId();
+    }
+
+    for (String app : apps) {
+
+      int interval = Integer.valueOf(amsJmeterProperties.getProperty("get-interval"));
+      String intervalString = amsJmeterProperties.getProperty(app + "-get-interval");
+      if (intervalString != null && !intervalString.isEmpty()) {
+        interval = Integer.valueOf(intervalString);
+      }
+      appGetMetrics.add(new AppGetMetric(readMetricsFromFile(app), interval, app));
+    }
+
+    return appGetMetrics;
+  }
+
+  public void runTest(int numInstances) {
+
+    int appRefreshRate = Integer.valueOf(amsJmeterProperties.getProperty("app-refresh-rate"));
+    for (int i = 0; i < numInstances; i++) {
+      ScheduledFuture future = scheduledExecutorService.scheduleAtFixedRate(new JmeterTestPlanTask(appGetMetrics,
+        amsJmeterProperties), 0, appRefreshRate, TimeUnit.MILLISECONDS);
+    }
+  }
+
+  /**
+   * Sample Usage:
+   * java -cp "lib/*":ambari-metrics-timelineservice-2.1.1.0.jar org.apache.hadoop.yarn.server.applicationhistoryservice
+   * .metrics.loadsimulator.jmetertest.AMSJMeterLoadTest
+   * -t U -p ambari-metrics-timelineservice/src/main/resources/loadsimulator/ams-jmeter.properties
+   */
+  public static void main(String[] args) {
+    Map<String, String> mapArgs = parseArgs(args);
+    String testType = mapArgs.get("type");
+    String amsJmeterPropertiesFile = mapArgs.get("amsJmeterPropertiesFile");
+    new AMSJMeterLoadTest(testType, amsJmeterPropertiesFile);
+  }
+
+  private static Map<String, String> parseArgs(String[] args) {
+    Map<String, String> mapProps = new HashMap<String, String>();
+    if (args.length == 0) {
+      printUsage();
+      throw new RuntimeException("Unexpected argument, See usage message.");
+    } else {
+      for (int i = 0; i < args.length; i += 2) {
+        String arg = args[i];
+        if (arg.equals("-t")) {
+          mapProps.put("type", args[i + 1]);
+        } else if (arg.equals("-p")) {
+          mapProps.put("amsJmeterPropertiesFile", args[i + 1]);
+        } else {
+          printUsage();
+          throw new IllegalArgumentException("Unexpected argument, See usage message.");
+        }
+      }
+    }
+    return mapProps;
+  }
+
+  public static void printUsage() {
+    System.err.println("Usage: java AMSJmeterLoadTest [OPTIONS]");
+    System.err.println("Options: ");
+    System.err.println("[-t type (S=>Sink/U=>UI)] [-p amsJmeterPropertiesFile (Optional)]");
+  }
+
+}
+
+
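
To make readMetricsFromFile above concrete: each ui_metrics_def/<APP>.dat file lists metric names one per line, and a line starting with '|' closes a request group; when that terminator line mentions startTime or hostname, the generated GET request carries a time window or a hostname parameter respectively. A hypothetical two-group entry (not one of the shipped .dat files) would look like:

regionserver.Server.percentFilesLocal
jvm.JvmMetrics.MemHeapUsedM
|startTime,endTime
master.Server.averageLoad
|hostname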

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/jmetertest/AppGetMetric.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/jmetertest/AppGetMetric.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/jmetertest/AppGetMetric.java
new file mode 100644
index 0000000..727a1c7
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/jmetertest/AppGetMetric.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.loadsimulator.jmetertest;
+
+import java.util.List;
+
+public class AppGetMetric {
+
+  private String app;
+  private int interval;
+  private List<GetMetricRequestInfo> requests;
+
+  public AppGetMetric(List<GetMetricRequestInfo> requests, int interval, String app) {
+    this.setMetricRequests(requests);
+    this.setInterval(interval);
+    this.setApp(app);
+  }
+
+  public List<GetMetricRequestInfo> getMetricRequests() {
+    return requests;
+  }
+
+  public void setMetricRequests(List<GetMetricRequestInfo> requests) {
+    this.requests = requests;
+  }
+
+  public int getInterval() {
+    return interval;
+  }
+
+  public void setInterval(int interval) {
+    this.interval = interval;
+  }
+
+  public String getApp() {
+    return app;
+  }
+
+  public void setApp(String app) {
+    this.app = app;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/jmetertest/GetMetricRequestInfo.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/jmetertest/GetMetricRequestInfo.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/jmetertest/GetMetricRequestInfo.java
new file mode 100644
index 0000000..26c5025
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/jmetertest/GetMetricRequestInfo.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.loadsimulator.jmetertest;
+
+import org.apache.commons.lang.StringUtils;
+
+import java.util.List;
+
+
+public class GetMetricRequestInfo {
+
+  private String metricStringPayload;
+  private boolean needsTimestamps;
+  private boolean needsHost;
+
+  public GetMetricRequestInfo(List<String> metrics, boolean needsTimestamps, boolean needsHost) {
+
+    this.setMetricStringPayload(StringUtils.join(metrics, ","));
+    this.setNeedsTimestamps(needsTimestamps);
+    this.setNeedsHost(needsHost);
+  }
+
+  public String getMetricStringPayload() {
+    return metricStringPayload;
+  }
+
+  public void setMetricStringPayload(String metricStringPayload) {
+    this.metricStringPayload = metricStringPayload;
+  }
+
+  public boolean needsTimestamps() {
+    return needsTimestamps;
+  }
+
+  public void setNeedsTimestamps(boolean needsTimestamps) {
+    this.needsTimestamps = needsTimestamps;
+  }
+
+  public boolean needsHost() {
+    return needsHost;
+  }
+
+  public void setNeedsHost(boolean needsHost) {
+    this.needsHost = needsHost;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/jmetertest/JmeterTestPlanTask.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/jmetertest/JmeterTestPlanTask.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/jmetertest/JmeterTestPlanTask.java
new file mode 100644
index 0000000..f7e27b1
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/jmetertest/JmeterTestPlanTask.java
@@ -0,0 +1,269 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.loadsimulator.jmetertest;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.jmeter.control.LoopController;
+import org.apache.jmeter.engine.StandardJMeterEngine;
+import org.apache.jmeter.protocol.http.sampler.HTTPSampler;
+import org.apache.jmeter.protocol.http.util.HTTPConstants;
+import org.apache.jmeter.reporters.ResultCollector;
+import org.apache.jmeter.reporters.Summariser;
+import org.apache.jmeter.testelement.TestElement;
+import org.apache.jmeter.testelement.TestPlan;
+import org.apache.jmeter.threads.JMeterContextService;
+import org.apache.jmeter.threads.ThreadGroup;
+import org.apache.jmeter.timers.ConstantTimer;
+import org.apache.jmeter.util.JMeterUtils;
+import org.apache.jorphan.collections.HashTree;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.*;
+import java.lang.reflect.Field;
+import java.util.*;
+
+public class JmeterTestPlanTask implements Runnable {
+
+  private static StandardJMeterEngine jmeterEngine = null;
+  private final static Logger LOG = LoggerFactory.getLogger(JmeterTestPlanTask.class);
+  private List<AppGetMetric> appGetMetrics;
+  private Properties amsJmeterProperties;
+  private HashTree amsTestPlanTree;
+  private TestPlan amsTestPlan;
+  private static final String JMETER_HOME = "loadsimulator";
+  private static final String JMETER_PROPERTIES_FILE = JMETER_HOME + "/jmeter.properties";
+  private static final String SAVESERVICE_PROPERTIES_FILE = JMETER_HOME + "/saveservice.properties";
+
+  public enum ClientApp {
+    HOST("HOST"),
+    NAMENODE("NAMENODE"),
+    HBASE("HBASE"),
+    NIMBUS("NIMBUS"),
+    KAFKA_BROKER("KAFKA_BROKER"),
+    FLUME_HANDLER("FLUME_HANDLER"),
+    AMS_HBASE("AMS-HBASE"),
+    NODEMANAGER("NODEMANAGER"),
+    RESOURCEMANAGER("RESOURCEMANAGER"),
+    DATANODE("DATANODE");
+
+    private String id;
+
+    private ClientApp(String id) {
+      this.id = id;
+    }
+
+    public String getId() {
+      return id;
+    }
+  }
+
+  public JmeterTestPlanTask(List<AppGetMetric> appGetMetrics, Properties amsJmeterProperties) {
+    this.appGetMetrics = appGetMetrics;
+    this.amsJmeterProperties = amsJmeterProperties;
+    amsTestPlanTree = new HashTree();
+    amsTestPlan = new TestPlan("AMS JMeter Load Test plan");
+    System.out.println("Starting AMS Jmeter load testing");
+  }
+
+  public void run() {
+    if (jmeterEngine != null) {
+
+      Object[] threadGroups = amsTestPlanTree.getArray(amsTestPlan);
+      for (Object threadGroupObj : threadGroups) {
+        if (threadGroupObj instanceof ThreadGroup) {
+          ThreadGroup threadGroup = (ThreadGroup) threadGroupObj;
+          threadGroup.stop();
+        }
+      }
+      amsTestPlanTree.clear();
+      jmeterEngine.askThreadsToStop();
+      jmeterEngine.stopTest();
+      JMeterContextService.endTest();
+    }
+
+    //Start the new test plan for the new app.
+    try {
+      //Initialize Jmeter essentials
+      jmeterEngine = new StandardJMeterEngine();
+      JMeterContextService.getContext().setEngine(jmeterEngine);
+
+      //Workaround to supply JMeterUtils with jmeter.properties from the JAR.
+      JMeterUtils.setJMeterHome("");
+      Field f = new JMeterUtils().getClass().getDeclaredField("appProperties");
+      f.setAccessible(true);
+      f.set(null, AMSJMeterLoadTest.readProperties(JMETER_PROPERTIES_FILE));
+
+      //Copy the saveservice.properties file to the tmp dir for JMeter to consume.
+      InputStream inputStream = ClassLoader.getSystemResourceAsStream(SAVESERVICE_PROPERTIES_FILE);
+      if (inputStream == null) {
+        inputStream = new FileInputStream(SAVESERVICE_PROPERTIES_FILE);
+      }
+      String tmpDir = System.getProperty("java.io.tmpdir");
+      OutputStream outputStream = new FileOutputStream(tmpDir + "/saveservice.properties");
+      IOUtils.copy(inputStream, outputStream);
+      outputStream.close();
+      JMeterUtils.setProperty("saveservice_properties", tmpDir + "/saveservice.properties");
+
+      //Initialize Test plan
+      amsTestPlan.setProperty(TestElement.TEST_CLASS, TestPlan.class.getName());
+      amsTestPlanTree.add("AMS Test plan", amsTestPlan);
+
+      //Choose a random APP against which to perform the GET metrics request.
+      int currentAppIndex = new Random().nextInt(appGetMetrics.size());
+
+      //Create ThreadGroup for the App
+      createThreadGroupHashTree(currentAppIndex, amsJmeterProperties, amsTestPlanTree, amsTestPlan);
+
+      //Generates a JMX file that can be opened through the JMeter GUI.
+      //SaveService.saveTree(amsTestPlanTree, new FileOutputStream(JMETER_HOME + "/" + "amsTestPlan.jmx"));
+
+      //Summariser output reports test progress to stdout.
+      Summariser summariser = null;
+      String summariserName = JMeterUtils.getPropDefault("summariser.name", "summary");
+      if (summariserName.length() > 0) {
+        summariser = new Summariser(summariserName);
+      }
+
+      //Store execution results into a .jtl file
+      String jmeterLogFile = tmpDir + "/amsJmeterTestResults.jtl";
+      ResultCollector resultCollector = new ResultCollector(summariser);
+      resultCollector.setFilename(jmeterLogFile);
+      amsTestPlanTree.add(amsTestPlanTree.getArray()[0], resultCollector);
+      jmeterEngine.configure(amsTestPlanTree);
+      jmeterEngine.run();
+
+      LOG.info("AMS Jmeter Test started up successfully");
+
+    } catch (Exception ioEx) {
+      amsTestPlanTree.clear();
+      jmeterEngine.askThreadsToStop();
+      jmeterEngine.stopTest();
+      JMeterContextService.endTest();
+      LOG.error("Error occurred while running AMS load test : " + ioEx.getMessage());
+      ioEx.printStackTrace();
+    }
+  }
+
+  private ConstantTimer createConstantTimer(int delay) {
+    ConstantTimer timer = new ConstantTimer();
+    timer.setDelay("" + delay);
+    return timer;
+  }
+
+  private Map<String, String> getAppSpecificParameters(String app, GetMetricRequestInfo request, Properties amsJmeterProperties) {
+
+    Map<String, String> parametersMap = new HashMap<String, String>();
+    String hostPrefix = amsJmeterProperties.getProperty("host-prefix");
+    String hostSuffix = amsJmeterProperties.getProperty("host-suffix");
+    int minHostIndex = Integer.valueOf(amsJmeterProperties.getProperty("min-host-index"));
+    int numHosts = Integer.valueOf(amsJmeterProperties.getProperty("num-hosts"));
+
+    parametersMap.put("appId", app);
+
+    if (request.needsTimestamps()) {
+      long currentTime = System.currentTimeMillis();
+      long oneHourBack = currentTime - 3600 * 1000;
+      parametersMap.put("startTime", String.valueOf(oneHourBack));
+      parametersMap.put("endTime", String.valueOf(currentTime));
+    }
+
+    if (request.needsHost()) {
+      if (ClientApp.AMS_HBASE.getId().equals(app)) {
+        parametersMap.put("hostname", amsJmeterProperties.getProperty("ams-host"));
+      } else if (ClientApp.HOST.getId().equals(app) || ClientApp.NODEMANAGER.getId().equals(app)) {
+        int randomHost = minHostIndex + new Random().nextInt(numHosts);
+        parametersMap.put("hostname", hostPrefix + randomHost + hostSuffix);
+      } else {
+        parametersMap.put("hostname", hostPrefix + amsJmeterProperties.getProperty(app + "-host") + hostSuffix);
+      }
+    }
+    parametersMap.put("metricNames", request.getMetricStringPayload());
+    return parametersMap;
+  }
+
+  private void createThreadGroupHashTree(int appIndex, Properties amsJmeterProperties, HashTree amsTestPlanTree, TestPlan amsTestPlan) {
+
+    AppGetMetric appGetMetric = appGetMetrics.get(appIndex);
+    String app = appGetMetric.getApp();
+    int interval = appGetMetric.getInterval();
+
+    //Read and validate AMS information.
+    String[] amsHostPort = amsJmeterProperties.getProperty("ams-host-port").split(":");
+    String amsHost = amsHostPort[0];
+    String amsPath = amsJmeterProperties.getProperty("ams-path");
+    int amsPort = Integer.valueOf(amsHostPort[1]);
+    int numLoops = Integer.valueOf(amsJmeterProperties.getProperty("num-get-calls-per-app"));
+
+    LoopController loopController = createLoopController(app + " GET loop controller", numLoops, false);
+    for (GetMetricRequestInfo request : appGetMetric.getMetricRequests()) {
+
+      ThreadGroup threadGroup = createThreadGroup(app + " GET threadGroup", 1, 0, loopController);
+
+      HashTree threadGroupHashTree = amsTestPlanTree.add(amsTestPlan, threadGroup);
+      Map<String, String> parametersMap = getAppSpecificParameters(app, request, amsJmeterProperties);
+
+      HTTPSampler sampler = createGetSampler("GET " + app + " metrics", amsHost, amsPort, amsPath, null, parametersMap);
+
+      if (numLoops > 1) {
+        threadGroupHashTree.add(createConstantTimer(interval));
+      }
+
+      threadGroupHashTree.add(sampler);
+    }
+  }
+
+  private HTTPSampler createGetSampler(String name, String domain, int port, String path, String encoding, Map<String, String> parameters) {
+
+    HTTPSampler sampler = new HTTPSampler();
+    sampler.setDomain(domain);
+    sampler.setPort(port);
+    sampler.setPath(path);
+    sampler.setMethod(HTTPConstants.GET);
+
+    if (encoding != null)
+      sampler.setContentEncoding(encoding);
+
+    for (Map.Entry<String, String> entry : parameters.entrySet()) {
+      sampler.addArgument(entry.getKey(), entry.getValue());
+    }
+    sampler.setName(name);
+    return sampler;
+  }
+
+  private LoopController createLoopController(String name, int numLoops, boolean continueForever) {
+    LoopController loopController = new LoopController();
+    loopController.setLoops(numLoops);
+    loopController.setProperty(TestElement.TEST_CLASS, LoopController.class.getName());
+    loopController.initialize();
+    loopController.setContinueForever(continueForever);
+    loopController.setName(name);
+    return loopController;
+  }
+
+  private ThreadGroup createThreadGroup(String name, int numThreads, int rampUp, LoopController loopController) {
+    ThreadGroup threadGroup = new ThreadGroup();
+    threadGroup.setName(name);
+    threadGroup.setNumThreads(numThreads);
+    threadGroup.setRampUp(rampUp);
+    threadGroup.setSamplerController(loopController);
+    threadGroup.setProperty(TestElement.TEST_CLASS, ThreadGroup.class.getName());
+    return threadGroup;
+  }
+
+}
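
For orientation, the plan wiring in JmeterTestPlanTask above reduces to a small, self-contained sketch. The skeleton below is illustrative only, not part of the patch: it reuses the same JMeter 2.x API calls seen in the class, while the host, port, path and argument values are placeholders.

import org.apache.jmeter.control.LoopController;
import org.apache.jmeter.engine.StandardJMeterEngine;
import org.apache.jmeter.protocol.http.sampler.HTTPSampler;
import org.apache.jmeter.protocol.http.util.HTTPConstants;
import org.apache.jmeter.testelement.TestElement;
import org.apache.jmeter.testelement.TestPlan;
import org.apache.jmeter.threads.ThreadGroup;
import org.apache.jorphan.collections.HashTree;

public class MinimalAmsGetPlan {
  public static void main(String[] args) {
    // JMeter properties must be loaded before building a plan; the class
    // above does this via a reflection workaround, while the plain API would be
    // JMeterUtils.loadJMeterProperties("/path/to/jmeter.properties") followed
    // by JMeterUtils.initLocale().

    // A single GET sampler against a placeholder AMS endpoint.
    HTTPSampler sampler = new HTTPSampler();
    sampler.setName("GET HOST metrics");
    sampler.setDomain("ams.example.com");           // placeholder host
    sampler.setPort(6188);
    sampler.setPath("/ws/v1/timeline/metrics");
    sampler.setMethod(HTTPConstants.GET);
    sampler.addArgument("appId", "HOST");

    // Repeat the sampler a fixed number of times.
    LoopController loop = new LoopController();
    loop.setLoops(3);
    loop.setProperty(TestElement.TEST_CLASS, LoopController.class.getName());
    loop.initialize();
    loop.setContinueForever(false);

    // One thread, no ramp-up, driven by the loop controller.
    ThreadGroup group = new ThreadGroup();
    group.setName("sketch threadGroup");
    group.setNumThreads(1);
    group.setRampUp(0);
    group.setSamplerController(loop);
    group.setProperty(TestElement.TEST_CLASS, ThreadGroup.class.getName());

    // Assemble plan -> thread group -> sampler, then run the engine.
    TestPlan plan = new TestPlan("Minimal AMS GET plan");
    plan.setProperty(TestElement.TEST_CLASS, TestPlan.class.getName());
    HashTree tree = new HashTree();
    tree.add("plan", plan);
    tree.add(plan, group).add(sampler);

    StandardJMeterEngine engine = new StandardJMeterEngine();
    engine.configure(tree);
    engine.run();
  }
}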

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/net/RestMetricsSender.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/net/RestMetricsSender.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/net/RestMetricsSender.java
index 8657436..c6d84c2 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/net/RestMetricsSender.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/net/RestMetricsSender.java
@@ -32,7 +32,7 @@ import java.net.ProtocolException;
 public class RestMetricsSender implements MetricsSender {
   private final static Logger LOG = LoggerFactory.getLogger(RestMetricsSender.class);
 
-  private final static String COLLECTOR_URL = "http://%s:6188/ws/v1/timeline/metrics";
+  private final static String COLLECTOR_URL = "http://%s/ws/v1/timeline/metrics";
   private final String collectorServiceAddress;
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/loadsimulator/README
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/loadsimulator/README b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/loadsimulator/README
new file mode 100644
index 0000000..39e5365
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/loadsimulator/README
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ WORKING OF THE AMS LOAD SIMULATOR
+ The AMS load simulator is designed to perform load testing on a live AMS instance by playing the role of either a
+ sink or an Ambari UI instance, depending on how it is invoked.
+
+  > When it acts as a host sink, it makes calls to the AMS to PUT metrics for all the services at a defined interval.
+  The simulator can also be used to start up "N" such host instances where each instance has a number of Sinks that PUT
+  metrics.
+  > When the load simulator is invoked as a UI instance, it makes GET metrics calls to the AMS at defined
+  intervals for all the services. The rate of the GET metrics calls and the list of metrics requested have been designed
+  to closely match an actual Ambari UI instance. The Apache JMeter API has been used to build the GET calls made to the
+  AMS.
+
+  The load simulator uses a properties file (ams-jmeter.properties) to configure the test run. It is part of the
+  JAR, in the same folder as this README file. It can also be supplied as a command line argument to the test using the
+  "-p" option. Other properties files like jmeter.properties and saveservice.properties contain JMeter internal
+  properties and need not be modified.
+
+
+ INSTRUCTIONS TO RUN THE SIMULATOR
+ 1. Modify the ams-jmeter.properties to point to your AMS host. Change the property "num-hosts" based on how many hosts
+    need to be simulated for sinks. The GET Metric section of the properties is used for fine-tuning the GET call interval
+    for every APP type.
+ 2. Build the ambari-metrics-timelineservice jar.
+ 3. Invoke the test using the command as follows.
+
+   java -cp lib/*:ambari-metrics-timelineservice-<version>.jar org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.loadsimulator.jmetertest.AMSJMeterLoadTest -t <S/U>
+
+   lib/* -> All the dependency JARs generated for the ambari-metrics-timelineservice JAR.
+   -t option => S-Sink simulator or U-UI simulator
+   You can use the -p <location of ams-jmeter.properties> option to pass in your own properties file.
+
+ 4. Test results will be found at <TMP_DIR>/amsJmeterTestResults.jtl.
+ 5. Open the amsJmeterGraph.jmx file through a JMeter GUI instance and supply the results (amsJmeterTestResults.jtl) file as
+    input to the graph to be drawn.
+
+ TESTING ON GCE
+
+ 1. Copy the JAR, libs, and the optional ams-jmeter.properties file to all the machines on which the test needs to be run.
+ 2. Sink simulation for num-hosts = N.
+    Start the test with -t S on 1 machine with property "create-master=true".
+    Start the test with -t S on N-1 machines with property "create-master=false".
+ 3. UI simulation
+    Start the test with -t U on 1 or more machines.
+ 4. To stop the test once sufficient load testing has been done, run the following command on all machines.
+    ps axf | grep jmeter | grep -v grep | awk '{print "kill -9 " $1}' | sh
+ 5. Copy over the results file to a machine with JMeter installed. Open the amsJmeterGraph.jmx in the JMeter GUI and browse to
+    open the results file as input to the graph.
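
To make the shape of a single UI-simulator request concrete: the parameter map assembled per app resolves to a plain HTTP GET against the AMS metrics endpoint. The sketch below builds such a URL by hand; the host, metric names and time window are invented for illustration and are not taken from the patch.

import java.net.URLEncoder;
import java.util.LinkedHashMap;
import java.util.Map;

public class GetUrlSketch {
  public static void main(String[] args) throws Exception {
    long now = System.currentTimeMillis();
    Map<String, String> params = new LinkedHashMap<String, String>();
    params.put("appId", "HOST");
    params.put("metricNames", "cpu_user,mem_free");             // hypothetical payload
    params.put("hostname", "TestHost0");
    params.put("startTime", String.valueOf(now - 3600 * 1000)); // one hour back
    params.put("endTime", String.valueOf(now));

    // Assemble http://<ams-host-port><ams-path>?k1=v1&k2=v2...
    StringBuilder url = new StringBuilder("http://ams.example.com:6188/ws/v1/timeline/metrics?");
    boolean first = true;
    for (Map.Entry<String, String> e : params.entrySet()) {
      if (!first) {
        url.append('&');
      }
      url.append(e.getKey()).append('=').append(URLEncoder.encode(e.getValue(), "UTF-8"));
      first = false;
    }
    System.out.println(url);
  }
}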

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/loadsimulator/ams-jmeter.properties
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/loadsimulator/ams-jmeter.properties b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/loadsimulator/ams-jmeter.properties
new file mode 100644
index 0000000..3353d43
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/loadsimulator/ams-jmeter.properties
@@ -0,0 +1,56 @@
+# Copyright 2011 The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#AMS information
+ams-host-port=104.196.94.27:6188
+ams-path=/ws/v1/timeline/metrics
+
+# Host Information, using the host-prefix, num-hosts and <MASTER>-host
+# Hosts in this case will be TestHost0, TestHost1
+# For example, the NAMENODE, NIMBUS, HBASE MASTER and RESOURCEMANAGER host will be TestHost0
+host-prefix=TestHost
+host-suffix=
+min-host-index=0
+num-hosts=2
+NAMENODE-host=0
+NIMBUS-host=0
+HBASE-host=0
+RESOURCEMANAGER-host=0
+
+# PUT Metric / Sinks config
+collection-interval=1000
+send-interval=3000
+create-master=true
+
+# GET Metric / Client Apps config
+num-ui-instances=1
+apps-to-test=
+app-refresh-rate=8000
+num-get-calls-per-app=3
+get-interval=3000
+HOST-get-interval=3000
+NAMENODE-get-interval=2000
+HBASE-get-interval=3000
+NIMBUS-get-interval=2000
+AMS-HBASE-get-interval=2000
+FLUME_HANDLER-get-interval=2000
+NODEMANAGER-get-interval=2000
+KAFKA_BROKER-get-interval=2000
+DATANODE-get-interval=3000
+RESOURCEMANAGER-get-interval=3000
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/loadsimulator/amsJmeterGraph.jmx
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/loadsimulator/amsJmeterGraph.jmx b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/loadsimulator/amsJmeterGraph.jmx
new file mode 100644
index 0000000..06d6360
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/loadsimulator/amsJmeterGraph.jmx
@@ -0,0 +1,104 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+<jmeterTestPlan version="1.2" properties="2.8" jmeter="2.13 r1665067">
+  <hashTree>
+    <TestPlan guiclass="TestPlanGui" testclass="TestPlan" testname="Test Plan" enabled="true">
+      <stringProp name="TestPlan.comments"></stringProp>
+      <boolProp name="TestPlan.functional_mode">false</boolProp>
+      <boolProp name="TestPlan.serialize_threadgroups">false</boolProp>
+      <elementProp name="TestPlan.user_defined_variables" elementType="Arguments" guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+        <collectionProp name="Arguments.arguments"/>
+      </elementProp>
+      <stringProp name="TestPlan.user_define_classpath"></stringProp>
+    </TestPlan>
+    <hashTree>
+      <ResultCollector guiclass="GraphVisualizer" testclass="ResultCollector" testname="Latency Graph" enabled="true">
+        <boolProp name="ResultCollector.error_logging">false</boolProp>
+        <objProp>
+          <name>saveConfig</name>
+          <value class="SampleSaveConfiguration">
+            <time>false</time>
+            <latency>false</latency>
+            <timestamp>true</timestamp>
+            <success>true</success>
+            <label>true</label>
+            <code>true</code>
+            <message>true</message>
+            <threadName>false</threadName>
+            <dataType>true</dataType>
+            <encoding>false</encoding>
+            <assertions>false</assertions>
+            <subresults>false</subresults>
+            <responseData>true</responseData>
+            <samplerData>true</samplerData>
+            <xml>true</xml>
+            <fieldNames>true</fieldNames>
+            <responseHeaders>true</responseHeaders>
+            <requestHeaders>true</requestHeaders>
+            <responseDataOnError>true</responseDataOnError>
+            <saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
+            <assertionsResultsToSave>0</assertionsResultsToSave>
+            <url>true</url>
+            <hostname>true</hostname>
+            <sampleCount>true</sampleCount>
+          </value>
+        </objProp>
+        <stringProp name="filename">/tmp/amsJmeterTestResults.jtl</stringProp>
+      </ResultCollector>
+      <hashTree/>
+      <ResultCollector guiclass="RespTimeGraphVisualizer" testclass="ResultCollector" testname="Response Time Graph" enabled="true">
+        <boolProp name="ResultCollector.error_logging">false</boolProp>
+        <objProp>
+          <name>saveConfig</name>
+          <value class="SampleSaveConfiguration">
+            <time>false</time>
+            <latency>false</latency>
+            <timestamp>true</timestamp>
+            <success>true</success>
+            <label>true</label>
+            <code>true</code>
+            <message>true</message>
+            <threadName>false</threadName>
+            <dataType>true</dataType>
+            <encoding>false</encoding>
+            <assertions>false</assertions>
+            <subresults>false</subresults>
+            <responseData>true</responseData>
+            <samplerData>true</samplerData>
+            <xml>true</xml>
+            <fieldNames>true</fieldNames>
+            <responseHeaders>true</responseHeaders>
+            <requestHeaders>true</requestHeaders>
+            <responseDataOnError>true</responseDataOnError>
+            <saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
+            <assertionsResultsToSave>0</assertionsResultsToSave>
+            <url>true</url>
+            <hostname>true</hostname>
+            <sampleCount>true</sampleCount>
+          </value>
+        </objProp>
+        <stringProp name="filename">/tmp/amsJmeterTestResults.jtl</stringProp>
+        <stringProp name="RespTimeGraph.interval">15000</stringProp>
+        <intProp name="RespTimeGraph.linestrockwidth">2</intProp>
+        <intProp name="RespTimeGraph.lineshapepoint">4</intProp>
+      </ResultCollector>
+      <hashTree/>
+    </hashTree>
+  </hashTree>
+</jmeterTestPlan>


[08/50] [abbrv] ambari git commit: AMBARI-13479: Handle derivation of hawq's yarn related parameters where yarn is not configured (bhuvnesh2703 via jaoki)

Posted by nc...@apache.org.
AMBARI-13479: Handle derivation of hawq's yarn related parameters where yarn is not configured (bhuvnesh2703 via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/844b1e9d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/844b1e9d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/844b1e9d

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 844b1e9d3effd2d9d54d8b75532d208e1c629e40
Parents: a5162a7
Author: Jun Aoki <ja...@apache.org>
Authored: Wed Oct 21 15:22:07 2015 -0700
Committer: Jun Aoki <ja...@apache.org>
Committed: Wed Oct 21 15:22:07 2015 -0700

----------------------------------------------------------------------
 ambari-web/app/utils/configs/config_property_helper.js | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/844b1e9d/ambari-web/app/utils/configs/config_property_helper.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/configs/config_property_helper.js b/ambari-web/app/utils/configs/config_property_helper.js
index 3cafc9f..5dd5149 100644
--- a/ambari-web/app/utils/configs/config_property_helper.js
+++ b/ambari-web/app/utils/configs/config_property_helper.js
@@ -236,15 +236,15 @@ module.exports = {
         this.setRecommendedValue(configProperty, hostWithPort, nnHost);
         break;
       case 'hawq_resourcemanager_yarn_resourcemanager_address':
-        var rmHost = masterComponentHostsInDB.findProperty('component', 'RESOURCEMANAGER').hostName;
+        var rmHost = masterComponentHostsInDB.findProperty('component', 'RESOURCEMANAGER');
         if (rmHost) {
-          this.setRecommendedValue(configProperty, hostWithPort, rmHost);
+          this.setRecommendedValue(configProperty, hostWithPort, rmHost.hostName);
         }
         break;
       case 'hawq_resourcemanager_yarn_resourcemanager_scheduler_address':
-        var rmHost = masterComponentHostsInDB.findProperty('component', 'RESOURCEMANAGER').hostName;
+        var rmHost = masterComponentHostsInDB.findProperty('component', 'RESOURCEMANAGER');
         if (rmHost) {
-          this.setRecommendedValue(configProperty, hostWithPort, rmHost);
+          this.setRecommendedValue(configProperty, hostWithPort, rmHost.hostName);
         }
         break;
     }
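
The essence of the fix above is ordering: findProperty returns undefined when no RESOURCEMANAGER component is mapped, so the old code dereferenced .hostName before the null check could run. The same look-up-then-guard pattern in a Java sketch; the Component type and the data are hypothetical stand-ins, not Ambari API.

import java.util.Arrays;
import java.util.List;

public class GuardSketch {
  // Hypothetical stand-in for the component records searched by findProperty().
  static class Component {
    final String name;
    final String hostName;
    Component(String name, String hostName) { this.name = name; this.hostName = hostName; }
  }

  // Returns null when nothing matches, mirroring findProperty() returning undefined.
  static Component findByName(List<Component> all, String name) {
    for (Component c : all) {
      if (c.name.equals(name)) {
        return c;
      }
    }
    return null;
  }

  public static void main(String[] args) {
    // A cluster without YARN: no RESOURCEMANAGER entry at all.
    List<Component> components = Arrays.asList(new Component("NAMENODE", "TestHost0"));

    // Broken shape: findByName(components, "RESOURCEMANAGER").hostName would
    // throw before any null check, just like the old JS threw on undefined.
    Component rm = findByName(components, "RESOURCEMANAGER");
    if (rm != null) {                     // guard first, dereference second
      System.out.println(rm.hostName);
    } else {
      System.out.println("RESOURCEMANAGER not configured; skipping recommendation");
    }
  }
}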


[03/50] [abbrv] ambari git commit: AMBARI-13502. Blank Tez View page is displayed if ResourceManagers are down

Posted by nc...@apache.org.
AMBARI-13502. Blank Tez View page is displayed if ResourceManagers are down


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/66f8ad41
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/66f8ad41
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/66f8ad41

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 66f8ad413aa94a034fc3e5477fd8c37177587d07
Parents: 0a9df8c
Author: Alex Antonenko <hi...@gmail.com>
Authored: Wed Oct 21 19:18:21 2015 +0300
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Wed Oct 21 20:02:35 2015 +0300

----------------------------------------------------------------------
 .../tez/src/main/resources/ui/scripts/init-ambari-view.js      | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/66f8ad41/contrib/views/tez/src/main/resources/ui/scripts/init-ambari-view.js
----------------------------------------------------------------------
diff --git a/contrib/views/tez/src/main/resources/ui/scripts/init-ambari-view.js b/contrib/views/tez/src/main/resources/ui/scripts/init-ambari-view.js
index 615c727..bd2e9a6 100644
--- a/contrib/views/tez/src/main/resources/ui/scripts/init-ambari-view.js
+++ b/contrib/views/tez/src/main/resources/ui/scripts/init-ambari-view.js
@@ -161,7 +161,7 @@ App.Helpers.ambari = (function () {
         context: this,
         url: getURL(),
         success: this.getInstanceParametersSuccessCallback,
-        error: this.getInstanceParametersErrorCallback,
+        error: this.getInstanceParametersErrorCallback
       });
     },
 
@@ -180,7 +180,9 @@ App.Helpers.ambari = (function () {
      * @method getInstanceParametersErrorCallback
      */
     getInstanceParametersErrorCallback: function (request, ajaxOptions, error) {
-      Ember.assert('Ambari instance parameter fetch failed: ' + error);
+      var message = 'Ambari instance parameter fetch failed: ' + error;
+      App.Helpers.ErrorBar.getInstance().show(message);
+      Ember.assert(message);
     }
   };
 


[06/50] [abbrv] ambari git commit: AMBARI-13335. Install package command during RU should honor excluded packages - temporary workaround (dlysnichenko)

Posted by nc...@apache.org.
AMBARI-13335. Install package command during RU should honor excluded packages - temporary workaround (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/85bd7cdb
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/85bd7cdb
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/85bd7cdb

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 85bd7cdbc01da56b9d16abd29f18153cefcdead6
Parents: d259917
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Wed Oct 7 14:44:14 2015 +0300
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Wed Oct 21 20:12:35 2015 +0300

----------------------------------------------------------------------
 ambari-server/conf/unix/ambari.properties       |  1 +
 .../server/configuration/Configuration.java     | 20 ++++++++++++++++++++
 .../ClusterStackVersionResourceProvider.java    | 13 +++++++++++--
 .../HostStackVersionResourceProvider.java       | 13 +++++++++++--
 4 files changed, 43 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/85bd7cdb/ambari-server/conf/unix/ambari.properties
----------------------------------------------------------------------
diff --git a/ambari-server/conf/unix/ambari.properties b/ambari-server/conf/unix/ambari.properties
index 53af453..58f09d1 100644
--- a/ambari-server/conf/unix/ambari.properties
+++ b/ambari-server/conf/unix/ambari.properties
@@ -96,6 +96,7 @@ skip.service.checks=false
 
 rolling.upgrade.min.stack=HDP-2.2
 rolling.upgrade.max.stack=
+rolling.upgrade.skip.packages.prefixes=
 
 # HTTP Header settings for Ambari Server UI
 http.strict-transport-security=max-age=31536000

http://git-wip-us.apache.org/repos/asf/ambari/blob/85bd7cdb/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
index 702e12d..04ab1ef 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
@@ -21,8 +21,10 @@ import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
+import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Properties;
@@ -179,8 +181,10 @@ public class Configuration {
   public static final String SERVER_JDBC_PROPERTIES_PREFIX = "server.jdbc.properties.";
   public static final String ROLLING_UPGRADE_MIN_STACK_KEY = "rolling.upgrade.min.stack";
   public static final String ROLLING_UPGRADE_MAX_STACK_KEY = "rolling.upgrade.max.stack";
+  public static final String ROLLING_UPGRADE_SKIP_PACKAGES_PREFIXES_KEY = "rolling.upgrade.skip.packages.prefixes";
   public static final String ROLLING_UPGRADE_MIN_STACK_DEFAULT = "HDP-2.2";
   public static final String ROLLING_UPGRADE_MAX_STACK_DEFAULT = "";
+  public static final String ROLLING_UPGRADE_SKIP_PACKAGES_PREFIXES_DEFAULT = "";
 
   public static final String SERVER_JDBC_CONNECTION_POOL = "server.jdbc.connection-pool";
   public static final String SERVER_JDBC_CONNECTION_POOL_MIN_SIZE = "server.jdbc.connection-pool.min-size";
@@ -923,6 +927,22 @@ public class Configuration {
   }
 
   /**
+   * @return a list of prefixes. Packages whose name starts with any of these
+   * prefixes should be skipped during upgrade.
+   */
+  public List<String> getRollingUpgradeSkipPackagesPrefixes() {
+    String propertyValue = properties.getProperty(ROLLING_UPGRADE_SKIP_PACKAGES_PREFIXES_KEY,
+            ROLLING_UPGRADE_SKIP_PACKAGES_PREFIXES_DEFAULT);
+    ArrayList<String> res = new ArrayList<>();
+    for (String prefix : propertyValue.split(",")) {
+      if (! prefix.isEmpty()) {
+        res.add(prefix.trim());
+      }
+    }
+    return res;
+  }
+
+  /**
    * Get the map with server config parameters.
    * Keys - public constants of this class
    * @return the map with server config parameters

http://git-wip-us.apache.org/repos/asf/ambari/blob/85bd7cdb/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index abd9f4a..2a1431d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -474,7 +474,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
     for (ServiceComponentHost component : components) {
       servicesOnHost.add(component.getServiceName());
     }
-
+    List<String> blacklistedPackagePrefixes = configuration.getRollingUpgradeSkipPackagesPrefixes();
     for (String serviceName : servicesOnHost) {
       ServiceInfo info;
       try {
@@ -488,7 +488,16 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
               host.getOsFamily());
       for (ServiceOsSpecific.Package aPackage : packagesForService) {
         if (! aPackage.getSkipUpgrade()) {
-          packages.add(aPackage);
+          boolean blacklisted = false;
+          for(String prefix : blacklistedPackagePrefixes) {
+            if (aPackage.getName().startsWith(prefix)) {
+              blacklisted = true;
+              break;
+            }
+          }
+          if (! blacklisted) {
+            packages.add(aPackage);
+          }
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/85bd7cdb/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
index a09edd0..43a4423 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
@@ -367,7 +367,7 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
     for (ServiceComponentHost component : components) {
       servicesOnHost.add(component.getServiceName());
     }
-
+    List<String> blacklistedPackagePrefixes = configuration.getRollingUpgradeSkipPackagesPrefixes();
     for (String serviceName : servicesOnHost) {
       ServiceInfo info;
       try {
@@ -380,7 +380,16 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
               host.getOsFamily());
       for (ServiceOsSpecific.Package aPackage : packagesForService) {
         if (! aPackage.getSkipUpgrade()) {
-          packages.add(aPackage);
+          boolean blacklisted = false;
+          for(String prefix : blacklistedPackagePrefixes) {
+            if (aPackage.getName().startsWith(prefix)) {
+              blacklisted = true;
+              break;
+            }
+          }
+          if (! blacklisted) {
+            packages.add(aPackage);
+          }
         }
       }
     }
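
The skip logic added to both providers above boils down to a prefix blacklist parsed from one comma-separated server property. A condensed, self-contained sketch of the same behavior follows; the property value and package names are invented examples, and the sketch trims each entry before the empty check (the patch checks isEmpty() first), a marginally stricter variant.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class SkipPrefixSketch {
  // Mirrors Configuration.getRollingUpgradeSkipPackagesPrefixes():
  // split on commas, trim, drop empty entries.
  static List<String> parsePrefixes(String propertyValue) {
    List<String> res = new ArrayList<String>();
    for (String prefix : propertyValue.split(",")) {
      if (!prefix.trim().isEmpty()) {
        res.add(prefix.trim());
      }
    }
    return res;
  }

  // Mirrors the blacklist loop in both resource providers.
  static boolean isBlacklisted(String packageName, List<String> prefixes) {
    for (String prefix : prefixes) {
      if (packageName.startsWith(prefix)) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    // e.g. rolling.upgrade.skip.packages.prefixes=mysql, openssl   (invented value)
    List<String> prefixes = parsePrefixes("mysql, openssl");
    for (String pkg : Arrays.asList("mysql-connector-java", "hadoop-hdfs", "openssl-devel")) {
      System.out.println(pkg + " -> skipped=" + isBlacklisted(pkg, prefixes));
    }
  }
}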


[20/50] [abbrv] ambari git commit: https://issues.apache.org/jira/browse/AMBARI-13450. AMBARI-13450 Bootstrap Cluster via different SSH Port Number (Selim Ozcan via aonishuk)

Posted by nc...@apache.org.
https://issues.apache.org/jira/browse/AMBARI-13450. AMBARI-13450 Bootstrap Cluster via different SSH Port Number (Selim Ozcan via aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/39c04ac9
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/39c04ac9
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/39c04ac9

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 39c04ac9b44f4eb1035b378337d745d77ee83d53
Parents: 565dc0d
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Thu Oct 22 16:08:48 2015 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Thu Oct 22 16:08:48 2015 +0300

----------------------------------------------------------------------
 .../ambari/server/bootstrap/BSRunner.java       |  28 +++--
 .../ambari/server/bootstrap/SshHostInfo.java    |   7 ++
 ambari-server/src/main/python/bootstrap.py      |  66 ++++++------
 ambari-server/src/test/python/TestBootstrap.py  | 102 +++++++++----------
 ambari-web/app/controllers/wizard.js            |   2 +
 .../app/controllers/wizard/step2_controller.js  |  25 ++++-
 .../app/controllers/wizard/step3_controller.js  |   2 +
 ambari-web/app/messages.js                      |   3 +
 ambari-web/app/styles/application.less          |   4 +
 ambari-web/app/templates/wizard/step2.hbs       |  12 +++
 .../test/controllers/wizard/step2_test.js       |  45 ++++++++
 .../test/controllers/wizard/step3_test.js       |   4 +
 12 files changed, 205 insertions(+), 95 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/39c04ac9/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSRunner.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSRunner.java b/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSRunner.java
index 0a55131..44faa4f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSRunner.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSRunner.java
@@ -41,6 +41,7 @@ class BSRunner extends Thread {
   private static Log LOG = LogFactory.getLog(BSRunner.class);
 
   private static final String DEFAULT_USER = "root";
+  private static final String DEFAULT_SSHPORT = "22";
 
   private  boolean finished = false;
   private SshHostInfo sshHostInfo;
@@ -164,7 +165,13 @@ class BSRunner extends Thread {
     if (user == null || user.isEmpty()) {
       user = DEFAULT_USER;
     }
-    String command[] = new String[12];
+
+    String sshPort = sshHostInfo.getSshPort();
+    if(sshPort == null || sshPort.isEmpty()){
+       sshPort = DEFAULT_SSHPORT;
+    }
+
+    String command[] = new String[13];
     BSStat stat = BSStat.RUNNING;
     String scriptlog = "";
     try {
@@ -194,14 +201,15 @@ class BSRunner extends Thread {
       command[1] = hostString;
       command[2] = this.requestIdDir.toString();
       command[3] = user;
-      command[4] = this.sshKeyFile.toString();
-      command[5] = this.agentSetupScript.toString();
-      command[6] = this.ambariHostname;
-      command[7] = this.clusterOsFamily;
-      command[8] = this.projectVersion;
-      command[9] = this.serverPort+"";
-      command[10] = userRunAs;
-      command[11] = (this.passwordFile==null) ? "null" : this.passwordFile.toString();
+      command[4] = sshPort;
+      command[5] = this.sshKeyFile.toString();
+      command[6] = this.agentSetupScript.toString();
+      command[7] = this.ambariHostname;
+      command[8] = this.clusterOsFamily;
+      command[9] = this.projectVersion;
+      command[10] = this.serverPort+"";
+      command[11] = userRunAs;
+      command[12] = (this.passwordFile==null) ? "null" : this.passwordFile.toString();
 
       Map<String, String> envVariables = new HashMap<String, String>();
 
@@ -218,7 +226,7 @@ class BSRunner extends Thread {
       }
 
       LOG.info("Host= " + hostString + " bs=" + this.bsScript + " requestDir=" +
-          requestIdDir + " user=" + user + " keyfile=" + this.sshKeyFile +
+          requestIdDir + " user=" + user + " sshPort=" + sshPort + " keyfile=" + this.sshKeyFile +
           " passwordFile " + this.passwordFile + " server=" + this.ambariHostname +
           " version=" + projectVersion + " serverPort=" + this.serverPort + " userRunAs=" + userRunAs);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/39c04ac9/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/SshHostInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/SshHostInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/SshHostInfo.java
index 822e972..9a7490f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/SshHostInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/SshHostInfo.java
@@ -49,6 +49,9 @@ public class SshHostInfo {
   private String user;
 
   @XmlElement
+  private String sshPort;
+
+  @XmlElement
   private String password;
   
   @XmlElement
@@ -86,6 +89,10 @@ public class SshHostInfo {
     this.user = user;
   }
 
+  public String getSshPort(){ return sshPort; }
+
+  public void setSshPort(String sshPort){ this.sshPort = sshPort; }
+
   public String getPassword() {
     return password;
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/39c04ac9/ambari-server/src/main/python/bootstrap.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/bootstrap.py b/ambari-server/src/main/python/bootstrap.py
index 3eba75c..75bb26a 100755
--- a/ambari-server/src/main/python/bootstrap.py
+++ b/ambari-server/src/main/python/bootstrap.py
@@ -74,8 +74,9 @@ class HostLog:
 class SCP:
   """ SCP implementation that is thread based. The status can be returned using
    status val """
-  def __init__(self, user, sshkey_file, host, inputFile, remote, bootdir, host_log):
+  def __init__(self, user, sshPort, sshkey_file, host, inputFile, remote, bootdir, host_log):
     self.user = user
+    self.sshPort = sshPort
     self.sshkey_file = sshkey_file
     self.host = host
     self.inputFile = inputFile
@@ -90,7 +91,7 @@ class SCP:
                   "-r",
                   "-o", "ConnectTimeout=60",
                   "-o", "BatchMode=yes",
-                  "-o", "StrictHostKeyChecking=no",
+                  "-o", "StrictHostKeyChecking=no", "-P", self.sshPort,
                   "-i", self.sshkey_file, self.inputFile, self.user + "@" +
                                                          self.host + ":" + self.remote]
     if DEBUG:
@@ -111,8 +112,9 @@ class SCP:
 
 class SSH:
   """ Ssh implementation of this """
-  def __init__(self, user, sshkey_file, host, command, bootdir, host_log, errorMessage = None):
+  def __init__(self, user, sshPort, sshkey_file, host, command, bootdir, host_log, errorMessage = None):
     self.user = user
+    self.sshPort = sshPort
     self.sshkey_file = sshkey_file
     self.host = host
     self.command = command
@@ -128,7 +130,7 @@ class SSH:
                   "-o", "StrictHostKeyChecking=no",
                   "-o", "BatchMode=yes",
                   "-tt", # Should prevent "tput: No value for $TERM and no -T specified" warning
-                  "-i", self.sshkey_file,
+                  "-i", self.sshkey_file, "-p", self.sshPort,
                   self.user + "@" + self.host, self.command]
     if DEBUG:
       self.host_log.write("Running ssh command " + ' '.join(sshcommand))
@@ -454,7 +456,7 @@ class BootstrapDefault(Bootstrap):
     params = self.shared_state
     self.host_log.write("==========================\n")
     self.host_log.write("Copying OS type check script...")
-    scp = SCP(params.user, params.sshkey_file, self.host, fileToCopy,
+    scp = SCP(params.user, params.sshPort, params.sshkey_file, self.host, fileToCopy,
               target, params.bootdir, self.host_log)
     result = scp.run()
     self.host_log.write("\n")
@@ -467,7 +469,7 @@ class BootstrapDefault(Bootstrap):
     params = self.shared_state
     self.host_log.write("==========================\n")
     self.host_log.write("Copying common functions script...")
-    scp = SCP(params.user, params.sshkey_file, self.host, fileToCopy,
+    scp = SCP(params.user, params.sshPort, params.sshkey_file, self.host, fileToCopy,
               target, params.bootdir, self.host_log)
     result = scp.run()
     self.host_log.write("\n")
@@ -507,7 +509,7 @@ class BootstrapDefault(Bootstrap):
     if (os.path.exists(fileToCopy)):
       self.host_log.write("==========================\n")
       self.host_log.write("Copying repo file to 'tmp' folder...")
-      scp = SCP(params.user, params.sshkey_file, self.host, fileToCopy,
+      scp = SCP(params.user, params.sshPort, params.sshkey_file, self.host, fileToCopy,
                 target, params.bootdir, self.host_log)
       retcode1 = scp.run()
       self.host_log.write("\n")
@@ -517,7 +519,7 @@ class BootstrapDefault(Bootstrap):
       self.host_log.write("Moving file to repo dir...")
       targetDir = self.getRepoDir()
       command = self.getMoveRepoFileCommand(targetDir)
-      ssh = SSH(params.user, params.sshkey_file, self.host, command,
+      ssh = SSH(params.user, params.sshPort, params.sshkey_file, self.host, command,
                 params.bootdir, self.host_log)
       retcode2 = ssh.run()
       self.host_log.write("\n")
@@ -526,7 +528,7 @@ class BootstrapDefault(Bootstrap):
       self.host_log.write("==========================\n")
       self.host_log.write("Changing permissions for ambari.repo...")
       command = self.getRepoFileChmodCommand()
-      ssh = SSH(params.user, params.sshkey_file, self.host, command,
+      ssh = SSH(params.user, params.sshPort, params.sshkey_file, self.host, command,
                 params.bootdir, self.host_log)
       retcode4 = ssh.run()
       self.host_log.write("\n")
@@ -536,7 +538,7 @@ class BootstrapDefault(Bootstrap):
         self.host_log.write("==========================\n")
         self.host_log.write("Update apt cache of repository...")
         command = self.getAptUpdateCommand()
-        ssh = SSH(params.user, params.sshkey_file, self.host, command,
+        ssh = SSH(params.user, params.sshPort, params.sshkey_file, self.host, command,
                   params.bootdir, self.host_log)
         retcode2 = ssh.run()
         self.host_log.write("\n")
@@ -554,7 +556,7 @@ class BootstrapDefault(Bootstrap):
     self.host_log.write("Copying setup script file...")
     fileToCopy = params.setup_agent_file
     target = self.getRemoteName(self.SETUP_SCRIPT_FILENAME)
-    scp = SCP(params.user, params.sshkey_file, self.host, fileToCopy,
+    scp = SCP(params.user, params.sshPort, params.sshkey_file, self.host, fileToCopy,
               target, params.bootdir, self.host_log)
     retcode3 = scp.run()
     self.host_log.write("\n")
@@ -600,7 +602,7 @@ class BootstrapDefault(Bootstrap):
               (self.getOsCheckScriptRemoteLocation(),
                PYTHON_ENV, self.getOsCheckScriptRemoteLocation(), params.cluster_os_type)
 
-    ssh = SSH(params.user, params.sshkey_file, self.host, command,
+    ssh = SSH(params.user, params.sshPort, params.sshkey_file, self.host, command,
               params.bootdir, self.host_log)
     retcode = ssh.run()
     self.host_log.write("\n")
@@ -615,7 +617,7 @@ class BootstrapDefault(Bootstrap):
       command = "dpkg --get-selections|grep -e '^sudo\s*install'"
     else:
       command = "rpm -qa | grep -e '^sudo\-'"
-    ssh = SSH(params.user, params.sshkey_file, self.host, command,
+    ssh = SSH(params.user, params.sshPort, params.sshkey_file, self.host, command,
               params.bootdir, self.host_log,
               errorMessage="Error: Sudo command is not available. "
                            "Please install the sudo command.")
@@ -627,7 +629,7 @@ class BootstrapDefault(Bootstrap):
     # Copy the password file
     self.host_log.write("Copying password file to 'tmp' folder...")
     params = self.shared_state
-    scp = SCP(params.user, params.sshkey_file, self.host, params.password_file,
+    scp = SCP(params.user, params.sshPort, params.sshkey_file, self.host, params.password_file,
               self.getPasswordFile(), params.bootdir, self.host_log)
     retcode1 = scp.run()
 
@@ -636,7 +638,7 @@ class BootstrapDefault(Bootstrap):
     # Change password file mode to 600
     self.host_log.write("Changing password file mode...")
     command = "chmod 600 " + self.getPasswordFile()
-    ssh = SSH(params.user, params.sshkey_file, self.host, command,
+    ssh = SSH(params.user, params.sshPort, params.sshkey_file, self.host, command,
               params.bootdir, self.host_log)
     retcode2 = ssh.run()
 
@@ -648,7 +650,7 @@ class BootstrapDefault(Bootstrap):
     self.host_log.write("Changing password file mode...")
     params = self.shared_state
     command = "chmod 600 " + self.getPasswordFile()
-    ssh = SSH(params.user, params.sshkey_file, self.host, command,
+    ssh = SSH(params.user, params.sshPort, params.sshkey_file, self.host, command,
               params.bootdir, self.host_log)
     retcode = ssh.run()
     self.host_log.write("Change password file mode on host finished")
@@ -659,7 +661,7 @@ class BootstrapDefault(Bootstrap):
     self.host_log.write("Deleting password file...")
     params = self.shared_state
     command = "rm " + self.getPasswordFile()
-    ssh = SSH(params.user, params.sshkey_file, self.host, command,
+    ssh = SSH(params.user, params.sshPort, params.sshkey_file, self.host, command,
               params.bootdir, self.host_log)
     retcode = ssh.run()
     self.host_log.write("Deleting password file finished")
@@ -675,7 +677,7 @@ class BootstrapDefault(Bootstrap):
     command = "sudo mkdir -p {0} ; sudo chown -R {1} {0} ; sudo chmod 755 {3} ; sudo chmod 755 {2} ; sudo chmod 777 {0}".format(
       self.TEMP_FOLDER, quote_bash_args(params.user), DEFAULT_AGENT_DATA_FOLDER, DEFAULT_AGENT_LIB_FOLDER)
 
-    ssh = SSH(params.user, params.sshkey_file, self.host, command,
+    ssh = SSH(params.user, params.sshPort, params.sshkey_file, self.host, command,
               params.bootdir, self.host_log)
     retcode = ssh.run()
     self.host_log.write("\n")
@@ -692,7 +694,7 @@ class BootstrapDefault(Bootstrap):
     self.host_log.write("==========================\n")
     self.host_log.write("Running setup agent script...")
     command = self.getRunSetupCommand(self.host)
-    ssh = SSH(params.user, params.sshkey_file, self.host, command,
+    ssh = SSH(params.user, params.sshPort, params.sshkey_file, self.host, command,
               params.bootdir, self.host_log)
     retcode = ssh.run()
     self.host_log.write("\n")
@@ -791,11 +793,12 @@ class PBootstrap:
 
 
 class SharedState:
-  def __init__(self, user, sshkey_file, script_dir, boottmpdir, setup_agent_file,
+  def __init__(self, user, sshPort, sshkey_file, script_dir, boottmpdir, setup_agent_file,
                ambari_server, cluster_os_type, ambari_version, server_port,
                user_run_as, password_file = None):
     self.hostlist_to_remove_password_file = None
     self.user = user
+    self.sshPort = sshPort
     self.sshkey_file = sshkey_file
     self.bootdir = boottmpdir
     self.script_dir = script_dir
@@ -817,7 +820,7 @@ def main(argv=None):
   onlyargs = argv[1:]
   if len(onlyargs) < 3:
     sys.stderr.write("Usage: <comma separated hosts> "
-                     "<tmpdir for storage> <user> <sshkey_file> <agent setup script>"
+                     "<tmpdir for storage> <user> <sshPort> <sshkey_file> <agent setup script>"
                      " <ambari-server name> <cluster os type> <ambari version> <ambari port> <user_run_as> <passwordFile>\n")
     sys.exit(2)
     pass
@@ -827,14 +830,15 @@ def main(argv=None):
   hostList = onlyargs[0].split(",")
   bootdir =  onlyargs[1]
   user = onlyargs[2]
-  sshkey_file = onlyargs[3]
-  setupAgentFile = onlyargs[4]
-  ambariServer = onlyargs[5]
-  cluster_os_type = onlyargs[6]
-  ambariVersion = onlyargs[7]
-  server_port = onlyargs[8]
-  user_run_as = onlyargs[9]
-  passwordFile = onlyargs[10]
+  sshPort = onlyargs[3]
+  sshkey_file = onlyargs[4]
+  setupAgentFile = onlyargs[5]
+  ambariServer = onlyargs[6]
+  cluster_os_type = onlyargs[7]
+  ambariVersion = onlyargs[8]
+  server_port = onlyargs[9]
+  user_run_as = onlyargs[10]
+  passwordFile = onlyargs[11]
 
   if not OSCheck.is_windows_family():
     # ssh doesn't like open files
@@ -845,10 +849,10 @@ def main(argv=None):
 
   logging.info("BootStrapping hosts " + pprint.pformat(hostList) +
                " using " + scriptDir + " cluster primary OS: " + cluster_os_type +
-               " with user '" + user + "' sshKey File " + sshkey_file + " password File " + passwordFile +\
+               " with user '" + user + "'with ssh Port '" + sshPort + "' sshKey File " + sshkey_file + " password File " + passwordFile +\
                " using tmp dir " + bootdir + " ambari: " + ambariServer +"; server_port: " + server_port +\
                "; ambari version: " + ambariVersion+"; user_run_as: " + user_run_as)
-  sharedState = SharedState(user, sshkey_file, scriptDir, bootdir, setupAgentFile,
+  sharedState = SharedState(user, sshPort, sshkey_file, scriptDir, bootdir, setupAgentFile,
                        ambariServer, cluster_os_type, ambariVersion,
                        server_port, user_run_as, passwordFile)
   pbootstrap = PBootstrap(hostList, sharedState)
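
One detail worth noting in the hunks above: OpenSSH spells the port flag differently for the two tools, scp takes an upper-case -P while ssh takes a lower-case -p, which is exactly how the patch threads sshPort into the two command lines. A minimal sketch of the asymmetry; the user, host, port and key path are placeholders.

import java.util.Arrays;
import java.util.List;

public class SshPortFlagSketch {
  public static void main(String[] args) {
    String user = "root";
    String host = "agent.example.com";   // placeholder
    String port = "2222";                // placeholder non-default port

    // scp uses an upper-case -P for the port...
    List<String> scpCommand = Arrays.asList("scp", "-o", "ConnectTimeout=60",
        "-o", "BatchMode=yes", "-o", "StrictHostKeyChecking=no", "-P", port,
        "-i", "/path/to/sshkey", "local.file", user + "@" + host + ":/tmp/remote.file");

    // ...while ssh uses a lower-case -p for the same thing.
    List<String> sshCommand = Arrays.asList("ssh", "-o", "ConnectTimeOut=60",
        "-o", "StrictHostKeyChecking=no", "-o", "BatchMode=yes", "-p", port,
        "-i", "/path/to/sshkey", user + "@" + host, "echo ok");

    System.out.println(scpCommand);
    System.out.println(sshCommand);
  }
}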

http://git-wip-us.apache.org/repos/asf/ambari/blob/39c04ac9/ambari-server/src/test/python/TestBootstrap.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestBootstrap.py b/ambari-server/src/test/python/TestBootstrap.py
index e143a68..b60c35d 100644
--- a/ambari-server/src/test/python/TestBootstrap.py
+++ b/ambari-server/src/test/python/TestBootstrap.py
@@ -43,7 +43,7 @@ class TestBootstrap(TestCase):
 
 
   def test_getRemoteName(self):
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                       "setupAgentFile", "ambariServer", "centos6", None, "8440", "root")
     res = bootstrap_obj = Bootstrap("hostname", shared_state)
     utime1 = 1234
@@ -65,7 +65,7 @@ class TestBootstrap(TestCase):
   # TODO: test_return_error_message_for_missing_sudo_package
 
   def test_getAmbariPort(self):
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                None, "8440", "root")
     bootstrap_obj = Bootstrap("hostname", shared_state)
@@ -82,11 +82,11 @@ class TestBootstrap(TestCase):
   @patch("os.path.dirname")
   @patch("os.path.realpath")
   def test_bootstrap_main(self, dirname_mock, realpath_mock, run_mock, exit_mock, stderr_mock, subprocess_Popen_mock):
-    bootstrap.main(["bootstrap.py", "hostname,hostname2", "/tmp/bootstrap", "root", "sshkey_file", "setupAgent.py", "ambariServer", \
+    bootstrap.main(["bootstrap.py", "hostname,hostname2", "/tmp/bootstrap", "root", "123", "sshkey_file", "setupAgent.py", "ambariServer", \
                     "centos6", "1.1.1", "8440", "root", "passwordfile"])
     self.assertTrue(run_mock.called)
     run_mock.reset_mock()
-    bootstrap.main(["bootstrap.py", "hostname,hostname2", "/tmp/bootstrap", "root", "sshkey_file", "setupAgent.py", "ambariServer", \
+    bootstrap.main(["bootstrap.py", "hostname,hostname2", "/tmp/bootstrap", "root", "123", "sshkey_file", "setupAgent.py", "ambariServer", \
                     "centos6", "1.1.1", "8440", "root", None])
     self.assertTrue(run_mock.called)
     run_mock.reset_mock()
@@ -104,7 +104,7 @@ class TestBootstrap(TestCase):
 
   @patch("os.environ")
   def test_getRunSetupWithPasswordCommand(self, environ_mock):
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                None, "8440", "root")
     environ_mock.__getitem__.return_value = "TEST_PASSPHRASE"
@@ -118,7 +118,7 @@ class TestBootstrap(TestCase):
 
 
   def test_generateRandomFileName(self):
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                None, "8440", "root")
     bootstrap_obj = Bootstrap("hostname", shared_state)
@@ -129,7 +129,7 @@ class TestBootstrap(TestCase):
   @patch.object(OSCheck, "is_redhat_family")
   @patch.object(OSCheck, "is_suse_family")
   def test_getRepoDir(self, is_suse_family, is_redhat_family):
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                None, "8440", "root")
     bootstrap_obj = Bootstrap("hostname", shared_state)
@@ -145,7 +145,7 @@ class TestBootstrap(TestCase):
     self.assertEquals(res, "/etc/yum.repos.d")
 
   def test_getSetupScript(self):
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                None, "8440", "root")
     bootstrap_obj = Bootstrap("hostname", shared_state)
@@ -155,7 +155,7 @@ class TestBootstrap(TestCase):
   def test_run_setup_agent_command_ends_with_project_version(self):
     os.environ[AMBARI_PASSPHRASE_VAR_NAME] = ""
     version = "1.1.1"
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                version, "8440", "root")
     bootstrap_obj = Bootstrap("hostname", shared_state)
@@ -166,7 +166,7 @@ class TestBootstrap(TestCase):
   def test_agent_setup_command_without_project_version(self):
     os.environ[AMBARI_PASSPHRASE_VAR_NAME] = ""
     version = None
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                version, "8440", "root")
     bootstrap_obj = Bootstrap("hostname", shared_state)
@@ -207,7 +207,7 @@ class TestBootstrap(TestCase):
 
   @patch("subprocess.Popen")
   def test_SCP(self, popenMock):
-    params = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    params = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                   "setupAgentFile", "ambariServer", "centos6",
                                   "1.2.1", "8440", "root")
     host_log_mock = MagicMock()
@@ -216,7 +216,7 @@ class TestBootstrap(TestCase):
       log['text'] = log['text'] + text
 
     host_log_mock.write.side_effect = write_side_effect
-    scp = SCP(params.user, params.sshkey_file, "dummy-host", "src/file",
+    scp = SCP(params.user, params.sshPort, params.sshkey_file, "dummy-host", "src/file",
               "dst/file", params.bootdir, host_log_mock)
     log_sample = "log_sample"
     error_sample = "error_sample"
@@ -233,7 +233,7 @@ class TestBootstrap(TestCase):
     self.assertTrue(error_sample in log['text'])
     command_str = str(popenMock.call_args[0][0])
     self.assertEquals(command_str, "['scp', '-r', '-o', 'ConnectTimeout=60', '-o', "
-        "'BatchMode=yes', '-o', 'StrictHostKeyChecking=no', '-i', 'sshkey_file',"
+        "'BatchMode=yes', '-o', 'StrictHostKeyChecking=no', '-P', '123', '-i', 'sshkey_file',"
         " 'src/file', 'root@dummy-host:dst/file']")
     self.assertEqual(retcode["exitstatus"], 0)
 
@@ -250,7 +250,7 @@ class TestBootstrap(TestCase):
 
   @patch("subprocess.Popen")
   def test_SSH(self, popenMock):
-    params = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    params = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                   "setupAgentFile", "ambariServer", "centos6",
                                   "1.2.1", "8440", "root")
     host_log_mock = MagicMock()
@@ -259,7 +259,7 @@ class TestBootstrap(TestCase):
       log['text'] = log['text'] + text
 
     host_log_mock.write.side_effect = write_side_effect
-    ssh = SSH(params.user, params.sshkey_file, "dummy-host", "dummy-command",
+    ssh = SSH(params.user, params.sshPort, params.sshkey_file, "dummy-host", "dummy-command",
               params.bootdir, host_log_mock)
     log_sample = "log_sample"
     error_sample = "error_sample"
@@ -277,7 +277,7 @@ class TestBootstrap(TestCase):
     command_str = str(popenMock.call_args[0][0])
     self.assertEquals(command_str, "['ssh', '-o', 'ConnectTimeOut=60', '-o', "
             "'StrictHostKeyChecking=no', '-o', 'BatchMode=yes', '-tt', '-i', "
-            "'sshkey_file', 'root@dummy-host', 'dummy-command']")
+            "'sshkey_file', '-p', '123', 'root@dummy-host', 'dummy-command']")
     self.assertEqual(retcode["exitstatus"], 0)
 
     log['text'] = ""
@@ -295,7 +295,7 @@ class TestBootstrap(TestCase):
     process.returncode = 1
 
     dummy_error_message = "dummy_error_message"
-    ssh = SSH(params.user, params.sshkey_file, "dummy-host", "dummy-command",
+    ssh = SSH(params.user, params.sshPort, params.sshkey_file, "dummy-host", "dummy-command",
               params.bootdir, host_log_mock, errorMessage= dummy_error_message)
     retcode = ssh.run()
 
@@ -306,7 +306,7 @@ class TestBootstrap(TestCase):
 
 
   def test_getOsCheckScript(self):
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                None, "8440", "root")
     bootstrap_obj = Bootstrap("hostname", shared_state)
@@ -316,7 +316,7 @@ class TestBootstrap(TestCase):
 
   @patch.object(BootstrapDefault, "getRemoteName")
   def test_getOsCheckScriptRemoteLocation(self, getRemoteName_mock):
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                None, "8440", "root")
     bootstrap_obj = Bootstrap("hostname", shared_state)
@@ -328,7 +328,7 @@ class TestBootstrap(TestCase):
 
   @patch.object(BootstrapDefault, "is_suse")
   def test_getRepoFile(self, is_suse_mock):
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                None, "8440", "root")
     bootstrap_obj = Bootstrap("hostname", shared_state)
@@ -342,7 +342,7 @@ class TestBootstrap(TestCase):
   @patch.object(HostLog, "write")
   def test_createTargetDir(self, write_mock, run_mock,
                             init_mock):
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                None, "8440", "root")
     bootstrap_obj = Bootstrap("hostname", shared_state)
@@ -351,7 +351,7 @@ class TestBootstrap(TestCase):
     run_mock.return_value = expected
     res = bootstrap_obj.createTargetDir()
     self.assertEquals(res, expected)
-    command = str(init_mock.call_args[0][3])
+    command = str(init_mock.call_args[0][4])
     self.assertEqual(command,
                      "sudo mkdir -p /var/lib/ambari-agent/tmp ; "
                      "sudo chown -R root /var/lib/ambari-agent/tmp ; "
@@ -366,7 +366,7 @@ class TestBootstrap(TestCase):
   @patch.object(HostLog, "write")
   def test_copyOsCheckScript(self, write_mock, run_mock, init_mock,
                     getOsCheckScriptRemoteLocation_mock, getOsCheckScript_mock):
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                None, "8440", "root")
     bootstrap_obj = Bootstrap("hostname", shared_state)
@@ -377,8 +377,8 @@ class TestBootstrap(TestCase):
     run_mock.return_value = expected
     res = bootstrap_obj.copyOsCheckScript()
     self.assertEquals(res, expected)
-    input_file = str(init_mock.call_args[0][3])
-    remote_file = str(init_mock.call_args[0][4])
+    input_file = str(init_mock.call_args[0][4])
+    remote_file = str(init_mock.call_args[0][5])
     self.assertEqual(input_file, "OsCheckScript")
     self.assertEqual(remote_file, "OsCheckScriptRemoteLocation")
 
@@ -389,7 +389,7 @@ class TestBootstrap(TestCase):
   @patch.object(OSCheck, "is_ubuntu_family")
   @patch.object(OSCheck, "is_redhat_family")
   def test_getRepoFile(self, is_redhat_family, is_ubuntu_family, is_suse_family, hasPassword_mock, getRemoteName_mock):
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                None, "8440", "root")
     is_redhat_family.return_value = True
@@ -437,7 +437,7 @@ class TestBootstrap(TestCase):
     os_path_exists_mock.side_effect = os_path_exists_side_effect
     os_path_exists_mock.return_value = None
 
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                None, "8440", "root")
     is_redhat_family.return_value = True
@@ -459,11 +459,11 @@ class TestBootstrap(TestCase):
     ssh_run_mock.side_effect = [expected2, expected4]
     res = bootstrap_obj.copyNeededFiles()
     self.assertEquals(res, expected1["exitstatus"])
-    input_file = str(scp_init_mock.call_args[0][3])
-    remote_file = str(scp_init_mock.call_args[0][4])
+    input_file = str(scp_init_mock.call_args[0][4])
+    remote_file = str(scp_init_mock.call_args[0][5])
     self.assertEqual(input_file, "setupAgentFile")
     self.assertEqual(remote_file, "RemoteName")
-    command = str(ssh_init_mock.call_args[0][3])
+    command = str(ssh_init_mock.call_args[0][4])
     self.assertEqual(command, "sudo chmod 644 RepoFile")
     # Another order
     expected1 = {"exitstatus": 0, "log": "log0", "errormsg": "errorMsg"}
@@ -507,7 +507,7 @@ class TestBootstrap(TestCase):
   @patch.object(HostLog, "write")
   def test_runOsCheckScript(self, write_mock, run_mock,
                             init_mock, getOsCheckScriptRemoteLocation_mock):
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                None, "8440", "root")
     bootstrap_obj = Bootstrap("hostname", shared_state)
@@ -517,7 +517,7 @@ class TestBootstrap(TestCase):
     run_mock.return_value = expected
     res = bootstrap_obj.runOsCheckScript()
     self.assertEquals(res, expected)
-    command = str(init_mock.call_args[0][3])
+    command = str(init_mock.call_args[0][4])
     self.assertEqual(command,
                      "chmod a+x OsCheckScriptRemoteLocation && "
                      "env PYTHONPATH=$PYTHONPATH:/var/lib/ambari-agent/tmp OsCheckScriptRemoteLocation centos6")
@@ -529,7 +529,7 @@ class TestBootstrap(TestCase):
   @patch.object(HostLog, "write")
   def test_runSetupAgent(self, write_mock, run_mock,
                          getRunSetupCommand_mock, init_mock):
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                None, "8440", "root")
     bootstrap_obj = Bootstrap("hostname", shared_state)
@@ -539,7 +539,7 @@ class TestBootstrap(TestCase):
     run_mock.return_value = expected
     res = bootstrap_obj.runSetupAgent()
     self.assertEquals(res, expected)
-    command = str(init_mock.call_args[0][3])
+    command = str(init_mock.call_args[0][4])
     self.assertEqual(command, "RunSetupCommand")
 
 
@@ -549,7 +549,7 @@ class TestBootstrap(TestCase):
   def test_getRunSetupCommand(self, getRunSetupWithoutPasswordCommand_mock,
                               getRunSetupWithPasswordCommand_mock,
                               hasPassword_mock):
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                None, "8440", "root")
     bootstrap_obj = Bootstrap("hostname", shared_state)
@@ -568,7 +568,7 @@ class TestBootstrap(TestCase):
   @patch.object(HostLog, "write")
   def test_createDoneFile(self, write_mock):
     tmp_dir = tempfile.gettempdir()
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", tmp_dir,
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", tmp_dir,
                                "setupAgentFile", "ambariServer", "centos6",
                                None, "8440", "root")
     bootstrap_obj = Bootstrap("hostname", shared_state)
@@ -587,7 +587,7 @@ class TestBootstrap(TestCase):
   @patch.object(SSH, "run")
   @patch.object(HostLog, "write")
   def test_checkSudoPackage(self, write_mock, run_mock, init_mock, is_redhat_family, is_ubuntu_family, is_suse_family):
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                None, "8440", "root")
     bootstrap_obj = Bootstrap("hostname", shared_state)
@@ -599,7 +599,7 @@ class TestBootstrap(TestCase):
     is_suse_family.return_value = False
     res = bootstrap_obj.checkSudoPackage()
     self.assertEquals(res, expected)
-    command = str(init_mock.call_args[0][3])
+    command = str(init_mock.call_args[0][4])
     self.assertEqual(command, "rpm -qa | grep -e '^sudo\-'")
 
   @patch.object(OSCheck, "is_suse_family")
@@ -610,7 +610,7 @@ class TestBootstrap(TestCase):
   @patch.object(HostLog, "write")
   def test_checkSudoPackageUbuntu(self, write_mock, run_mock, init_mock,
                                   is_redhat_family, is_ubuntu_family, is_suse_family):
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "ubuntu12",
                                None, "8440", "root")
     is_redhat_family.return_value = False
@@ -622,7 +622,7 @@ class TestBootstrap(TestCase):
     run_mock.return_value = expected
     res = bootstrap_obj.checkSudoPackage()
     self.assertEquals(res, expected)
-    command = str(init_mock.call_args[0][3])
+    command = str(init_mock.call_args[0][4])
     self.assertEqual(command, "dpkg --get-selections|grep -e '^sudo\s*install'")
 
 
@@ -632,7 +632,7 @@ class TestBootstrap(TestCase):
   @patch.object(BootstrapDefault, "getPasswordFile")
   def test_deletePasswordFile(self, getPasswordFile_mock, write_mock, run_mock,
                               init_mock):
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                None, "8440", "root")
     bootstrap_obj = Bootstrap("hostname", shared_state)
@@ -642,7 +642,7 @@ class TestBootstrap(TestCase):
     run_mock.return_value = expected
     res = bootstrap_obj.deletePasswordFile()
     self.assertEquals(res, expected)
-    command = str(init_mock.call_args[0][3])
+    command = str(init_mock.call_args[0][4])
     self.assertEqual(command, "rm PasswordFile")
 
 
@@ -655,7 +655,7 @@ class TestBootstrap(TestCase):
   def test_copyPasswordFile(self, write_mock, ssh_run_mock,
                             ssh_init_mock, scp_run_mock,
                             scp_init_mock, getPasswordFile_mock):
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                None, "8440", "root", password_file="PasswordFile")
     bootstrap_obj = Bootstrap("hostname", shared_state)
@@ -669,11 +669,11 @@ class TestBootstrap(TestCase):
     ssh_run_mock.return_value = expected2
     res = bootstrap_obj.copyPasswordFile()
     self.assertEquals(res, expected1["exitstatus"])
-    input_file = str(scp_init_mock.call_args[0][3])
+    input_file = str(scp_init_mock.call_args[0][4])
-    remote_file = str(scp_init_mock.call_args[0][4])
+    remote_file = str(scp_init_mock.call_args[0][5])
     self.assertEqual(input_file, "PasswordFile")
     self.assertEqual(remote_file, "PasswordFile")
-    command = str(ssh_init_mock.call_args[0][3])
+    command = str(ssh_init_mock.call_args[0][4])
     self.assertEqual(command, "chmod 600 PasswordFile")
     # Another order
     expected1 = {"exitstatus": 0, "log": "log0", "errormsg": "errorMsg"}
@@ -688,7 +688,7 @@ class TestBootstrap(TestCase):
   @patch.object(BootstrapDefault, "getPasswordFile")
   def test_changePasswordFileModeOnHost(self, getPasswordFile_mock, write_mock,
                                         run_mock, init_mock):
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                None, "8440", "root")
     bootstrap_obj = Bootstrap("hostname", shared_state)
@@ -698,14 +698,14 @@ class TestBootstrap(TestCase):
     run_mock.return_value = expected
     res = bootstrap_obj.changePasswordFileModeOnHost()
     self.assertEquals(res, expected)
-    command = str(init_mock.call_args[0][3])
+    command = str(init_mock.call_args[0][4])
     self.assertEqual(command, "chmod 600 PasswordFile")
 
 
   @patch.object(HostLog, "write")
   def test_try_to_execute(self, write_mock):
     expected = 43
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                None, "8440", "root")
     bootstrap_obj = Bootstrap("hostname", shared_state)
@@ -738,7 +738,7 @@ class TestBootstrap(TestCase):
   @patch("logging.error")
   def test_run(self, error_mock, warn_mock, write_mock, createDoneFile_mock,
                hasPassword_mock, try_to_execute_mock):
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                None, "8440", "root")
     bootstrap_obj = Bootstrap("hostname", shared_state)
@@ -810,7 +810,7 @@ class TestBootstrap(TestCase):
   @patch.object(BootstrapDefault, "createDoneFile")
   @patch.object(HostLog, "write")
   def test_interruptBootstrap(self, write_mock, createDoneFile_mock):
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                None, "8440", "root")
     bootstrap_obj = Bootstrap("hostname", shared_state)
@@ -827,7 +827,7 @@ class TestBootstrap(TestCase):
   @patch.object(BootstrapDefault, "getStatus")
   def test_PBootstrap(self, getStatus_mock, interruptBootstrap_mock, start_mock,
                       info_mock, warn_mock, time_mock, sleep_mock):
-    shared_state = SharedState("root", "sshkey_file", "scriptDir", "bootdir",
+    shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
                                "setupAgentFile", "ambariServer", "centos6",
                                None, "8440", "root")
     n = 180

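The assertions in test_SCP and test_SSH above pin down exactly how the new port argument is expected to surface in the generated commands: scp takes it as an uppercase -P before the identity flag, while ssh takes it as a lowercase -p after the identity flag. A minimal, runnable Python sketch of argv construction that would satisfy those assertions (the function names here are illustrative, not the actual Bootstrap API):

def build_scp_argv(user, ssh_port, sshkey_file, host, src, dst):
    # scp spells the port flag -P (uppercase); see command_str in test_SCP.
    return ["scp", "-r", "-o", "ConnectTimeout=60", "-o", "BatchMode=yes",
            "-o", "StrictHostKeyChecking=no", "-P", ssh_port, "-i", sshkey_file,
            src, "{0}@{1}:{2}".format(user, host, dst)]

def build_ssh_argv(user, ssh_port, sshkey_file, host, command):
    # ssh spells the port flag -p (lowercase); see command_str in test_SSH.
    return ["ssh", "-o", "ConnectTimeOut=60", "-o", "StrictHostKeyChecking=no",
            "-o", "BatchMode=yes", "-tt", "-i", sshkey_file, "-p", ssh_port,
            "{0}@{1}".format(user, host), command]

print(build_scp_argv("root", "123", "sshkey_file", "dummy-host", "src/file", "dst/file"))
print(build_ssh_argv("root", "123", "sshkey_file", "dummy-host", "dummy-command"))
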
http://git-wip-us.apache.org/repos/asf/ambari/blob/39c04ac9/ambari-web/app/controllers/wizard.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard.js b/ambari-web/app/controllers/wizard.js
index fdee580..517122f 100644
--- a/ambari-web/app/controllers/wizard.js
+++ b/ambari-web/app/controllers/wizard.js
@@ -616,6 +616,7 @@ App.WizardController = Em.Controller.extend(App.LocalStorage, App.ThemesMappingM
     sshKey: "", //string
     bootRequestId: null, //string
     sshUser: "root", //string
+    sshPort: "22",
     agentUser: "root" //string
   },
 
@@ -628,6 +629,7 @@ App.WizardController = Em.Controller.extend(App.LocalStorage, App.ThemesMappingM
     sshKey: "", //string
     bootRequestId: null, //string
     sshUser: "", //string
+    sshPort: "22",
     agentUser: "" //string
   },
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/39c04ac9/ambari-web/app/controllers/wizard/step2_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step2_controller.js b/ambari-web/app/controllers/wizard/step2_controller.js
index 3b51761..d951102 100644
--- a/ambari-web/app/controllers/wizard/step2_controller.js
+++ b/ambari-web/app/controllers/wizard/step2_controller.js
@@ -94,6 +94,14 @@ App.WizardStep2Controller = Em.Controller.extend({
   }.property('content.installOptions.sshUser'),
 
   /**
+   * "Shortcut" to <code>content.installOptions.sshPort</code>
+   * @type {string}
+   */
+  sshPort: function () {
+    return this.get('content.installOptions.sshPort');
+  }.property('content.installOptions.sshPort'),
+
+  /**
    * "Shortcut" to <code>content.installOptions.agentUser</code>
    * @type {string}
    */
@@ -148,6 +156,17 @@ App.WizardStep2Controller = Em.Controller.extend({
   }.property('sshUser', 'useSSH', 'hasSubmitted', 'manualInstall'),
 
   /**
+   * Error-message if <code>sshPort</code> is empty, null otherwise
+   * @type {string|null}
+   */
+  sshPortError: function () {
+    if (this.get('manualInstall') === false && this.get('useSSH') && Em.isEmpty(this.get('sshPort').trim() ))  {
+      return Em.I18n.t('installer.step2.sshPort.required');
+    }
+    return null;
+  }.property('sshPort', 'useSSH', 'hasSubmitted', 'manualInstall'),
+
+  /**
    * Error-message if <code>agentUser</code> is empty, null otherwise
    * @type {string|null}
    */
@@ -163,8 +182,8 @@ App.WizardStep2Controller = Em.Controller.extend({
    * @type {bool}
    */
   isSubmitDisabled: function () {
-    return (this.get('hostsError') || this.get('sshKeyError') || this.get('sshUserError') || this.get('agentUserError'));
-  }.property('hostsError', 'sshKeyError', 'sshUserError', 'agentUserError'),
+    return (this.get('hostsError') || this.get('sshKeyError') || this.get('sshUserError') || this.get('sshPortError') || this.get('agentUserError'));
+  }.property('hostsError', 'sshKeyError', 'sshUserError', 'sshPortError', 'agentUserError'),
 
   installedHostNames: function () {
     var installedHostsName = [];
@@ -292,7 +311,7 @@ App.WizardStep2Controller = Em.Controller.extend({
       this.set('hostsError', Em.I18n.t('installer.step2.hostName.error.already_installed'));
     }
 
-    if (this.get('hostsError') || this.get('sshUserError') || this.get('agentUserError') || this.get('sshKeyError')) {
+    if (this.get('hostsError') || this.get('sshUserError') || this.get('sshPortError') || this.get('agentUserError') || this.get('sshKeyError')) {
       return false;
     }
 

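The two spots touched in step2_controller.js encode one rule: sshPortError fires only for a non-manual install over SSH with an empty port, and it joins the error list that gates both isSubmitDisabled and evaluateStep. Restated as a small, runnable Python sketch (field names follow the diff; the truthiness semantics of Ember's computed properties are assumed to carry over):

def ssh_port_error(manual_install, use_ssh, ssh_port):
    # Mirrors sshPortError: only raised for non-manual installs that use SSH.
    if not manual_install and use_ssh and not ssh_port.strip():
        return "SSH Port Number is required."  # installer.step2.sshPort.required
    return None

def is_submit_disabled(errors):
    # Mirrors isSubmitDisabled: any non-empty error message blocks submission.
    keys = ("hostsError", "sshKeyError", "sshUserError",
            "sshPortError", "agentUserError")
    return any(errors.get(key) for key in keys)

print(ssh_port_error(False, True, " "))   # -> the required-port message
print(ssh_port_error(True, True, ""))     # -> None: manual installs skip the check
print(is_submit_disabled({"sshPortError": "error"}))  # -> True
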
http://git-wip-us.apache.org/repos/asf/ambari/blob/39c04ac9/ambari-web/app/controllers/wizard/step3_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step3_controller.js b/ambari-web/app/controllers/wizard/step3_controller.js
index b3645e9..efea446 100644
--- a/ambari-web/app/controllers/wizard/step3_controller.js
+++ b/ambari-web/app/controllers/wizard/step3_controller.js
@@ -278,6 +278,7 @@ App.WizardStep3Controller = Em.Controller.extend(App.ReloadPopupMixin, {
         'sshKey': this.get('content.installOptions.sshKey'),
         'hosts': this.getBootstrapHosts(),
         'user': this.get('content.installOptions.sshUser'),
+        'sshPort': this.get('content.installOptions.sshPort'),
         'userRunAs': App.get('supports.customizeAgentUserAccount') ? this.get('content.installOptions.agentUser') : 'root'
     });
     App.router.get(this.get('content.controllerName')).launchBootstrap(bootStrapData, function (requestId) {
@@ -461,6 +462,7 @@ App.WizardStep3Controller = Em.Controller.extend(App.ReloadPopupMixin, {
         'sshKey': this.get('content.installOptions.sshKey'),
         'hosts': hosts.mapProperty('name'),
         'user': this.get('content.installOptions.sshUser'),
+        'sshPort': this.get('content.installOptions.sshPort'),
         'userRunAs': App.get('supports.customizeAgentUserAccount') ? this.get('content.installOptions.agentUser') : 'root'
       });
     this.set('numPolls', 0);

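With step3_controller.js updated, the bootstrap request body now carries the port alongside the user and key. A sketch of the resulting payload, with field names taken from the hunks above and placeholder values borrowed from the tests further down:

import json

boot_strap_data = json.dumps({
    "sshKey": "key",              # content.installOptions.sshKey
    "hosts": ["host0", "host1"],  # bootstrap target hosts
    "user": "root",               # content.installOptions.sshUser
    "sshPort": "123",             # content.installOptions.sshPort (new)
    "userRunAs": "root",          # agent user, or 'root' when not customized
})
print(boot_strap_data)
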
http://git-wip-us.apache.org/repos/asf/ambari/blob/39c04ac9/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 8ee266c..c12ba24 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -586,6 +586,9 @@ Em.I18n.translations = {
   'installer.step2.sshUser.toolTip':'The user account used to install the Ambari Agent on the target host(s) via SSH. This user must be set up with passwordless SSH and sudo access on all the target host(s)',
   'installer.step2.sshUser.placeholder':'Enter user name',
   'installer.step2.sshUser.required':'User name is required',
+  'installer.step2.sshPort':'SSH Port Number',
+  'installer.step2.sshPort.toolTip':'SSH Port Number',
+  'installer.step2.sshPort.required':'SSH Port Number is required.',
   'installer.step2.agentUser':'Ambari Agent User Account',
   'installer.step2.agentUser.toolTip':'The user account used to run the Ambari Agent daemon on the target host(s). This user must be set up with passwordless sudo access on all the target host(s)',
   'installer.step2.bootStrap.error':'Errors were encountered while setting up Ambari Agents on the hosts.',

http://git-wip-us.apache.org/repos/asf/ambari/blob/39c04ac9/ambari-web/app/styles/application.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/application.less b/ambari-web/app/styles/application.less
index dae745ae..8486411 100644
--- a/ambari-web/app/styles/application.less
+++ b/ambari-web/app/styles/application.less
@@ -719,6 +719,10 @@ h1 {
       margin-right: 10px;
       padding-top: 5px;
     }
+    .ssh-port {
+      margin-right: 10px;
+      padding-top: 5px;
+    }
     #targetHosts {
       .target-hosts-input {
         padding-left: 18px;

http://git-wip-us.apache.org/repos/asf/ambari/blob/39c04ac9/ambari-web/app/templates/wizard/step2.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/wizard/step2.hbs b/ambari-web/app/templates/wizard/step2.hbs
index a194cc0..5aa4f89 100644
--- a/ambari-web/app/templates/wizard/step2.hbs
+++ b/ambari-web/app/templates/wizard/step2.hbs
@@ -90,6 +90,18 @@
               {{/if}}
             </div>
           </div>
+           <div class="row-fluid">
+                <label rel="tooltip" {{translateAttr title="installer.step2.sshPort.toolTip"}} class="ssh-port pull-left span4">
+                    {{t installer.step2.sshPort}}
+                </label>
+
+                <div {{bindAttr class="sshPortError:error :control-group"}}>
+                    {{view view.textFieldView valueBinding="content.installOptions.sshPort" isEnabledBinding="content.installOptions.useSsh" }}
+                    {{#if sshPortError}}
+                        <span class="help-inline">{{sshPortError}}</span>
+                    {{/if}}
+                </div>
+           </div>
           {{#if App.supports.customizeAgentUserAccount}}
             <div class="row-fluid">
               <label rel="tooltip" {{translateAttr title="installer.step2.agentUser.toolTip"}} class="ssh-user pull-left span4">

http://git-wip-us.apache.org/repos/asf/ambari/blob/39c04ac9/ambari-web/test/controllers/wizard/step2_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/wizard/step2_test.js b/ambari-web/test/controllers/wizard/step2_test.js
index d62b247..8f0fc66 100644
--- a/ambari-web/test/controllers/wizard/step2_test.js
+++ b/ambari-web/test/controllers/wizard/step2_test.js
@@ -29,21 +29,25 @@ describe('App.WizardStep2Controller', function () {
     {
       manualInstall: false,
       user: '',
+      sshPort:'',
       e: ''
     },
     {
       manualInstall: true,
       user: '',
+      sshPort:'',
       e: null
     },
     {
       manualInstall: true,
       user: 'nobody',
+      sshPort:'123',
       e: null
     },
     {
       manualInstall: false,
       user: 'nobody',
+      sshPort:'123',
       e: null
     }
   ]);
@@ -100,6 +104,15 @@ describe('App.WizardStep2Controller', function () {
     });
   });
 
+  describe('#sshPort', function() {
+      it('should be equal to content.installOptions.sshPort', function() {
+          var controller = App.WizardStep2Controller.create({content: {installOptions: {sshPort: '123'}}});
+          expect(controller.get('sshPort')).to.equal('123');
+          controller.set('content.installOptions.sshPort', '321');
+          expect(controller.get('sshPort')).to.equal('321');
+      });
+  });
+
   describe('#agentUser', function() {
     it('should be equal to content.installOptions.agentUser', function() {
       var controller = App.WizardStep2Controller.create({content: {installOptions: {agentUser: '123'}}});
@@ -285,6 +298,22 @@ describe('App.WizardStep2Controller', function () {
 
   });
 
+  describe('#sshPortError', function () {
+
+      userErrorTests.forEach(function(test) {
+          it('', function() {
+              var controller = App.WizardStep2Controller.create({content: {installOptions: {manualInstall: test.manualInstall, sshPort: test.sshPort}}});
+              if(Em.isNone(test.e)) {
+                  expect(controller.get('sshPortError')).to.equal(null);
+              }
+              else {
+                  expect(controller.get('sshPortError').length).to.be.above(2);
+              }
+          });
+      });
+
+  });
+
   describe('#agentUserError', function () {
 
     afterEach(function () {
@@ -379,6 +408,15 @@ describe('App.WizardStep2Controller', function () {
       expect(controller.evaluateStep()).to.equal(false);
     });
 
+    it('should return false if sshPortError is not empty', function () {
+        var controller = App.WizardStep2Controller.create({
+            hostNames: 'apache.ambari',
+            parseHostNamesAsPatternExpression: Em.K
+        });
+        controller.reopen({sshPortError: 'error'});
+        expect(controller.evaluateStep()).to.equal(false);
+    });
+
     it('should return false if agentUserError is not empty', function () {
       var controller = App.WizardStep2Controller.create({
         hostNames: 'apache.ambari',
@@ -487,6 +525,7 @@ describe('App.WizardStep2Controller', function () {
       hostsError: '',
       sshKeyError: '',
       sshUserError: '',
+      sshPortError: '',
       agentUserError: ''
     });
 
@@ -512,6 +551,12 @@ describe('App.WizardStep2Controller', function () {
       controller.set('sshUserError', '');
       expect(controller.get('isSubmitDisabled').length).to.above(0);
     });
+
+    it('should return value if sshPortError is not empty', function () {
+        controller.set('sshPortError', 'error');
+        controller.set('agentUserError', '');
+        expect(controller.get('isSubmitDisabled').length).to.above(0);
+    });
   });
 
   describe('#installedHostsPopup', function() {

http://git-wip-us.apache.org/repos/asf/ambari/blob/39c04ac9/ambari-web/test/controllers/wizard/step3_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/wizard/step3_test.js b/ambari-web/test/controllers/wizard/step3_test.js
index 74c62e6..f48514c 100644
--- a/ambari-web/test/controllers/wizard/step3_test.js
+++ b/ambari-web/test/controllers/wizard/step3_test.js
@@ -558,6 +558,7 @@ describe('App.WizardStep3Controller', function () {
             installOptions: {
               sshKey: 'key',
               sshUser: 'root',
+              sshPort: '123',
               agentUser: 'user'
             },
             hosts: { "host0": { "name": "host0" }, "host1": { "name": "host1" } }
@@ -571,6 +572,7 @@ describe('App.WizardStep3Controller', function () {
           sshKey: 'key',
           hosts: ['host0', 'host1'],
           user: 'root',
+          sshPort: '123',
           userRunAs: item.userRunAs
         }));
       });
@@ -2410,6 +2412,7 @@ describe('App.WizardStep3Controller', function () {
           installOptions: {
             sshKey: 'key',
             sshUser: 'root',
+            sshPort: '123',
             agentUser: 'user'
           },
           hosts: { "host0": { "name": "host0" }, "host1": { "name": "host1" } },
@@ -2436,6 +2439,7 @@ describe('App.WizardStep3Controller', function () {
           sshKey: 'key',
           hosts: ['host0', 'host1'],
           user: 'root',
+          sshPort: '123',
           userRunAs: item.userRunAs
         }));
       });


[45/50] [abbrv] ambari git commit: AMBARI-13539 Wrong cluster-env version sent during validation. (ababiichuk)

Posted by nc...@apache.org.
AMBARI-13539 Wrong cluster-env version sent during validation. (ababiichuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a61a83fa
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a61a83fa
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a61a83fa

Branch: refs/heads/branch-dev-patch-upgrade
Commit: a61a83fae8170ebf3be7d4bc58b91b75dc7e0b32
Parents: d30b5f0
Author: aBabiichuk <ab...@cybervisiontech.com>
Authored: Fri Oct 23 14:19:14 2015 +0300
Committer: aBabiichuk <ab...@cybervisiontech.com>
Committed: Fri Oct 23 14:19:43 2015 +0300

----------------------------------------------------------------------
 ambari-web/app/mixins/common/serverValidator.js | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a61a83fa/ambari-web/app/mixins/common/serverValidator.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/common/serverValidator.js b/ambari-web/app/mixins/common/serverValidator.js
index 04c5882..4d22fdd 100644
--- a/ambari-web/app/mixins/common/serverValidator.js
+++ b/ambari-web/app/mixins/common/serverValidator.js
@@ -304,8 +304,8 @@ App.ServerValidatorMixin = Em.Mixin.create({
       error: 'validationError'
     }).done(function (data) {
       App.router.get('configurationController').getConfigsByTags([{
-        siteName: data.items[0].type,
-        tagName: data.items[0].tag
+        siteName: data.items[data.items.length - 1].type,
+        tagName: data.items[data.items.length - 1].tag
       }]).done(function (clusterEnvConfigs) {
         var configsObject = clusterEnvConfigs[0].properties;
         var configsArray = [];

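The one-line fix swaps data.items[0] for the last element: the desired_configs response evidently lists cluster-env versions oldest-first, so the current tag is the final item, not the first. The selection logic, restated as a minimal Python sketch (the response shape is assumed from the fields the diff reads; the tag values are placeholders):

data = {"items": [
    {"type": "cluster-env", "tag": "version1"},  # stale version
    {"type": "cluster-env", "tag": "version2"},  # current version
]}

current = data["items"][-1]   # was data["items"][0] before this fix
print(current["type"] + " / " + current["tag"])
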

[25/50] [abbrv] ambari git commit: AMBARI-13526 Validation popup on service config page shows properties not from current service. (ababiichuk)

Posted by nc...@apache.org.
AMBARI-13526 Validation popup on service config page shows properties not from current service. (ababiichuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3383f024
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3383f024
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3383f024

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 3383f0245079865c353d5457a3ec6967fbdf1eac
Parents: a8445c9
Author: aBabiichuk <ab...@cybervisiontech.com>
Authored: Thu Oct 22 18:03:59 2015 +0300
Committer: aBabiichuk <ab...@cybervisiontech.com>
Committed: Thu Oct 22 18:05:15 2015 +0300

----------------------------------------------------------------------
 ambari-web/app/mixins/common/serverValidator.js             | 9 ++++++++-
 .../common/modal_popups/config_recommendation_popup.hbs     | 2 +-
 2 files changed, 9 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/3383f024/ambari-web/app/mixins/common/serverValidator.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/common/serverValidator.js b/ambari-web/app/mixins/common/serverValidator.js
index 46c0b13..04c5882 100644
--- a/ambari-web/app/mixins/common/serverValidator.js
+++ b/ambari-web/app/mixins/common/serverValidator.js
@@ -437,7 +437,14 @@ App.ServerValidatorMixin = Em.Mixin.create({
         },
         bodyClass: Em.View.extend({
           controller: self,
-          templateName: require('templates/common/modal_popups/config_recommendation_popup')
+          templateName: require('templates/common/modal_popups/config_recommendation_popup'),
+          serviceConfigs: function() {
+            if (this.get('controller.name') === 'mainServiceInfoConfigsController') {
+              return [this.get('controller.selectedService')];
+            } else {
+              return this.get('controller.stepConfigs');
+            }
+          }.property()
         })
       });
     } else {

http://git-wip-us.apache.org/repos/asf/ambari/blob/3383f024/ambari-web/app/templates/common/modal_popups/config_recommendation_popup.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/common/modal_popups/config_recommendation_popup.hbs b/ambari-web/app/templates/common/modal_popups/config_recommendation_popup.hbs
index 5283417..c45e435 100644
--- a/ambari-web/app/templates/common/modal_popups/config_recommendation_popup.hbs
+++ b/ambari-web/app/templates/common/modal_popups/config_recommendation_popup.hbs
@@ -28,7 +28,7 @@
     </tr>
     </thead>
     <tbody>
-      {{#each service in stepConfigs}}
+      {{#each service in view.serviceConfigs}}
         {{#each property in service.configs}}
           {{#if property.warn}}
             <tr>

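The new view-level serviceConfigs property narrows what the recommendation template iterates over: on the service config page only the currently selected service is shown, while wizard flows still list every step's configs. The branch, restated as a small Python sketch:

def service_configs(controller_name, selected_service, step_configs):
    # Mirrors the serviceConfigs computed property added above.
    if controller_name == "mainServiceInfoConfigsController":
        return [selected_service]
    return step_configs

print(service_configs("mainServiceInfoConfigsController", "HDFS", ["HDFS", "YARN"]))
print(service_configs("installerController", "HDFS", ["HDFS", "YARN"]))
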

[10/50] [abbrv] ambari git commit: AMBARI-13483. [CapSchedView] show leaf and non-leaf queues with different UI indicators. (Gaurav Nagar via yusaku)

Posted by nc...@apache.org.
AMBARI-13483. [CapSchedView] show leaf and non-leaf queues with different UI indicators. (Gaurav Nagar via yusaku)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8ef5beb8
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8ef5beb8
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8ef5beb8

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 8ef5beb857cc4c1fa077bdf2e0dbb1bc55ebb2f5
Parents: 383aac0
Author: Yusaku Sako <yu...@hortonworks.com>
Authored: Wed Oct 21 15:25:07 2015 -0700
Committer: Yusaku Sako <yu...@hortonworks.com>
Committed: Wed Oct 21 15:25:07 2015 -0700

----------------------------------------------------------------------
 .../resources/ui/app/templates/components/queueListItem.hbs    | 6 ++++++
 1 file changed, 6 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8ef5beb8/contrib/views/capacity-scheduler/src/main/resources/ui/app/templates/components/queueListItem.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/capacity-scheduler/src/main/resources/ui/app/templates/components/queueListItem.hbs b/contrib/views/capacity-scheduler/src/main/resources/ui/app/templates/components/queueListItem.hbs
index 075f9cd..e3ff8fc 100644
--- a/contrib/views/capacity-scheduler/src/main/resources/ui/app/templates/components/queueListItem.hbs
+++ b/contrib/views/capacity-scheduler/src/main/resources/ui/app/templates/components/queueListItem.hbs
@@ -41,6 +41,12 @@
       </span>
     {{/if}}
 
+    {{#unless queue.queues}}
+      <span class="badge pull-right">
+        <i class="fa fa-leaf"></i>
+      </span>
+    {{/unless}}
+
     </span>
   {{/link-to}}
 


[47/50] [abbrv] ambari git commit: AMBARI-13544. Kerberos: credential dialog ux edits

Posted by nc...@apache.org.
AMBARI-13544. Kerberos: credential dialog ux edits


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b27212da
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b27212da
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b27212da

Branch: refs/heads/branch-dev-patch-upgrade
Commit: b27212da487b3853388ff656793bc38c5b0b79d7
Parents: 502dc18
Author: Alex Antonenko <hi...@gmail.com>
Authored: Fri Oct 23 16:05:34 2015 +0300
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Fri Oct 23 16:13:51 2015 +0300

----------------------------------------------------------------------
 ambari-web/app/messages.js                      |  2 ++
 .../common/form/manage_credentilas_form.hbs     | 29 ++------------------
 ambari-web/app/templates/common/modal_popup.hbs |  2 +-
 .../app/templates/main/admin/kerberos.hbs       |  4 ++-
 .../common/form/manage_credentials_form_view.js | 17 ++++++++++--
 ambari-web/app/views/common/modal_popup.js      |  3 +-
 .../manage_kdc_credentials_popup.js             | 24 +++++++++++-----
 .../form/manage_kdc_credentials_form_test.js    |  4 +--
 8 files changed, 44 insertions(+), 41 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b27212da/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 9b5a93e..1f62a7e 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -1043,6 +1043,8 @@ Em.I18n.translations = {
   'admin.kerberos.credentials.store.hint.not.supported': 'Ambari is not configured for storing credentials',
   'admin.kerberos.credentials.store.label': 'Save Admin Credentials',
   'admin.kerberos.credentials.store.menu.label': 'Manage KDC Credentials',
+  'admin.kerberos.credentials.form.header.stored': 'Update or remove the stored KDC Credentials in the encrypted credential store.',
+  'admin.kerberos.credentials.form.header.not.stored': 'Specify the KDC Admin Credentials to remember in the encrypted credential store.',
   'admin.kerberos.credentials.remove.confirmation.header': 'Remove KDC Credentials Confirmation',
   'admin.kerberos.credentials.remove.confirmation.body': 'You are about to remove the KDC Credentials from Ambari. Are you sure?',
   'admin.kerberos.wizard.configuration.note': 'This is the initial configuration created by Enable Kerberos wizard.',

http://git-wip-us.apache.org/repos/asf/ambari/blob/b27212da/ambari-web/app/templates/common/form/manage_credentilas_form.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/common/form/manage_credentilas_form.hbs b/ambari-web/app/templates/common/form/manage_credentilas_form.hbs
index c72e024..71f44ee 100644
--- a/ambari-web/app/templates/common/form/manage_credentilas_form.hbs
+++ b/ambari-web/app/templates/common/form/manage_credentilas_form.hbs
@@ -16,8 +16,10 @@
 * limitations under the License.
 }}
 
-
 <form class="form-horizontal">
+  <p class="alert alert-info">
+    {{view.formHeader}}
+  </p>
   <div class="control-group">
     <label class="control-label">{{t popup.invalid.KDC.admin.principal}}</label>
     <div class="controls">
@@ -30,29 +32,4 @@
       {{view Ember.TextField type="password" valueBinding="view.password" class="form-control"}}
     </div>
   </div>
-  <div class="control-group">
-    <span class="control-label"></span>
-    <div class="controls">
-      {{#if App.supports.storeKDCCredentials}}
-        <label>
-          {{view Ember.Checkbox checkedBinding="view.storeCredentials" disabledBinding="view.checkboxDisabled" classNames="pull-left"}}
-          <span {{bindAttr class=":mls view.checkboxDisabled:muted"}}>
-            {{t admin.kerberos.credentials.store.label}}
-            <a class="icon-question-sign icon-blue" rel="tooltip" href="javascript:void(null);" data-toggle="tooltip" {{bindAttr data-original-title="view.hintMessage"}}><a/>
-          </span>
-        </label>
-      {{/if}}
-    </div>
-  </div>
-  <div class="control-group">
-    <span class="control-label"></span>
-    <div class="controls">
-      <button {{bindAttr class=":btn :btn-danger :pull-left view.isRemovable::hidden" disabled="view.isRemoveDisabled"}} {{action removeKDCCredentials target="view"}}>
-        <i class="icon-remove-circle"></i> {{t common.remove}}</button>
-      <div {{bindAttr class=":spinner :mll :pull-left view.isActionInProgress::hide"}}></div>
-      {{#if view.actionStatus}}
-        <span class="pull-left lh-btn mll text-success">{{view.actionStatus}}</span>
-      {{/if}}
-    </div>
-  </div>
 </form>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b27212da/ambari-web/app/templates/common/modal_popup.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/common/modal_popup.hbs b/ambari-web/app/templates/common/modal_popup.hbs
index c95a3ac..8559da5 100644
--- a/ambari-web/app/templates/common/modal_popup.hbs
+++ b/ambari-web/app/templates/common/modal_popup.hbs
@@ -52,7 +52,7 @@
           {{t app.settings.notShowBgOperations}}</label>
         {{/if}}
         {{#if view.third}}
-          <button class="btn" {{bindAttr disabled="view.disableThird"}} {{action onThird target="view"}}>{{view.third}}</button>
+          <button {{bindAttr disabled="view.disableThird" class=":btn view.thirdClass"}} {{action onThird target="view"}}>{{view.third}}</button>
         {{/if}}
         {{#if view.secondary}}
           <button {{bindAttr disabled="view.disableSecondary" class=":btn view.secondaryClass"}} {{action onSecondary target="view"}}>{{view.secondary}}</button>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b27212da/ambari-web/app/templates/main/admin/kerberos.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/kerberos.hbs b/ambari-web/app/templates/main/admin/kerberos.hbs
index 8b3f9dd..3dbc4d0 100644
--- a/ambari-web/app/templates/main/admin/kerberos.hbs
+++ b/ambari-web/app/templates/main/admin/kerberos.hbs
@@ -25,7 +25,9 @@
             <button class="btn btn-success" id="regenerate-keytabs" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action regenerateKeytabs target="controller"}}>
               <i class="icon-repeat"></i> {{t admin.kerberos.button.regenerateKeytabs}}</button>
             {{#if App.supports.storeKDCCredentials}}
-              <button class="btn btn-primary" {{action showManageKDCCredentialsPopup target="controller"}}>{{t admin.kerberos.credentials.store.menu.label}}</button>
+              {{#if App.isCredentialStorePersistent}}
+                <button class="btn btn-primary" {{action showManageKDCCredentialsPopup target="controller"}}>{{t admin.kerberos.credentials.store.menu.label}}</button>
+              {{/if}}
             {{/if}}
           {{/unless}}
           <br/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b27212da/ambari-web/app/views/common/form/manage_credentials_form_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/form/manage_credentials_form_view.js b/ambari-web/app/views/common/form/manage_credentials_form_view.js
index 2bc27ee..3327d0d 100644
--- a/ambari-web/app/views/common/form/manage_credentials_form_view.js
+++ b/ambari-web/app/views/common/form/manage_credentials_form_view.js
@@ -85,9 +85,15 @@ App.ManageCredentialsFormView = Em.View.extend({
    * @type {string}
    */
   storageType: function() {
-    return this.get('storeCredentials') ? credentialsUtils.STORE_TYPES.PERSISTENT : credentialsUtils.STORE_TYPES.TEMPORARY;
+    return credentialsUtils.STORE_TYPES.PERSISTENT;
   }.property('storeCredentials'),
 
+  formHeader: function() {
+    return this.get('isRemovable') ?
+      Em.I18n.t('admin.kerberos.credentials.form.header.stored') :
+      Em.I18n.t('admin.kerberos.credentials.form.header.not.stored');
+  }.property('isRemovable'),
+
   /**
    * Message to display in tooltip regarding persistent storage state.
    *
@@ -157,11 +163,12 @@ App.ManageCredentialsFormView = Em.View.extend({
   /**
    * Remove KDC credentials action.
    *
-   * @returns {App.ModalPopup}
+   * @returns {object} for better testing purpose returns object { deferred: $.Deferred, popup: App.ModalPopup }
    */
   removeKDCCredentials: function() {
     var t = Em.I18n.t;
     var self = this;
+    var dfd = $.Deferred();
     this.set('actionStatus', false);
     var popup = App.showConfirmationPopup(
       function() {
@@ -172,6 +179,7 @@ App.ManageCredentialsFormView = Em.View.extend({
             self.prepareContent();
             self.set('actionStatus', Em.I18n.t('common.success'));
             self.get('parentView').set('isCredentialsRemoved', true);
+            dfd.resolve();
           });
       }, t('admin.kerberos.credentials.remove.confirmation.body'),
       function () {},
@@ -179,7 +187,10 @@ App.ManageCredentialsFormView = Em.View.extend({
       t('yes'),
       false);
     popup.set('secondary', t('no'));
-    return popup;
+    return {
+      deferred: dfd,
+      popup: popup
+    };
   },
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/b27212da/ambari-web/app/views/common/modal_popup.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/modal_popup.js b/ambari-web/app/views/common/modal_popup.js
index c6b8438..fbf539a 100644
--- a/ambari-web/app/views/common/modal_popup.js
+++ b/ambari-web/app/views/common/modal_popup.js
@@ -36,6 +36,7 @@ App.ModalPopup = Ember.View.extend({
   disableThird: false,
   primaryClass: 'btn-success',
   secondaryClass: '',
+  thirdClass: '',
   onPrimary: function () {
     this.hide();
   },
@@ -120,4 +121,4 @@ App.ModalPopup.reopenClass({
     return popup;
   }
 
-});
\ No newline at end of file
+});

http://git-wip-us.apache.org/repos/asf/ambari/blob/b27212da/ambari-web/app/views/common/modal_popups/manage_kdc_credentials_popup.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/modal_popups/manage_kdc_credentials_popup.js b/ambari-web/app/views/common/modal_popups/manage_kdc_credentials_popup.js
index 1de8e56..87592aa 100644
--- a/ambari-web/app/views/common/modal_popups/manage_kdc_credentials_popup.js
+++ b/ambari-web/app/views/common/modal_popups/manage_kdc_credentials_popup.js
@@ -17,7 +17,6 @@
  */
 
 var App = require('app');
-var credentialsUtils = require('utils/credentials');
 
 /**
  * @return {*}
@@ -27,6 +26,14 @@ App.showManageCredentialsPopup = function () {
     header: Em.I18n.t('admin.kerberos.credentials.store.menu.label'),
     bodyClass: App.ManageCredentialsFormView,
     primary: Em.I18n.t('common.save'),
+
+    thirdClass: 'pull-left btn-danger',
+    third: function() {
+      return this.get('formView.isRemovable') ?
+        Em.I18n.t('common.remove') :
+        null;
+    }.property('formView.isRemovable'),
+
     isCredentialsRemoved: false,
 
     disablePrimary: function() {
@@ -37,12 +44,6 @@ App.showManageCredentialsPopup = function () {
       return this.get('childViews').findProperty('viewName', 'manageCredentialsForm');
     }.property(),
 
-    credentialsRemoveObserver: function() {
-      if (this.get('isCredentialsRemoved')) {
-        this.hide();
-      }
-    }.observes('isCredentialsRemoved'),
-
     onPrimary: function() {
       var self = this;
       var formView = this.get('formView');
@@ -53,6 +54,15 @@ App.showManageCredentialsPopup = function () {
       } else {
         this.hide();
       }
+    },
+
+    onThird: function() {
+      var self = this;
+      if (this.get('formView')) {
+        this.get('formView').removeKDCCredentials().deferred.always(function() {
+          self.hide();
+        });
+      }
     }
   });
 };

http://git-wip-us.apache.org/repos/asf/ambari/blob/b27212da/ambari-web/test/views/common/form/manage_kdc_credentials_form_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/common/form/manage_kdc_credentials_form_test.js b/ambari-web/test/views/common/form/manage_kdc_credentials_form_test.js
index 0d8c9af..1cba71c 100644
--- a/ambari-web/test/views/common/form/manage_kdc_credentials_form_test.js
+++ b/ambari-web/test/views/common/form/manage_kdc_credentials_form_test.js
@@ -104,13 +104,13 @@ describe('#App.ManageCredentialsFormView', function() {
 
   describe('#removeKDCCredentials', function() {
     it('should show confirmation popup', function() {
-      var popup = view.removeKDCCredentials();
+      var popup = view.removeKDCCredentials().popup;
       expect(popup).be.instanceof(App.ModalPopup);
       popup.destroy();
     });
     it('should call credentialUtils#removeCredentials', function() {
       this.clock = sinon.useFakeTimers();
-      var popup = view.removeKDCCredentials();
+      var popup = view.removeKDCCredentials().popup;
       assert.isFalse(view.get('actionStatus'), '#actionStatus before remove');
       sinon.stub(credentialUtils, 'removeCredentials', function() {
         var dfd = $.Deferred();

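removeKDCCredentials now hands back both the confirmation popup and a deferred, so onThird can hide the parent dialog only after removal actually completes, while the tests can still grab the popup. The shape of that contract, sketched in Python with a stand-in Deferred (the real code uses jQuery's $.Deferred):

class Deferred(object):
    # Minimal stand-in for $.Deferred: run registered callbacks on resolve.
    def __init__(self):
        self._callbacks = []
    def always(self, callback):
        self._callbacks.append(callback)
        return self
    def resolve(self):
        for callback in self._callbacks:
            callback()

def remove_kdc_credentials():
    dfd = Deferred()
    popup = object()  # stands in for the confirmation App.ModalPopup
    # ...on user confirmation the credentials are removed, then dfd.resolve()...
    return {"deferred": dfd, "popup": popup}

result = remove_kdc_credentials()
result["deferred"].always(lambda: print("hide the parent popup"))
result["deferred"].resolve()
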

[14/50] [abbrv] ambari git commit: AMBARI-12701. Stop-and-Start Upgrade: Handle Core Services (alejandro)

Posted by nc...@apache.org.
AMBARI-12701. Stop-and-Start Upgrade: Handle Core Services (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7afe5a4e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7afe5a4e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7afe5a4e

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 7afe5a4ec9200c41922fb1c0809f34a0d8d700bd
Parents: 34a0353
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Wed Oct 21 17:48:24 2015 -0700
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Wed Oct 21 19:22:52 2015 -0700

----------------------------------------------------------------------
 .../libraries/script/script.py                  |  68 +++++---
 .../PreUpgradeCheckResourceProvider.java        |  12 +-
 .../internal/UpgradeResourceProvider.java       |  50 +++++-
 .../ambari/server/stack/MasterHostResolver.java |  23 +++
 .../ambari/server/state/UpgradeHelper.java      |  76 +++++++--
 .../state/stack/upgrade/ClusterGrouping.java    |   5 +-
 .../state/stack/upgrade/ColocatedGrouping.java  |   8 +-
 .../server/state/stack/upgrade/Grouping.java    |  15 +-
 .../stack/upgrade/ServiceCheckGrouping.java     |   2 +-
 .../state/stack/upgrade/StageWrapper.java       |  40 ++++-
 .../stack/upgrade/StageWrapperBuilder.java      |   5 +-
 .../server/state/stack/upgrade/TaskWrapper.java |  27 ++-
 .../state/stack/upgrade/TaskWrapperBuilder.java |  10 +-
 .../HDFS/2.1.0.2.0/package/scripts/datanode.py  |  40 +++--
 .../package/scripts/datanode_upgrade.py         |  27 +--
 .../2.1.0.2.0/package/scripts/hdfs_client.py    |   6 +-
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  | 124 +++++++++-----
 .../2.1.0.2.0/package/scripts/journalnode.py    |  15 +-
 .../package/scripts/journalnode_upgrade.py      |   2 +-
 .../HDFS/2.1.0.2.0/package/scripts/namenode.py  | 115 ++++++++++---
 .../package/scripts/namenode_ha_state.py        |  27 +++
 .../package/scripts/namenode_upgrade.py         | 166 +++++++++++++++++--
 .../2.1.0.2.0/package/scripts/nfsgateway.py     |   6 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |   8 +-
 .../package/scripts/setup_ranger_hdfs.py        |   4 +-
 .../HDFS/2.1.0.2.0/package/scripts/snamenode.py |   8 +-
 .../HDFS/2.1.0.2.0/package/scripts/utils.py     |  30 ++++
 .../2.1.0.2.0/package/scripts/zkfc_slave.py     |   4 +-
 .../scripts/application_timeline_server.py      |   8 +-
 .../2.1.0.2.0/package/scripts/historyserver.py  |   8 +-
 .../package/scripts/mapreduce2_client.py        |   2 +-
 .../2.1.0.2.0/package/scripts/nodemanager.py    |  12 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |   2 +-
 .../package/scripts/resourcemanager.py          |   8 +-
 .../2.1.0.2.0/package/scripts/yarn_client.py    |   2 +-
 .../3.4.5.2.0/package/scripts/zookeeper.py      |   6 +-
 .../package/scripts/zookeeper_client.py         |   8 +-
 .../package/scripts/zookeeper_server.py         |  25 +--
 .../package/scripts/zookeeper_service.py        |   4 +-
 .../HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml |  31 +++-
 .../stacks/HDP/2.2/upgrades/config-upgrade.xml  |  11 ++
 .../HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml |  13 ++
 .../stack/upgrade/StageWrapperBuilderTest.java  |   3 +-
 .../python/stacks/2.0.6/HDFS/test_datanode.py   |  22 +--
 .../stacks/2.0.6/HDFS/test_hdfs_client.py       |   8 +-
 .../stacks/2.0.6/HDFS/test_journalnode.py       |  18 +-
 .../python/stacks/2.0.6/HDFS/test_namenode.py   |  24 +--
 .../python/stacks/2.0.6/HDFS/test_nfsgateway.py |   4 +-
 .../stacks/2.0.6/YARN/test_historyserver.py     |   4 +-
 .../stacks/2.0.6/YARN/test_mapreduce2_client.py |   4 +-
 .../stacks/2.0.6/YARN/test_nodemanager.py       |  16 +-
 .../stacks/2.0.6/YARN/test_resourcemanager.py   |   4 +-
 .../stacks/2.0.6/YARN/test_yarn_client.py       |   4 +-
 .../2.0.6/ZOOKEEPER/test_zookeeper_client.py    |   8 +-
 .../2.0.6/ZOOKEEPER/test_zookeeper_server.py    |  12 +-
 .../stacks/2.1/YARN/test_apptimelineserver.py   |   4 +-
 56 files changed, 894 insertions(+), 304 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index e647c11..e3bae5d 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -26,6 +26,7 @@ import os
 import sys
 import logging
 import platform
+import inspect
 import tarfile
 from ambari_commons import OSCheck, OSConst
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
@@ -427,18 +428,19 @@ class Script(object):
     sys.exit(1)
 
 
-  def start(self, env, rolling_restart=False):
+  def start(self, env, upgrade_type=None):
     """
     To be overridden by subclasses
     """
     self.fail_with_error("start method isn't implemented")
 
-  def stop(self, env, rolling_restart=False):
+  def stop(self, env, upgrade_type=None):
     """
     To be overridden by subclasses
     """
     self.fail_with_error("stop method isn't implemented")
 
+  # TODO, remove after all services have switched to pre_upgrade_restart
   def pre_rolling_restart(self, env):
     """
     To be overridden by subclasses
@@ -463,45 +465,67 @@ class Script(object):
       command_params = config["commandParams"] if "commandParams" in config else None
       if command_params is not None:
         restart_type = command_params["restart_type"] if "restart_type" in command_params else ""
-        if restart_type:
-          restart_type = restart_type.encode('ascii', 'ignore')
 
-    rolling_restart = restart_type.lower().startswith("rolling")
+    upgrade_type = None
+    if restart_type.lower() == "rolling_upgrade":
+      upgrade_type = "rolling"
+    elif restart_type.lower() == "nonrolling_upgrade":
+      upgrade_type = "nonrolling"
+
+    is_stack_upgrade = upgrade_type is not None
 
     if componentCategory and componentCategory.strip().lower() == 'CLIENT'.lower():
-      if rolling_restart:
-        self.pre_rolling_restart(env)
+      if is_stack_upgrade:
+        # Remain backward compatible with the rest of the services that haven't switched to using
+        # the pre_upgrade_restart method. Once done, remove the else-block.
+        if "pre_upgrade_restart" in dir(self):
+          self.pre_upgrade_restart(env, upgrade_type=upgrade_type)
+        else:
+          self.pre_rolling_restart(env)
 
       self.install(env)
     else:
-      # To remain backward compatible with older stacks, only pass rolling_restart if True.
-      if rolling_restart:
-        self.stop(env, rolling_restart=rolling_restart)
+      # To remain backward compatible with older stacks, only pass upgrade_type if available.
+      # TODO, remove checking the argspec for "upgrade_type" once all of the services support that optional param.
+      if is_stack_upgrade and "upgrade_type" in inspect.getargspec(self.stop).args:
+        self.stop(env, upgrade_type=upgrade_type)
       else:
-        self.stop(env)
-
-      if rolling_restart:
-        self.pre_rolling_restart(env)
-
-      # To remain backward compatible with older stacks, only pass rolling_restart if True.
-      if rolling_restart:
-        self.start(env, rolling_restart=rolling_restart)
+        self.stop(env, rolling_restart=(restart_type == "rolling_upgrade"))
+
+      if is_stack_upgrade:
+        # Remain backward compatible with the rest of the services that haven't switched to using
+          # the pre_upgrade_restart method. Once done, remove the else-block.
+        if "pre_upgrade_restart" in dir(self):
+          self.pre_upgrade_restart(env, upgrade_type=upgrade_type)
+        else:
+          self.pre_rolling_restart(env)
+
+      # To remain backward compatible with older stacks, only pass upgrade_type if available.
+      # TODO, remove checking the argspec for "upgrade_type" once all of the services support that optional param.
+      if is_stack_upgrade and "upgrade_type" in inspect.getargspec(self.start).args:
+        self.start(env, upgrade_type=upgrade_type)
       else:
-        self.start(env)
+        self.start(env, rolling_restart=(restart_type == "rolling_upgrade"))
 
-      if rolling_restart:
-        self.post_rolling_restart(env)
+      if is_stack_upgrade:
+        # Remain backward compatible with the rest of the services that haven't switched to using
+        # the post_upgrade_restart method. Once done, remove the else-block.
+        if "post_upgrade_restart" in dir(self):
+          self.post_upgrade_restart(env, upgrade_type=upgrade_type)
+        else:
+          self.post_rolling_restart(env)
 
     if self.should_expose_component_version("restart"):
       self.save_component_version_to_structured_out()
 
+  # TODO, remove after all services have switched to post_upgrade_restart
   def post_rolling_restart(self, env):
     """
     To be overridden by subclasses
     """
     pass
 
-  def configure(self, env, rolling_restart=False):
+  def configure(self, env, upgrade_type=None):
     """
     To be overridden by subclasses
     """

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProvider.java
index a139446..e98f730 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProvider.java
@@ -134,8 +134,16 @@ public class PreUpgradeCheckResourceProvider extends ReadOnlyResourceProvider {
 
     for (Map<String, Object> propertyMap: propertyMaps) {
       final String clusterName = propertyMap.get(UPGRADE_CHECK_CLUSTER_NAME_PROPERTY_ID).toString();
-      final UpgradeType upgradeType = propertyMap.containsKey(UPGRADE_CHECK_UPGRADE_TYPE_PROPERTY_ID) ?
-          UpgradeType.valueOf(propertyMap.get(UPGRADE_CHECK_UPGRADE_TYPE_PROPERTY_ID).toString()) : UpgradeType.ROLLING;
+
+      UpgradeType upgradeType = UpgradeType.ROLLING;
+      if (propertyMap.containsKey(UPGRADE_CHECK_UPGRADE_TYPE_PROPERTY_ID)) {
+        try {
+          upgradeType = UpgradeType.valueOf(propertyMap.get(UPGRADE_CHECK_UPGRADE_TYPE_PROPERTY_ID).toString());
+        } catch(Exception e){
+          throw new SystemException(String.format("Property %s has an incorrect value of %s.", UPGRADE_CHECK_UPGRADE_TYPE_PROPERTY_ID, propertyMap.get(UPGRADE_CHECK_UPGRADE_TYPE_PROPERTY_ID)));
+        }
+      }
+
       final Cluster cluster;
 
       try {
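
Both this provider and UpgradeResourceProvider (next diff) now guard UpgradeType.valueOf so that a malformed request value yields a readable SystemException/AmbariException instead of an uncaught IllegalArgumentException. A rough Python 3 equivalent of that parse-with-default pattern, purely illustrative (the enum and property key are stand-ins):

from enum import Enum

class UpgradeType(Enum):
    ROLLING = "ROLLING"
    NON_ROLLING = "NON_ROLLING"

def parse_upgrade_type(request, key="Upgrade/upgrade_type"):
    # Default to ROLLING; surface a clear error for bad values rather
    # than letting the raw lookup exception escape to the caller.
    if key not in request:
        return UpgradeType.ROLLING
    try:
        return UpgradeType[str(request[key])]
    except KeyError:
        raise ValueError("Property %s has an incorrect value of %s."
                         % (key, request[key]))

print(parse_upgrade_type({}))                                    # ROLLING
print(parse_upgrade_type({"Upgrade/upgrade_type": "NON_ROLLING"}))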

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 82ce49f..78c36f8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -492,8 +492,14 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     String preferredUpgradePackName = (String) requestMap.get(UPGRADE_PACK);
 
     // Default to ROLLING upgrade, but attempt to read from properties.
-    final UpgradeType upgradeType = requestMap.containsKey(UPGRADE_TYPE) ?
-        UpgradeType.valueOf(requestMap.get(UPGRADE_TYPE).toString()) : UpgradeType.ROLLING;
+    UpgradeType upgradeType = UpgradeType.ROLLING;
+    if (requestMap.containsKey(UPGRADE_TYPE)) {
+      try {
+        upgradeType = UpgradeType.valueOf(requestMap.get(UPGRADE_TYPE).toString());
+      } catch(Exception e){
+        throw new AmbariException(String.format("Property %s has an incorrect value of %s.", UPGRADE_TYPE, requestMap.get(UPGRADE_TYPE)));
+      }
+    }
 
     if (null == clusterName) {
       throw new AmbariException(String.format("%s is required", UPGRADE_CLUSTER_NAME));
@@ -526,8 +532,15 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     boolean skipPrereqChecks = Boolean.parseBoolean((String) requestMap.get(UPGRADE_SKIP_PREREQUISITE_CHECKS));
     boolean failOnCheckWarnings = Boolean.parseBoolean((String) requestMap.get(UPGRADE_FAIL_ON_CHECK_WARNINGS));
     String preferredUpgradePack = requestMap.containsKey(UPGRADE_PACK) ? (String) requestMap.get(UPGRADE_PACK) : null;
-    UpgradeType upgradeType = requestMap.containsKey(UPGRADE_TYPE) ?
-        UpgradeType.valueOf(requestMap.get(UPGRADE_TYPE).toString()) : UpgradeType.ROLLING;
+
+    UpgradeType upgradeType = UpgradeType.ROLLING;
+    if (requestMap.containsKey(UPGRADE_TYPE)) {
+      try {
+        upgradeType = UpgradeType.valueOf(requestMap.get(UPGRADE_TYPE).toString());
+      } catch(Exception e){
+        throw new AmbariException(String.format("Property %s has an incorrect value of %s.", UPGRADE_TYPE, requestMap.get(UPGRADE_TYPE)));
+      }
+    }
 
     // Validate there isn't a direction == upgrade/downgrade already in progress.
     List<UpgradeEntity> upgrades = s_upgradeDAO.findUpgrades(cluster.getClusterId());
@@ -1061,6 +1074,23 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     }
   }
 
+  /**
+   * Modify the commandParams by applying additional parameters from the stage.
+   * @param wrapper Stage Wrapper that may contain additional parameters.
+   * @param commandParams Parameters to modify.
+   */
+  private void applyAdditionalParameters(StageWrapper wrapper, Map<String, String> commandParams) {
+    if (wrapper.getParams() != null) {
+      Iterator it = wrapper.getParams().entrySet().iterator();
+      while (it.hasNext()) {
+        Map.Entry<String, String> pair = (Map.Entry) it.next();
+        if (!commandParams.containsKey(pair.getKey())) {
+          commandParams.put(pair.getKey(), pair.getValue());
+        }
+      }
+    }
+  }
+
   private void makeActionStage(UpgradeContext context, RequestStageContainer request,
       UpgradeItemEntity entity, StageWrapper wrapper, boolean skippable, boolean allowRetry)
           throws AmbariException {
@@ -1084,6 +1114,9 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     params.put(COMMAND_PARAM_TARGET_STACK, context.getTargetStackId().getStackId());
     params.put(COMMAND_DOWNGRADE_FROM_VERSION, context.getDowngradeFromVersion());
 
+    // Apply additional parameters to the command that come from the stage.
+    applyAdditionalParameters(wrapper, params);
+
     // Because custom task may end up calling a script/function inside a
     // service, it is necessary to set the
     // service_package_folder and hooks_folder params.
@@ -1192,6 +1225,9 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     commandParams.put(COMMAND_PARAM_TARGET_STACK, context.getTargetStackId().getStackId());
     commandParams.put(COMMAND_DOWNGRADE_FROM_VERSION, context.getDowngradeFromVersion());
 
+    // Apply additional parameters to the command that come from the stage.
+    applyAdditionalParameters(wrapper, commandParams);
+
     ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
         function, filters, commandParams);
     actionContext.setTimeout(Short.valueOf(s_configuration.getDefaultAgentTaskTimeout(false)));
@@ -1244,6 +1280,9 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     commandParams.put(COMMAND_PARAM_TARGET_STACK, context.getTargetStackId().getStackId());
     commandParams.put(COMMAND_DOWNGRADE_FROM_VERSION, context.getDowngradeFromVersion());
 
+    // Apply additional parameters to the command that come from the stage.
+    applyAdditionalParameters(wrapper, commandParams);
+
     ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
         "SERVICE_CHECK", filters, commandParams);
 
@@ -1304,6 +1343,9 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     commandParams.put(COMMAND_PARAM_TARGET_STACK, context.getTargetStackId().getStackId());
     commandParams.put(COMMAND_DOWNGRADE_FROM_VERSION, context.getDowngradeFromVersion());
 
+    // Notice that this does not apply any params because the input does not specify a stage.
+    // All of the other actions do use additional params.
+
     String itemDetail = entity.getText();
     String stageText = StringUtils.abbreviate(entity.getText(), 255);
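
The new applyAdditionalParameters() helper above folds a stage's extra parameters into the command map while deliberately refusing to overwrite any key the command already set. The same merge rule in a few lines of Python (names illustrative):

def apply_additional_parameters(stage_params, command_params):
    # Stage-level params only fill gaps; existing command params win.
    if stage_params:
        for key, value in stage_params.items():
            command_params.setdefault(key, value)
    return command_params

cmd = {"upgrade_direction": "upgrade"}
apply_additional_parameters(
    {"desired_namenode_role": "active", "upgrade_direction": "downgrade"}, cmd)
print(cmd)  # upgrade_direction untouched; the role param was added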
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java
index 55fb12b..22447d7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java
@@ -222,6 +222,29 @@ public class MasterHostResolver {
     }
   }
 
+  /**
+   * Determine if HDFS is present and it has NameNode High Availability.
+   * @return true if has NameNode HA, otherwise, false.
+   */
+  public boolean isNameNodeHA() throws AmbariException {
+    Map<String, org.apache.ambari.server.state.Service> services = m_cluster.getServices();
+    if (services != null && services.containsKey("HDFS")) {
+
+      Set<String> secondaryNameNodeHosts = m_cluster.getHosts("HDFS", "SECONDARY_NAMENODE");
+      Set<String> nameNodeHosts = m_cluster.getHosts("HDFS", "NAMENODE");
+
+      if (secondaryNameNodeHosts.size() == 1 && nameNodeHosts.size() == 1) {
+        return false;
+      }
+      if (nameNodeHosts.size() > 1) {
+        return true;
+      }
+
+      throw new AmbariException("Unable to determine if cluster has NameNode HA.");
+    }
+    return false;
+  }
+
 
   /**
    * Get mapping of the HDFS Namenodes from the state ("active" or "standby") to the hostname.
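
The new isNameNodeHA() check reduces to counting host components: exactly one NAMENODE alongside one SECONDARY_NAMENODE means no HA, more than one NAMENODE means HA, and anything else fails fast as ambiguous. The same decision table restated in Python for clarity (plain dicts and sets stand in for the cluster model):

def is_namenode_ha(services, hosts_by_component):
    # hosts_by_component maps component name -> set of hostnames.
    if "HDFS" not in services:
        return False
    secondaries = hosts_by_component.get("SECONDARY_NAMENODE", set())
    namenodes = hosts_by_component.get("NAMENODE", set())
    if len(secondaries) == 1 and len(namenodes) == 1:
        return False          # classic primary + secondary, no HA
    if len(namenodes) > 1:
        return True           # multiple NameNodes imply HA
    raise RuntimeError("Unable to determine if cluster has NameNode HA.")

print(is_namenode_ha({"HDFS": {}}, {"NAMENODE": {"nn1", "nn2"}}))  # True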

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
index b81d3fd..fd92d21 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
@@ -20,6 +20,7 @@ package org.apache.ambari.server.state;
 import java.text.MessageFormat;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
@@ -325,12 +326,15 @@ public class UpgradeHelper {
         }
 
         for (String component : service.components) {
+          // Rolling Upgrade has exactly one task for a Component.
           if (upgradePack.getType() == UpgradeType.ROLLING && !allTasks.get(service.serviceName).containsKey(component)) {
             continue;
           }
+
+          // NonRolling Upgrade has several tasks for the same component, since it must first call Stop, perform several
+          // other tasks, and then Start on that Component.
           
           HostsType hostsType = mhr.getMasterAndHosts(service.serviceName, component);
-          // TODO AMBARI-12698, how does this impact SECONDARY NAMENODE if there's no NameNode HA?
           if (null == hostsType) {
             continue;
           }
@@ -368,26 +372,66 @@ public class UpgradeHelper {
 
           setDisplayNames(context, service.serviceName, component);
 
-          // Special case for NAMENODE
+          // Special case for NAMENODE when there are multiple
           if (service.serviceName.equalsIgnoreCase("HDFS") && component.equalsIgnoreCase("NAMENODE")) {
-            // !!! revisit if needed
-            if (!hostsType.hosts.isEmpty() && hostsType.master != null && hostsType.secondary != null) {
-              // The order is important, first do the standby, then the active namenode.
-              LinkedHashSet<String> order = new LinkedHashSet<>();
-
-              order.add(hostsType.secondary);
-              order.add(hostsType.master);
 
-              // Override the hosts with the ordered collection
-              hostsType.hosts = order;
+            // Rolling Upgrade requires first upgrading the Standby, then the Active NameNode.
+            // Whereas NonRolling needs to do the following:
+            //   NameNode HA:  Pick one to be the active, and the other the standby.
+            //   Non-NameNode HA: Upgrade first the SECONDARY, then the primary NAMENODE
+            switch (upgradePack.getType()) {
+              case ROLLING:
+                if (!hostsType.hosts.isEmpty() && hostsType.master != null && hostsType.secondary != null) {
+                  // The order is important, first do the standby, then the active namenode.
+                  LinkedHashSet<String> order = new LinkedHashSet<String>();
+
+                  order.add(hostsType.secondary);
+                  order.add(hostsType.master);
+
+                  // Override the hosts with the ordered collection
+                  hostsType.hosts = order;
+
+                  builder.add(context, hostsType, service.serviceName,
+                      svc.isClientOnlyService(), pc, null);
+                }
+                break;
+              case NON_ROLLING:
+                boolean isNameNodeHA = mhr.isNameNodeHA();
+                if (isNameNodeHA && hostsType.master != null && hostsType.secondary != null) {
+                  // This could be any order, but the NameNodes have to know what role they are going to take.
+                  // So need to make 2 stages, and add different parameters to each one.
+
+                  HostsType ht1 = new HostsType();
+                  LinkedHashSet<String> h1Hosts = new LinkedHashSet<String>();
+                  h1Hosts.add(hostsType.master);
+                  ht1.hosts = h1Hosts;
+                  Map<String, String> h1Params = new HashMap<String, String>();
+                  h1Params.put("desired_namenode_role", "active");
+
+                  HostsType ht2 = new HostsType();
+                  LinkedHashSet<String> h2Hosts = new LinkedHashSet<String>();
+                  h2Hosts.add(hostsType.secondary);
+                  ht2.hosts = h2Hosts;
+                  Map<String, String> h2Params = new HashMap<String, String>();
+                  h2Params.put("desired_namenode_role", "standby");
+
+
+                  builder.add(context, ht1, service.serviceName,
+                      svc.isClientOnlyService(), pc, h1Params);
+
+                  builder.add(context, ht2, service.serviceName,
+                      svc.isClientOnlyService(), pc, h2Params);
+                } else {
+                  // If no NameNode HA, then don't need to change hostsType.hosts since there should be exactly one.
+                  builder.add(context, hostsType, service.serviceName,
+                      svc.isClientOnlyService(), pc, null);
+                }
+
+                break;
             }
-
-            builder.add(context, hostsType, service.serviceName,
-                svc.isClientOnlyService(), pc);
-
           } else {
             builder.add(context, hostsType, service.serviceName,
-                svc.isClientOnlyService(), pc);
+                svc.isClientOnlyService(), pc, null);
           }
         }
       }
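
For NAMENODE the builder now branches on the upgrade type: a rolling upgrade keeps a single group ordered standby-then-active, while a non-rolling upgrade with HA emits two single-host stages, each tagged with a desired_namenode_role parameter so the restarted NameNode knows which role to take. A simplified sketch of that grouping decision (plain tuples stand in for HostsType and the stage builder):

def namenode_stage_groups(upgrade_type, master, secondary, ha_enabled):
    # Returns (ordered_hosts, params) pairs in the order stages are added.
    if upgrade_type == "ROLLING":
        # Standby first, then active, within one ordered group.
        return [([secondary, master], None)]
    if ha_enabled:
        # Non-rolling HA: two stages, each pinning a role via params.
        return [([master], {"desired_namenode_role": "active"}),
                ([secondary], {"desired_namenode_role": "standby"})]
    # Non-rolling without HA: exactly one NameNode, no extra params.
    return [([master], None)]

for hosts, params in namenode_stage_groups("NON_ROLLING", "nn1", "nn2", True):
    print(hosts, params)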

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
index ba44408..0e9d2c8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
@@ -108,7 +108,7 @@ public class ClusterGrouping extends Grouping {
 
     @Override
     public void add(UpgradeContext ctx, HostsType hostsType, String service,
-        boolean clientOnly, ProcessingComponent pc) {
+        boolean clientOnly, ProcessingComponent pc, Map<String, String> params) {
       // !!! no-op in this case
     }
 
@@ -261,7 +261,8 @@ public class ClusterGrouping extends Grouping {
       }
 
       return new StageWrapper(
-          StageWrapper.Type.RU_TASKS, execution.title,
+          StageWrapper.Type.RU_TASKS,
+          execution.title,
           new TaskWrapper(service, component, hostNames, et));
     }
     return null;

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ColocatedGrouping.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ColocatedGrouping.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ColocatedGrouping.java
index 2aef43c..11e9267 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ColocatedGrouping.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ColocatedGrouping.java
@@ -81,7 +81,7 @@ public class ColocatedGrouping extends Grouping {
 
     @Override
     public void add(UpgradeContext ctx, HostsType hostsType, String service,
-        boolean clientOnly, ProcessingComponent pc) {
+        boolean clientOnly, ProcessingComponent pc, Map<String, String> params) {
 
       boolean forUpgrade = ctx.getDirection().isUpgrade();
 
@@ -110,7 +110,7 @@ public class ColocatedGrouping extends Grouping {
           proxy.clientOnly = clientOnly;
           proxy.message = getStageText("Preparing",
               ctx.getComponentDisplay(service, pc.name), Collections.singleton(host));
-          proxy.tasks.addAll(TaskWrapperBuilder.getTaskList(service, pc.name, singleHostsType, tasks));
+          proxy.tasks.addAll(TaskWrapperBuilder.getTaskList(service, pc.name, singleHostsType, tasks, params));
           proxy.service = service;
           proxy.component = pc.name;
           targetList.add(proxy);
@@ -122,7 +122,7 @@ public class ColocatedGrouping extends Grouping {
           if (RestartTask.class.isInstance(t)) {
             proxy = new TaskProxy();
             proxy.clientOnly = clientOnly;
-            proxy.tasks.add(new TaskWrapper(service, pc.name, Collections.singleton(host), t));
+            proxy.tasks.add(new TaskWrapper(service, pc.name, Collections.singleton(host), params, t));
             proxy.restart = true;
             proxy.service = service;
             proxy.component = pc.name;
@@ -139,7 +139,7 @@ public class ColocatedGrouping extends Grouping {
           proxy.clientOnly = clientOnly;
           proxy.component = pc.name;
           proxy.service = service;
-          proxy.tasks.addAll(TaskWrapperBuilder.getTaskList(service, pc.name, singleHostsType, tasks));
+          proxy.tasks.addAll(TaskWrapperBuilder.getTaskList(service, pc.name, singleHostsType, tasks, params));
           proxy.message = getStageText("Completing",
               ctx.getComponentDisplay(service, pc.name), Collections.singleton(host));
           targetList.add(proxy);

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java
index d6db9b1..cd3ee68 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java
@@ -21,6 +21,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import javax.xml.bind.annotation.XmlAttribute;
@@ -84,24 +85,26 @@ public class Grouping {
      * @param hostsType the ordered collection of hosts, which may have a master and secondary
      * @param service the service name
      * @param pc the ProcessingComponent derived from the upgrade pack.
+     * @param params additional parameters
      */
     @Override
     public void add(UpgradeContext ctx, HostsType hostsType, String service,
-       boolean clientOnly, ProcessingComponent pc) {
+       boolean clientOnly, ProcessingComponent pc, Map<String, String> params) {
 
       boolean forUpgrade = ctx.getDirection().isUpgrade();
 
       // Construct the pre tasks during Upgrade/Downgrade direction.
       List<TaskBucket> buckets = buckets(resolveTasks(forUpgrade, true, pc));
       for (TaskBucket bucket : buckets) {
-        List<TaskWrapper> preTasks = TaskWrapperBuilder.getTaskList(service, pc.name, hostsType, bucket.tasks);
+        List<TaskWrapper> preTasks = TaskWrapperBuilder.getTaskList(service, pc.name, hostsType, bucket.tasks, params);
         Set<String> preTasksEffectiveHosts = TaskWrapperBuilder.getEffectiveHosts(preTasks);
         if (!preTasksEffectiveHosts.isEmpty()) {
           StageWrapper stage = new StageWrapper(
               bucket.type,
               getStageText("Preparing", ctx.getComponentDisplay(service, pc.name), preTasksEffectiveHosts),
+              params,
               preTasks
-              );
+          );
           m_stages.add(stage);
         }
       }
@@ -114,7 +117,8 @@ public class Grouping {
           StageWrapper stage = new StageWrapper(
               t.getStageWrapperType(),
               getStageText(t.getActionVerb(), ctx.getComponentDisplay(service, pc.name), Collections.singleton(hostName)),
-              new TaskWrapper(service, pc.name, Collections.singleton(hostName), t));
+              params,
+              new TaskWrapper(service, pc.name, Collections.singleton(hostName), params, t));
           m_stages.add(stage);
         }
       }
@@ -122,12 +126,13 @@ public class Grouping {
       // Construct the post tasks during Upgrade/Downgrade direction.
       buckets = buckets(resolveTasks(forUpgrade, false, pc));
       for (TaskBucket bucket : buckets) {
-        List<TaskWrapper> postTasks = TaskWrapperBuilder.getTaskList(service, pc.name, hostsType, bucket.tasks);
+        List<TaskWrapper> postTasks = TaskWrapperBuilder.getTaskList(service, pc.name, hostsType, bucket.tasks, params);
         Set<String> postTasksEffectiveHosts = TaskWrapperBuilder.getEffectiveHosts(postTasks);
         if (!postTasksEffectiveHosts.isEmpty()) {
           StageWrapper stage = new StageWrapper(
               bucket.type,
               getStageText("Completing", ctx.getComponentDisplay(service, pc.name), postTasksEffectiveHosts),
+              params,
               postTasks
               );
           m_stages.add(stage);

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ServiceCheckGrouping.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ServiceCheckGrouping.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ServiceCheckGrouping.java
index fec9978..0033185 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ServiceCheckGrouping.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ServiceCheckGrouping.java
@@ -103,7 +103,7 @@ public class ServiceCheckGrouping extends Grouping {
      */
     @Override
     public void add(UpgradeContext ctx, HostsType hostsType, String service,
-        boolean clientOnly, ProcessingComponent pc) {
+        boolean clientOnly, ProcessingComponent pc, Map<String, String> params) {
       // !!! nothing to do here
     }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapper.java
index 92df3b5..2ea3671 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapper.java
@@ -19,8 +19,10 @@ package org.apache.ambari.server.state.stack.upgrade;
 
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import com.google.gson.Gson;
@@ -33,16 +35,41 @@ public class StageWrapper {
   private static Gson gson = new Gson();
   private String text;
   private Type type;
-
+  private Map<String, String> params;
   private List<TaskWrapper> tasks;
 
+  /**
+   * Wrapper for a stage that encapsulates its text and tasks.
+   * @param type Type of stage
+   * @param text Text to display
+   * @param tasks List of tasks to add to the stage
+   */
   public StageWrapper(Type type, String text, TaskWrapper... tasks) {
-    this(type, text, Arrays.asList(tasks));
+    this(type, text, null, Arrays.asList(tasks));
+  }
+
+  /**
+   * Wrapper for a stage that encapsulates its text, params, and tasks.
+   * @param type Type of stage
+   * @param text Text to display
+   * @param params Command parameters
+   * @param tasks List of tasks to add to the stage
+   */
+  public StageWrapper(Type type, String text, Map<String, String> params, TaskWrapper... tasks) {
+    this(type, text, params, Arrays.asList(tasks));
   }
 
-  public StageWrapper(Type type, String text, List<TaskWrapper> tasks) {
+  /**
+   * Wrapper for a stage that encapsulates its text, params, and tasks.
+   * @param type Type of stage
+   * @param text Text to display
+   * @param params Command parameters
+   * @param tasks List of tasks to add to the stage
+   */
+  public StageWrapper(Type type, String text, Map<String, String> params, List<TaskWrapper> tasks) {
     this.type = type;
     this.text = text;
+    this.params = (params == null ? Collections.<String, String>emptyMap() : params);
     this.tasks = tasks;
   }
 
@@ -78,6 +105,13 @@ public class StageWrapper {
   }
 
   /**
+   * @return the additional command parameters
+   */
+  public Map<String, String> getParams() {
+    return params;
+  }
+
+  /**
    * @return the wrapped tasks for this stage
    */
   public List<TaskWrapper> getTasks() {
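
The widened constructors above normalize a null params map to an immutable empty map, so downstream consumers such as applyAdditionalParameters() can iterate getParams() without null checks. The same normalize-on-construction idea in Python, illustrative only:

class StageWrapper(object):
    def __init__(self, stage_type, text, params=None, tasks=None):
        self.type = stage_type
        self.text = text
        # Normalize missing params to an empty dict so callers can
        # always iterate get_params() without a None check.
        self.params = dict(params) if params else {}
        self.tasks = list(tasks) if tasks else []

    def get_params(self):
        return self.params

stage = StageWrapper("RU_TASKS", "Restarting NameNode")
print(stage.get_params())  # {} rather than None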

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilder.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilder.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilder.java
index d4ee9a8..6ef0980 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilder.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilder.java
@@ -21,6 +21,7 @@ import java.util.Collections;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.ambari.server.serveraction.upgrades.AutoSkipFailedSummaryAction;
@@ -66,9 +67,11 @@ public abstract class StageWrapperBuilder {
    *          whether the service is client only, no service checks
    * @param pc
    *          the ProcessingComponent derived from the upgrade pack
+   * @param params
+   *          additional parameters
    */
   public abstract void add(UpgradeContext upgradeContext, HostsType hostsType, String service,
-      boolean clientOnly, ProcessingComponent pc);
+      boolean clientOnly, ProcessingComponent pc, Map<String, String> params);
 
   /**
   * Builds the stage wrappers, including any pre- and post-processing that needs

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapper.java
index f7cc930..69b3f8b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapper.java
@@ -18,7 +18,9 @@
 package org.apache.ambari.server.state.stack.upgrade;
 
 import java.util.Arrays;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 /**
@@ -29,6 +31,7 @@ public class TaskWrapper {
   private String service;
   private String component;
   private Set<String> hosts; // all the hosts that all the tasks must run
+  private Map<String, String> params;
   private List<Task> tasks; // all the tasks defined for the hostcomponent
 
   /**
@@ -38,8 +41,20 @@ public class TaskWrapper {
    * @param tasks an array of tasks as a convenience
    */
   public TaskWrapper(String s, String c, Set<String> hosts, Task... tasks) {
-    this(s, c, hosts, Arrays.asList(tasks));
+    this(s, c, hosts, null, Arrays.asList(tasks));
   }
+  
+  /**
+   * @param s the service name for the tasks
+   * @param c the component name for the tasks
+   * @param hosts the set of hosts that the tasks are for
+   * @param params additional command parameters
+   * @param tasks an array of tasks as a convenience
+   */
+  public TaskWrapper(String s, String c, Set<String> hosts, Map<String, String> params, Task... tasks) {
+    this(s, c, hosts, params, Arrays.asList(tasks));
+  }
+
 
   /**
    * @param s the service name for the tasks
@@ -47,15 +62,23 @@ public class TaskWrapper {
   * @param hosts the set of hosts for the tasks
    * @param tasks the list of tasks
    */
-  public TaskWrapper(String s, String c, Set<String> hosts, List<Task> tasks) {
+  public TaskWrapper(String s, String c, Set<String> hosts, Map<String, String> params, List<Task> tasks) {
     service = s;
     component = c;
 
     this.hosts = hosts;
+    this.params = (params == null) ? new HashMap<String, String>() : params;
     this.tasks = tasks;
   }
 
   /**
+   * @return the additional command parameters.
+   */
+  public Map<String, String> getParams() {
+    return params;
+  }
+
+  /**
    * @return the tasks associated with this wrapper
    */
   public List<Task> getTasks() {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapperBuilder.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapperBuilder.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapperBuilder.java
index a5813e3..057c310 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapperBuilder.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapperBuilder.java
@@ -22,6 +22,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.ambari.server.stack.HostsType;
@@ -43,15 +44,16 @@ public class TaskWrapperBuilder {
    * @param component the component name for the tasks
    * @param hostsType the collection of sets along with their status
    * @param tasks collection of tasks
+   * @param params additional parameters
    */
-  public static List<TaskWrapper> getTaskList(String service, String component, HostsType hostsType, List<Task> tasks) {
+  public static List<TaskWrapper> getTaskList(String service, String component, HostsType hostsType, List<Task> tasks, Map<String, String> params) {
     List<TaskWrapper> collection = new ArrayList<TaskWrapper>();
     for (Task t : tasks) {
       if (t.getType().equals(Task.Type.EXECUTE)) {
         ExecuteTask et = (ExecuteTask) t;
         if (et.hosts == ExecuteHostType.MASTER) {
           if (hostsType.master != null) {
-            collection.add(new TaskWrapper(service, component, Collections.singleton(hostsType.master), t));
+            collection.add(new TaskWrapper(service, component, Collections.singleton(hostsType.master), params, t));
             continue;
           } else {
             LOG.error(MessageFormat.format("Found an Execute task for {0} and {1} meant to run on a master but could not find any masters to run on. Skipping this task.", service, component));
@@ -61,7 +63,7 @@ public class TaskWrapperBuilder {
         // Pick a random host.
         if (et.hosts == ExecuteHostType.ANY) {
           if (hostsType.hosts != null && !hostsType.hosts.isEmpty()) {
-            collection.add(new TaskWrapper(service, component, Collections.singleton(hostsType.hosts.iterator().next()), t));
+            collection.add(new TaskWrapper(service, component, Collections.singleton(hostsType.hosts.iterator().next()), params, t));
             continue;
           } else {
             LOG.error(MessageFormat.format("Found an Execute task for {0} and {1} meant to run on a any host but could not find host to run on. Skipping this task.", service, component));
@@ -70,7 +72,7 @@ public class TaskWrapperBuilder {
         }
       }
 
-      collection.add(new TaskWrapper(service, component, hostsType.hosts, t));
+      collection.add(new TaskWrapper(service, component, hostsType.hosts, params, t));
     }
 
     return collection;
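
getTaskList() above resolves where each EXECUTE task runs: pinned to the master, to one arbitrary host, or fanned out to every host, with the new params map threaded through each wrapper. The host-selection rule restated as a small Python function (the ExecuteHostType values are mimicked with strings):

def resolve_execute_hosts(execute_on, master, hosts):
    # Mirrors the MASTER / ANY / default branches; returns the host set
    # a task wrapper should target, or None to skip the task entirely.
    if execute_on == "MASTER":
        return {master} if master else None        # no master -> skip
    if execute_on == "ANY":
        return {next(iter(hosts))} if hosts else None
    return set(hosts)                              # default: all hosts

print(resolve_execute_hosts("MASTER", "nn1", {"nn1", "dn1"}))  # {'nn1'}
print(resolve_execute_hosts("ANY", None, {"dn1", "dn2"}))      # one host
print(resolve_execute_hosts("OTHER", None, {"dn1", "dn2"}))    # all hosts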

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
index fa68435..1d242e1 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
@@ -27,8 +27,23 @@ from resource_management.libraries.functions.security_commons import build_expec
 from hdfs import hdfs
 from ambari_commons.os_family_impl import OsFamilyImpl
 from ambari_commons import OSConst
+from utils import get_hdfs_binary
 
 class DataNode(Script):
+
+  def get_stack_to_component(self):
+    return {"HDP": "hadoop-hdfs-datanode"}
+
+  def get_hdfs_binary(self):
+    """
+    Get the name or path to the hdfs binary depending on the stack and version.
+    """
+    import params
+    stack_to_comp = self.get_stack_to_component()
+    if params.stack_name in stack_to_comp:
+      return get_hdfs_binary(stack_to_comp[params.stack_name])
+    return "hdfs"
+
   def install(self, env):
     import params
     self.install_packages(env, params.exclude_packages)
@@ -40,19 +55,20 @@ class DataNode(Script):
     hdfs("datanode")
     datanode(action="configure")
 
-  def start(self, env, rolling_restart=False):
+  def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)
     self.configure(env)
     datanode(action="start")
 
-  def stop(self, env, rolling_restart=False):
+  def stop(self, env, upgrade_type=None):
     import params
     env.set_params(params)
     # pre-upgrade steps shutdown the datanode, so there's no need to call
-    # action=stop
-    if rolling_restart:
-      stopped = datanode_upgrade.pre_upgrade_shutdown()
+
+    hdfs_binary = self.get_hdfs_binary()
+    if upgrade_type == "rolling":
+      stopped = datanode_upgrade.pre_rolling_upgrade_shutdown(hdfs_binary)
       if not stopped:
         datanode(action="stop")
     else:
@@ -67,23 +83,21 @@ class DataNode(Script):
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class DataNodeDefault(DataNode):
 
-  def get_stack_to_component(self):
-    return {"HDP": "hadoop-hdfs-datanode"}
-
-  def pre_rolling_restart(self, env):
-    Logger.info("Executing DataNode Rolling Upgrade pre-restart")
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing DataNode Stack Upgrade pre-restart")
     import params
     env.set_params(params)
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
       hdp_select.select("hadoop-hdfs-datanode", params.version)
 
-  def post_rolling_restart(self, env):
-    Logger.info("Executing DataNode Rolling Upgrade post-restart")
+  def post_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing DataNode Stack Upgrade post-restart")
     import params
     env.set_params(params)
+    hdfs_binary = self.get_hdfs_binary()
     # ensure the DataNode has started and rejoined the cluster
-    datanode_upgrade.post_upgrade_check()
+    datanode_upgrade.post_upgrade_check(hdfs_binary)
 
   def security_status(self, env):
     import status_params
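
get_hdfs_binary() lets the DataNode script hand a distro-aware binary path to the upgrade helpers instead of hard-coding "hdfs". The real helper lives in utils.py (see the diffstat) and its body is not shown here, so the sketch below is an assumption about its shape; in particular, the /usr/hdp/current path convention is a guess based on hdp-select-style layouts:

def get_hdfs_binary(stack_name):
    # Known stacks get a component-specific binary path; anything else
    # falls back to whatever "hdfs" resolves to on the PATH.
    stack_to_component = {"HDP": "hadoop-hdfs-datanode"}
    if stack_name in stack_to_component:
        return "/usr/hdp/current/%s/bin/hdfs" % stack_to_component[stack_name]
    return "hdfs"

print(get_hdfs_binary("HDP"))     # /usr/hdp/current/hadoop-hdfs-datanode/bin/hdfs
print(get_hdfs_binary("BIGTOP"))  # hdfs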

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode_upgrade.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode_upgrade.py
index 2e5ac19..6138f8c 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode_upgrade.py
@@ -26,12 +26,13 @@ from resource_management.libraries.functions import format
 from resource_management.libraries.functions.decorator import retry
 
 
-def pre_upgrade_shutdown():
+def pre_rolling_upgrade_shutdown(hdfs_binary):
   """
   Runs the "shutdownDatanode {ipc_address} upgrade" command to shutdown the
   DataNode in preparation for an upgrade. This will then periodically check
   "getDatanodeInfo" to ensure the DataNode has shutdown correctly.
   This function will obtain the Kerberos ticket if security is enabled.
+  :param hdfs_binary: name/path of the HDFS binary to use
   :return: Return True if ran ok (even with errors), and False if need to stop the datanode forcefully.
   """
   import params
@@ -40,38 +41,39 @@ def pre_upgrade_shutdown():
   if params.security_enabled:
     Execute(params.dn_kinit_cmd, user = params.hdfs_user)
 
-  command = format('hdfs dfsadmin -shutdownDatanode {dfs_dn_ipc_address} upgrade')
+  command = format('{hdfs_binary} dfsadmin -shutdownDatanode {dfs_dn_ipc_address} upgrade')
 
   code, output = shell.call(command, user=params.hdfs_user)
   if code == 0:
     # verify that the datanode is down
-    _check_datanode_shutdown()
+    _check_datanode_shutdown(hdfs_binary)
   else:
-    # Due to bug HDFS-7533, DataNode may not always shutdown during rolling upgrade, and it is necessary to kill it.
+    # Due to bug HDFS-7533, DataNode may not always shutdown during stack upgrade, and it is necessary to kill it.
     if output is not None and re.search("Shutdown already in progress", output):
       Logger.error("Due to a known issue in DataNode, the command {0} did not work, so will need to shutdown the datanode forcefully.".format(command))
       return False
   return True
 
 
-def post_upgrade_check():
+def post_upgrade_check(hdfs_binary):
   """
   Verifies that the DataNode has rejoined the cluster. This function will
   obtain the Kerberos ticket if security is enabled.
+  :param hdfs_binary: name/path of the HDFS binary to use
   :return:
   """
   import params
 
   Logger.info("Checking that the DataNode has rejoined the cluster after upgrade...")
   if params.security_enabled:
-    Execute(params.dn_kinit_cmd,user = params.hdfs_user)
+    Execute(params.dn_kinit_cmd, user=params.hdfs_user)
 
   # verify that the datanode has started and rejoined the HDFS cluster
-  _check_datanode_startup()
+  _check_datanode_startup(hdfs_binary)
 
 
 @retry(times=24, sleep_time=5, err_class=Fail)
-def _check_datanode_shutdown():
+def _check_datanode_shutdown(hdfs_binary):
   """
  Checks that a DataNode is down by running "hdfs dfsadmin getDatanodeInfo"
   several times, pausing in between runs. Once the DataNode stops responding
@@ -84,13 +86,14 @@ def _check_datanode_shutdown():
   https://issues.apache.org/jira/browse/HDFS-8510 tracks reducing the
   times for ipc.client.connect.retry.interval. In the meantime, override them
   here, but only for RU.
+  :param hdfs_binary: name/path of the HDFS binary to use
   :return:
   """
   import params
 
   # override stock retry timeouts since after 30 seconds, the datanode is
   # marked as dead and can affect HBase during RU
-  command = format('hdfs dfsadmin -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo {dfs_dn_ipc_address}')
+  command = format('{hdfs_binary} dfsadmin -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo {dfs_dn_ipc_address}')
 
   try:
     Execute(command, user=params.hdfs_user, tries=1)
@@ -103,19 +106,19 @@ def _check_datanode_shutdown():
 
 
 @retry(times=12, sleep_time=10, err_class=Fail)
-def _check_datanode_startup():
+def _check_datanode_startup(hdfs_binary):
   """
   Checks that a DataNode is reported as being alive via the
   "hdfs dfsadmin -report -live" command. Once the DataNode is found to be
   alive this method will return, otherwise it will raise a Fail(...) and retry
   automatically.
+  :param hdfs_binary: name/path of the HDFS binary to use
   :return:
   """
   import params
 
   try:
-    # 'su - hdfs -c "hdfs dfsadmin -report -live"'
-    command = 'hdfs dfsadmin -report -live'
+    command = format('{hdfs_binary} dfsadmin -report -live')
     return_code, hdfs_output = shell.call(command, user=params.hdfs_user)
   except:
     raise Fail('Unable to determine if the DataNode has started after upgrade.')
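
Both polling checks above lean on the library's @retry decorator (from resource_management.libraries.functions.decorator) to re-run the dfsadmin probe until it flips state: 24 tries at 5-second intervals for shutdown, 12 at 10 seconds for startup. That decorator's implementation is not shown here, so the following is only a minimal re-implementation of the idea:

import time
import functools

class Fail(Exception):
    pass

def retry(times, sleep_time, err_class):
    # Re-run the wrapped check until it stops raising err_class,
    # sleeping between attempts; re-raise after the final attempt.
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            for attempt in range(times):
                try:
                    return fn(*args, **kwargs)
                except err_class:
                    if attempt == times - 1:
                        raise
                    time.sleep(sleep_time)
        return wrapper
    return decorator

@retry(times=3, sleep_time=0, err_class=Fail)
def probe():
    probe.calls = getattr(probe, "calls", 0) + 1
    if probe.calls < 3:
        raise Fail("DataNode still responding")
    return "DataNode has stopped responding"

print(probe())  # succeeds on the third attempt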

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
index dd0dca4..16218b6 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
@@ -40,11 +40,11 @@ class HdfsClient(Script):
     env.set_params(params)
     hdfs()
 
-  def start(self, env, rolling_restart=False):
+  def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)
 
-  def stop(self, env, rolling_restart=False):
+  def stop(self, env, upgrade_type=None):
     import params
     env.set_params(params)
 
@@ -57,7 +57,7 @@ class HdfsClientDefault(HdfsClient):
   def get_stack_to_component(self):
     return {"HDP": "hadoop-client"}
 
-  def pre_rolling_restart(self, env):
+  def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
     env.set_params(params)
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
index b11d7ea..98b8afd 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
@@ -36,18 +36,27 @@ from resource_management.core.shell import as_user
 from resource_management.core.exceptions import Fail
 from resource_management.core.logger import Logger
 
-from utils import service, safe_zkfc_op
+from utils import service, safe_zkfc_op, is_previous_fs_image
 from setup_ranger_hdfs import setup_ranger_hdfs
+from namenode_ha_state import NAMENODE_STATE, NamenodeHAState
+
 
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def namenode(action=None, do_format=True, rolling_restart=False, env=None):
+def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None, env=None):
+  if action is None:
+    raise Fail('"action" parameter is required for function namenode().')
+
+  if action in ["start", "stop"] and hdfs_binary is None:
+    raise Fail('"hdfs_binary" parameter is required for function namenode().')
+
   if action == "configure":
     import params
     #we need this directory to be present before any action(HA manual steps for
     #additional namenode)
     create_name_dirs(params.dfs_name_dir)
   elif action == "start":
-    setup_ranger_hdfs(rolling_upgrade = rolling_restart)
+    Logger.info("Called service {0} with upgrade_type: {1}".format(action, str(upgrade_type)))
+    setup_ranger_hdfs(upgrade_type=upgrade_type)
     import params
     if do_format:
       format_namenode()
@@ -70,13 +79,33 @@ def namenode(action=None, do_format=True, rolling_restart=False, env=None):
         if not success:
           raise Fail("Could not bootstrap standby namenode")
 
-    options = "-rollingUpgrade started" if rolling_restart else ""
-
-    if rolling_restart:
+    if upgrade_type == "rolling" and params.dfs_ha_enabled:
       # Most likely, ZKFC is up since RU will initiate the failover command. However, if that failed, it would have tried
       # to kill ZKFC manually, so we need to start it if not already running.
       safe_zkfc_op(action, env)
 
+    options = ""
+    if upgrade_type == "rolling":
+      options = "-rollingUpgrade started"
+    elif upgrade_type == "nonrolling":
+      is_previous_image_dir = is_previous_fs_image()
+      Logger.info(format("Previous file system image dir present is {is_previous_image_dir}"))
+
+      if params.dfs_ha_enabled:
+        if params.desired_namenode_role is None:
+          raise Fail("Did not receive parameter \"desired_namenode_role\" to indicate the role that this NameNode should have.")
+
+        if params.desired_namenode_role == "active":
+          # The "-upgrade" command can only be used exactly once. If used more than once during a retry, it will cause problems.
+          options = "" if is_previous_image_dir else "-upgrade"
+
+        if params.desired_namenode_role == "standby":
+          options = "-bootstrapStandby -force"
+      else:
+        # Both Primary and Secondary NameNode can use the same command.
+        options = "" if is_previous_image_dir else "-upgrade"
+    Logger.info(format("Option for start command: {options}"))
+
     service(
       action="start",
       name="namenode",
@@ -90,53 +119,66 @@ def namenode(action=None, do_format=True, rolling_restart=False, env=None):
       Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
               user = params.hdfs_user)
 
-    is_namenode_safe_mode_off = format("hdfs dfsadmin -fs {namenode_address} -safemode get | grep 'Safe mode is OFF'")
+    is_namenode_safe_mode_off = format("{hdfs_binary} dfsadmin -fs {namenode_address} -safemode get | grep 'Safe mode is OFF'")
     if params.dfs_ha_enabled:
-      is_active_namenode_cmd = as_user(format("hdfs --config {hadoop_conf_dir} haadmin -getServiceState {namenode_id} | grep active"), params.hdfs_user, env={'PATH':params.hadoop_bin_dir})
+      is_active_namenode_cmd = as_user(format("{hdfs_binary} --config {hadoop_conf_dir} haadmin -getServiceState {namenode_id} | grep active"), params.hdfs_user, env={'PATH':params.hadoop_bin_dir})
     else:
       is_active_namenode_cmd = None
+    
+    # During NonRolling Upgrade, both NameNodes are initially down,
+    # so no point in checking if this is the active or standby.
+    if upgrade_type == "nonrolling":
+      is_active_namenode_cmd = None
 
-    # During normal operations, if HA is enabled and it is in standby, then no need to check safemode staus.
-    # During Rolling Upgrade, both namenodes must eventually leave safemode, and Ambari can wait for this.
+    # ___Scenario___________|_Expected safemode state__|_Wait for safemode OFF____|
+    # no-HA                 | ON -> OFF                | Yes                      |
+    # HA and active         | ON -> OFF                | Yes                      |
+    # HA and standby        | no change                | no check                 |
+    # RU with HA on active  | ON -> OFF                | Yes                      |
+    # RU with HA on standby | ON -> OFF                | Yes                      |
+    # EU with HA on active  | no change                | no check                 |
+    # EU with HA on standby | no change                | no check                 |
+    # EU non-HA             | no change                | no check                 |
 
-    # ___Scenario_________|_Expected safemode state___|_Wait for safemode OFF____|
-    # 1 (HA and active)   | ON -> OFF                 | Yes                      |
-    # 2 (HA and standby)  | no change (yes during RU) | no check (yes during RU) |
-    # 3 (no-HA)           | ON -> OFF                 | Yes                      |
     check_for_safemode_off = False
     msg = ""
     if params.dfs_ha_enabled:
-      code, out = shell.call(is_active_namenode_cmd, logoutput=True) # If active NN, code will be 0
-      if code == 0: # active
-        check_for_safemode_off = True
-        msg = "Must wait to leave safemode since High Availability is enabled and this is the Active NameNode."
-      elif rolling_restart:
+      if upgrade_type is not None:
         check_for_safemode_off = True
-        msg = "Must wait to leave safemode since High Availability is enabled during a Rolling Upgrade"
+        msg = "Must wait to leave safemode since High Availability is enabled during a Stack Upgrade"
+      else:
+        # During normal operations, the NameNode is expected to be up.
+        code, out = shell.call(is_active_namenode_cmd, logoutput=True) # If active NN, code will be 0
+        if code == 0: # active
+          check_for_safemode_off = True
+          msg = "Must wait to leave safemode since High Availability is enabled and this is the Active NameNode."
+        else:
+          msg = "Will remain in the current safemode state."
     else:
       msg = "Must wait to leave safemode since High Availability is not enabled."
       check_for_safemode_off = True
 
-    if not msg:
-      msg = "Will remain in the current safemode state."
     Logger.info(msg)
 
+    # During a NonRolling (aka Express Upgrade), stay in safemode since the DataNodes are down.
+    stay_in_safe_mode = False
+    if upgrade_type == "nonrolling":
+      stay_in_safe_mode = True
+
     if check_for_safemode_off:
-      # First check if Namenode is not in 'safemode OFF' (equivalent to safemode ON). If safemode is OFF, no change.
-      # If safemode is ON, first wait for NameNode to leave safemode on its own (if that doesn't happen within 30 seconds, then
-      # force NameNode to leave safemode).
-      Logger.info("Checking the NameNode safemode status since may need to transition from ON to OFF.")
-
-      try:
-        # Wait up to 30 mins
-        Execute(is_namenode_safe_mode_off,
-                tries=180,
-                try_sleep=10,
-                user=params.hdfs_user,
-                logoutput=True
-        )
-      except Fail:
-        Logger.error("NameNode is still in safemode, please be careful with commands that need safemode OFF.")
+      Logger.info("Stay in safe mode: {0}".format(stay_in_safe_mode))
+      if not stay_in_safe_mode:
+        Logger.info("Wait to leafe safemode since must transition from ON to OFF.")
+        try:
+          # Wait up to 30 mins
+          Execute(is_namenode_safe_mode_off,
+                  tries=180,
+                  try_sleep=10,
+                  user=params.hdfs_user,
+                  logoutput=True
+          )
+        except Fail:
+          Logger.error("NameNode is still in safemode, please be careful with commands that need safemode OFF.")
 
     # Always run this on non-HA, or active NameNode during HA.
     create_hdfs_directories(is_active_namenode_cmd)
@@ -154,7 +196,13 @@ def namenode(action=None, do_format=True, rolling_restart=False, env=None):
     decommission()
 
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def namenode(action=None, do_format=True, rolling_restart=False, env=None):
+def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None, env=None):
+  if action is None:
+    raise Fail('"action" parameter is required for function namenode().')
+
+  if action in ["start", "stop"] and hdfs_binary is None:
+    raise Fail('"hdfs_binary" parameter is required for function namenode().')
+
   if action == "configure":
     pass
   elif action == "start":

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
index 46c7272..2ef1b69 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
@@ -45,8 +45,8 @@ class JournalNodeDefault(JournalNode):
   def get_stack_to_component(self):
     return {"HDP": "hadoop-hdfs-journalnode"}
 
-  def pre_rolling_restart(self, env):
-    Logger.info("Executing Rolling Upgrade pre-restart")
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
     import params
     env.set_params(params)
 
@@ -54,7 +54,7 @@ class JournalNodeDefault(JournalNode):
       conf_select.select(params.stack_name, "hadoop", params.version)
       hdp_select.select("hadoop-hdfs-journalnode", params.version)
 
-  def start(self, env, rolling_restart=False):
+  def start(self, env, upgrade_type=None):
     import params
 
     env.set_params(params)
@@ -65,13 +65,16 @@ class JournalNodeDefault(JournalNode):
       create_log_dir=True
     )
 
-  def post_rolling_restart(self, env):
-    Logger.info("Executing Rolling Upgrade post-restart")
+  def post_upgrade_restart(self, env, upgrade_type=None):
+    if upgrade_type == "nonrolling":
+      return
+
+    Logger.info("Executing Stack Upgrade post-restart")
     import params
     env.set_params(params)
     journalnode_upgrade.post_upgrade_check()
 
-  def stop(self, env, rolling_restart=False):
+  def stop(self, env, upgrade_type=None):
     import params
 
     env.set_params(params)
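
Across these scripts the old rolling_restart boolean is replaced by an upgrade_type argument that is None, "rolling", or "nonrolling". A hedged skeleton showing how a component hook consumes it, in the spirit of the JournalNode change above (the class and method bodies are illustrative only):

class ExampleComponent(object):
    def pre_upgrade_restart(self, env, upgrade_type=None):
        print("pre-restart, upgrade type: %s" % upgrade_type)

    def post_upgrade_restart(self, env, upgrade_type=None):
        if upgrade_type == "nonrolling":
            # Express Upgrade: skip the post checks, as JournalNode does above.
            return
        print("rolling-upgrade post checks run here")

c = ExampleComponent()
c.post_upgrade_restart(env=None, upgrade_type="nonrolling")  # returns immediately
c.post_upgrade_restart(env=None, upgrade_type="rolling")     # runs the checks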

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
index e2ebbcb..850c32d 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
@@ -31,7 +31,7 @@ from namenode_ha_state import NAMENODE_STATE, NamenodeHAState
 
 def post_upgrade_check():
   """
-  Ensure all journal nodes are up and quorum is established
+  Ensure all journal nodes are up and quorum is established during Rolling Upgrade.
   :return:
   """
   import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
index 93bbc0f..bb60ec3 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
@@ -42,11 +42,13 @@ from resource_management.core.logger import Logger
 from ambari_commons.os_family_impl import OsFamilyImpl
 from ambari_commons import OSConst
 
+
 import namenode_upgrade
 from hdfs_namenode import namenode
 from hdfs import hdfs
 import hdfs_rebalance
-from utils import initiate_safe_zkfc_failover
+from utils import initiate_safe_zkfc_failover, get_hdfs_binary
+
 
 
 # hashlib is supplied as of Python 2.5 as the replacement interface for md5
@@ -62,6 +64,19 @@ except ImportError:
 
 class NameNode(Script):
 
+  def get_stack_to_component(self):
+    return {"HDP": "hadoop-hdfs-namenode"}
+
+  def get_hdfs_binary(self):
+    """
+    Get the name or path to the hdfs binary depending on the stack and version.
+    """
+    import params
+    stack_to_comp = self.get_stack_to_component()
+    if params.stack_name in stack_to_comp:
+      return get_hdfs_binary(stack_to_comp[params.stack_name])
+    return "hdfs"
+
   def install(self, env):
     import params
     self.install_packages(env, params.exclude_packages)
@@ -73,40 +88,41 @@ class NameNode(Script):
     import params
     env.set_params(params)
     hdfs("namenode")
-    namenode(action="configure", env=env)
+    hdfs_binary = self.get_hdfs_binary()
+    namenode(action="configure", hdfs_binary=hdfs_binary, env=env)
 
-  def start(self, env, rolling_restart=False):
+  def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)
     self.configure(env)
-    namenode(action="start", rolling_restart=rolling_restart, env=env)
+    hdfs_binary = self.get_hdfs_binary()
+    namenode(action="start", hdfs_binary=hdfs_binary, upgrade_type=upgrade_type, env=env)
 
-  def stop(self, env, rolling_restart=False):
+  def stop(self, env, upgrade_type=None):
     import params
     env.set_params(params)
-    if rolling_restart and params.dfs_ha_enabled:
+    hdfs_binary = self.get_hdfs_binary()
+    if upgrade_type == "rolling" and params.dfs_ha_enabled:
       if params.dfs_ha_automatic_failover_enabled:
         initiate_safe_zkfc_failover()
       else:
         raise Fail("Rolling Upgrade - dfs.ha.automatic-failover.enabled must be enabled to perform a rolling restart")
-    namenode(action="stop", rolling_restart=rolling_restart, env=env)
+    namenode(action="stop", hdfs_binary=hdfs_binary, upgrade_type=upgrade_type, env=env)
 
   def status(self, env):
     import status_params
     env.set_params(status_params)
-    namenode(action="status", rolling_restart=False, env=env)
+    namenode(action="status", env=env)
 
   def decommission(self, env):
     import params
     env.set_params(params)
-    namenode(action="decommission")
+    hdfs_binary = self.get_hdfs_binary()
+    namenode(action="decommission", hdfs_binary=hdfs_binary)
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class NameNodeDefault(NameNode):
 
-  def get_stack_to_component(self):
-    return {"HDP": "hadoop-hdfs-namenode"}
-
   def restore_snapshot(self, env):
     """
     Restore the snapshot during a Downgrade.
@@ -115,21 +131,73 @@ class NameNodeDefault(NameNode):
     pass
 
   def prepare_non_rolling_upgrade(self, env):
-    print "TODO AMBARI-12698"
-    pass
+    """
+    If in HA, on the Active NameNode only, examine the directory dfs.namenode.name.dir and
+    make sure that there is no "/previous" directory.
+
+    Create a list of all the DataNodes in the cluster.
+    hdfs dfsadmin -report > dfs-old-report-1.log
+
+    hdfs dfsadmin -safemode enter
+    hdfs dfsadmin -saveNamespace
+
+    Copy the checkpoint files located in ${dfs.namenode.name.dir}/current into a backup directory.
+
+    Store the layoutVersion for the NameNode located at ${dfs.namenode.name.dir}/current/VERSION, into a backup directory
+
+    Finalize any prior HDFS upgrade,
+    hdfs dfsadmin -finalizeUpgrade
+    """
+    import params
+    Logger.info("Preparing the NameNodes for a NonRolling (aka Express) Upgrade.")
+
+    if params.security_enabled:
+      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
+              user=params.hdfs_user)
+
+    hdfs_binary = self.get_hdfs_binary()
+    namenode_upgrade.prepare_upgrade_check_for_previous_dir()
+    namenode_upgrade.prepare_upgrade_enter_safe_mode(hdfs_binary)
+    namenode_upgrade.prepare_upgrade_save_namespace(hdfs_binary)
+    namenode_upgrade.prepare_upgrade_backup_namenode_dir()
+    namenode_upgrade.prepare_upgrade_finalize_previous_upgrades(hdfs_binary)
 
   def prepare_rolling_upgrade(self, env):
-    namenode_upgrade.prepare_rolling_upgrade()
+    hdfs_binary = self.get_hdfs_binary()
+    namenode_upgrade.prepare_rolling_upgrade(hdfs_binary)
+
+  def wait_for_safemode_off(self, env):
+    """
+    During NonRolling (aka Express Upgrade), after starting NameNode, which is still in safemode, and then starting
+    all of the DataNodes, we need for NameNode to receive all of the block reports and leave safemode.
+    """
+    import params
+
+    Logger.info("Wait to leafe safemode since must transition from ON to OFF.")
+    try:
+      hdfs_binary = self.get_hdfs_binary()
+      # Note, this fails if namenode_address isn't prefixed with "params."
+      is_namenode_safe_mode_off = format("{hdfs_binary} dfsadmin -fs {params.namenode_address} -safemode get | grep 'Safe mode is OFF'")
+      # Wait up to 30 mins
+      Execute(is_namenode_safe_mode_off,
+              tries=180,
+              try_sleep=10,
+              user=params.hdfs_user,
+              logoutput=True
+      )
+    except Fail:
+      Logger.error("NameNode is still in safemode, please be careful with commands that need safemode OFF.")
 
   def finalize_non_rolling_upgrade(self, env):
-    print "TODO AMBARI-12698"
-    pass
+    hdfs_binary = self.get_hdfs_binary()
+    namenode_upgrade.finalize_upgrade("nonrolling", hdfs_binary)
 
   def finalize_rolling_upgrade(self, env):
-    namenode_upgrade.finalize_rolling_upgrade()
+    hdfs_binary = self.get_hdfs_binary()
+    namenode_upgrade.finalize_upgrade("rolling", hdfs_binary)
 
-  def pre_rolling_restart(self, env):
-    Logger.info("Executing Rolling Upgrade pre-restart")
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
     import params
     env.set_params(params)
 
@@ -137,12 +205,13 @@ class NameNodeDefault(NameNode):
       conf_select.select(params.stack_name, "hadoop", params.version)
       hdp_select.select("hadoop-hdfs-namenode", params.version)
 
-  def post_rolling_restart(self, env):
-    Logger.info("Executing Rolling Upgrade post-restart")
+  def post_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade post-restart")
     import params
     env.set_params(params)
 
-    Execute("hdfs dfsadmin -report -live",
+    hdfs_binary = self.get_hdfs_binary()
+    Execute(format("{hdfs_binary} dfsadmin -report -live"),
             user=params.hdfs_user
     )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_ha_state.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_ha_state.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_ha_state.py
index e8c142c..d6b6225 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_ha_state.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_ha_state.py
@@ -180,6 +180,33 @@ class NamenodeHAState:
       return self.get_address_for_host(hostname)
     return None
 
+  def is_active(self, host_name):
+    """
+    :param host_name: Host name
+    :return: Return True if this is the active NameNode, otherwise, False.
+    """
+    return self._is_in_state(host_name, NAMENODE_STATE.ACTIVE)
+
+  def is_standby(self, host_name):
+    """
+    :param host_name: Host name
+    :return: Return True if this is the standby NameNode, otherwise, False.
+    """
+    return self._is_in_state(host_name, NAMENODE_STATE.STANDBY)
+
+  def _is_in_state(self, host_name, state):
+    """
+    :param host_name: Host name
+    :param state: State to check
+    :return: Return True if this NameNode is in the specified state, otherwise, False.
+    """
+    mapping = self.get_namenode_state_to_hostnames()
+    if state in mapping:
+      hosts_in_state = mapping[state]
+      if hosts_in_state is not None and len(hosts_in_state) == 1 and next(iter(hosts_in_state)).lower() == host_name.lower():
+        return True
+    return False
+
   def is_healthy(self):
     """
     :return: Returns a bool indicating if exactly one ACTIVE and one STANDBY host exist.
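
The is_active()/is_standby() helpers added above both delegate to _is_in_state(), which expects exactly one host per state and compares case-insensitively. A self-contained sketch of that check (the mapping literal and host names are made up for the example):

def is_in_state(state_to_hostnames, host_name, state):
    hosts_in_state = state_to_hostnames.get(state)
    return (hosts_in_state is not None and len(hosts_in_state) == 1
            and next(iter(hosts_in_state)).lower() == host_name.lower())

mapping = {"active": {"NN1.example.com"}, "standby": {"nn2.example.com"}}
print(is_in_state(mapping, "nn1.example.com", "active"))   # True
print(is_in_state(mapping, "nn1.example.com", "standby"))  # False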

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_upgrade.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_upgrade.py
index fb39878..c8c057d 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_upgrade.py
@@ -17,26 +17,148 @@ limitations under the License.
 
 """
 import re
+import os
 
 from resource_management.core.logger import Logger
 from resource_management.core.resources.system import Execute
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.default import default
 from resource_management.core import shell
-from resource_management.libraries.functions import Direction, SafeMode
+from resource_management.core.shell import as_user
 from resource_management.core.exceptions import Fail
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import get_unique_id_and_date
+from resource_management.libraries.functions import Direction, SafeMode
+
+from namenode_ha_state import NamenodeHAState
 
 
 safemode_to_instruction = {SafeMode.ON: "enter",
                            SafeMode.OFF: "leave"}
 
-def reach_safemode_state(user, safemode_state, in_ha):
+
+def prepare_upgrade_check_for_previous_dir():
+  """
+  During a NonRolling (aka Express Upgrade), preparing the NameNode requires backing up some data.
+  Check that there is no "previous" folder inside the NameNode Name Dir.
+  """
+  import params
+
+  if params.dfs_ha_enabled:
+    namenode_ha = NamenodeHAState()
+    if namenode_ha.is_active(params.hostname):
+      Logger.info("NameNode High Availability is enabled and this is the Active NameNode.")
+
+      problematic_previous_namenode_dirs = set()
+      nn_name_dirs = params.dfs_name_dir.split(',')
+      for nn_dir in nn_name_dirs:
+        if os.path.isdir(nn_dir):
+          # Check for a previous folder, which is not allowed.
+          previous_dir = os.path.join(nn_dir, "previous")
+          if os.path.isdir(previous_dir):
+            problematic_previous_namenode_dirs.add(previous_dir)
+
+      if len(problematic_previous_namenode_dirs) > 0:
+        message = 'WARNING. The following NameNode Name Dir(s) have a "previous" folder from an older version.\n' \
+                  'Please back it up first, and then delete it, OR Finalize (E.g., "hdfs dfsadmin -finalizeUpgrade").\n' \
+                  'NameNode Name Dir(s): {0}\n' \
+                  '***** Then, retry this step. *****'.format(", ".join(problematic_previous_namenode_dirs))
+        Logger.error(message)
+        raise Fail(message)
+
+def prepare_upgrade_enter_safe_mode(hdfs_binary):
+  """
+  During a NonRolling (aka Express Upgrade), preparing the NameNode requires first entering Safemode.
+  :param hdfs_binary: name/path of the HDFS binary to use
+  """
+  import params
+
+  safe_mode_enter_cmd = format("{hdfs_binary} dfsadmin -safemode enter")
+  safe_mode_enter_and_check_for_on = format("{safe_mode_enter_cmd} | grep 'Safe mode is ON'")
+  try:
+    # Safe to call if already in Safe Mode
+    Logger.info("Enter SafeMode if not already in it.")
+    as_user(safe_mode_enter_and_check_for_on, params.hdfs_user, env={'PATH': params.hadoop_bin_dir})
+  except Exception, e:
+    message = format("Could not enter safemode. As the HDFS user, call this command: {safe_mode_enter_cmd}")
+    Logger.error(message)
+    raise Fail(message)
+
+def prepare_upgrade_save_namespace(hdfs_binary):
+  """
+  During a NonRolling (aka Express Upgrade), preparing the NameNode requires saving the namespace.
+  :param hdfs_binary: name/path of the HDFS binary to use
+  """
+  import params
+
+  save_namespace_cmd = format("{hdfs_binary} dfsadmin -saveNamespace")
+  try:
+    Logger.info("Checkpoint the current namespace.")
+    as_user(save_namespace_cmd, params.hdfs_user, env={'PATH': params.hadoop_bin_dir})
+  except Exception, e:
+    message = format("Could save the NameSpace. As the HDFS user, call this command: {save_namespace_cmd}")
+    Logger.error(message)
+    raise Fail(message)
+
+def prepare_upgrade_backup_namenode_dir():
+  """
+  During a NonRolling (aka Express Upgrade), preparing the NameNode requires backing up the NameNode Name Dirs.
+  """
+  import params
+
+  i = 0
+  failed_paths = []
+  nn_name_dirs = params.dfs_name_dir.split(',')
+  backup_destination_root_dir = "/tmp/upgrades/{0}".format(params.stack_version_unformatted)
+  if len(nn_name_dirs) > 0:
+    Logger.info("Backup the NameNode name directory's CURRENT folder.")
+  for nn_dir in nn_name_dirs:
+    i += 1
+    namenode_current_image = os.path.join(nn_dir, "current")
+    unique = get_unique_id_and_date() + "_" + str(i)
+    # Note that /tmp may not be writeable.
+    backup_current_folder = "{0}/namenode_{1}/".format(backup_destination_root_dir, unique)
+
+    if os.path.isdir(namenode_current_image) and not os.path.isdir(backup_current_folder):
+      try:
+        os.makedirs(backup_current_folder)
+        Execute(('cp', '-ar', namenode_current_image, backup_current_folder),
+                sudo=True
+        )
+      except Exception, e:
+        failed_paths.append(namenode_current_image)
+  if len(failed_paths) > 0:
+    Logger.error("Could not backup the NameNode Name Dir(s) to {0}, make sure that the destination path is "
+                 "writeable and copy the directories on your own. Directories: {1}".format(backup_destination_root_dir,
+                                                                                           ", ".join(failed_paths)))
+
+def prepare_upgrade_finalize_previous_upgrades(hdfs_binary):
+  """
+  During a NonRolling (aka Express Upgrade), preparing the NameNode requires Finalizing any upgrades that are in progress.
+  :param hdfs_binary: name/path of the HDFS binary to use
+  """
+  import params
+
+  finalize_command = format("{hdfs_binary} dfsadmin -rollingUpgrade finalize")
+  try:
+    Logger.info("Attempt to Finalize if there are any in-progress upgrades. "
+                "This will return 255 if no upgrades are in progress.")
+    code, out = shell.checked_call(finalize_command, logoutput=True, user=params.hdfs_user)
+    if out:
+      expected_substring = "there is no rolling upgrade in progress"
+      if expected_substring not in out.lower():
+        Logger.warning('Finalize command output did not contain substring: %s' % expected_substring)
+    else:
+      Logger.warning("Finalize command did not return any output.")
+  except Exception, e:
+    Logger.warning("Ensure no upgrades are in progress.")
+
+def reach_safemode_state(user, safemode_state, in_ha, hdfs_binary):
   """
   Enter or leave safemode for the Namenode.
-  @param user: user to perform action as
-  @param safemode_state: Desired state of ON or OFF
-  @param in_ha: bool indicating if Namenode High Availability is enabled
-  @:return Returns a tuple of (transition success, original state). If no change is needed, the indicator of
+  :param user: user to perform action as
+  :param safemode_state: Desired state of ON or OFF
+  :param in_ha: bool indicating if Namenode High Availability is enabled
+  :param hdfs_binary: name/path of the HDFS binary to use
+  :return: Returns a tuple of (transition success, original state). If no change is needed, the indicator of
   success will be True
   """
   Logger.info("Prepare to transition into safemode state %s" % safemode_state)
@@ -44,7 +166,7 @@ def reach_safemode_state(user, safemode_state, in_ha):
   original_state = SafeMode.UNKNOWN
 
   hostname = params.hostname
-  safemode_check = format("hdfs dfsadmin -safemode get")
+  safemode_check = format("{hdfs_binary} dfsadmin -safemode get")
 
   grep_pattern = format("Safe mode is {safemode_state} in {hostname}") if in_ha else format("Safe mode is {safemode_state}")
   safemode_check_with_grep = format("hdfs dfsadmin -safemode get | grep '{grep_pattern}'")
@@ -61,7 +183,7 @@ def reach_safemode_state(user, safemode_state, in_ha):
         return (True, original_state)
       else:
         # Make a transition
-        command = "hdfs dfsadmin -safemode %s" % (safemode_to_instruction[safemode_state])
+        command = "{0} dfsadmin -safemode {1}".format(hdfs_binary, safemode_to_instruction[safemode_state])
         Execute(command,
                 user=user,
                 logoutput=True,
@@ -74,7 +196,7 @@ def reach_safemode_state(user, safemode_state, in_ha):
   return (False, original_state)
 
 
-def prepare_rolling_upgrade():
+def prepare_rolling_upgrade(hdfs_binary):
   """
   Perform either an upgrade or a downgrade.
 
@@ -83,6 +205,7 @@ def prepare_rolling_upgrade():
   1. Leave safemode if the safemode status is not OFF
   2. Execute a rolling upgrade "prepare"
   3. Execute a rolling upgrade "query"
+  :param hdfs_binary: name/path of the HDFS binary to use
   """
   import params
 
@@ -96,12 +219,12 @@ def prepare_rolling_upgrade():
 
 
   if params.upgrade_direction == Direction.UPGRADE:
-    safemode_transition_successful, original_state = reach_safemode_state(params.hdfs_user, SafeMode.OFF, True)
+    safemode_transition_successful, original_state = reach_safemode_state(params.hdfs_user, SafeMode.OFF, True, hdfs_binary)
     if not safemode_transition_successful:
       raise Fail("Could not transition to safemode state %s. Please check logs to make sure namenode is up." % str(SafeMode.OFF))
 
-    prepare = "hdfs dfsadmin -rollingUpgrade prepare"
-    query = "hdfs dfsadmin -rollingUpgrade query"
+    prepare = format("{hdfs_binary} dfsadmin -rollingUpgrade prepare")
+    query = format("{hdfs_binary} dfsadmin -rollingUpgrade query")
     Execute(prepare,
             user=params.hdfs_user,
             logoutput=True)
@@ -111,9 +234,11 @@ def prepare_rolling_upgrade():
   elif params.upgrade_direction == Direction.DOWNGRADE:
     pass
 
-def finalize_rolling_upgrade():
+def finalize_upgrade(upgrade_type, hdfs_binary):
   """
   Finalize the Namenode upgrade, at which point it cannot be downgraded.
+  :param upgrade_type rolling or nonrolling
+  :param hdfs_binary: name/path of the HDFS binary to use
   """
   Logger.info("Executing Rolling Upgrade finalize")
   import params
@@ -122,8 +247,15 @@ def finalize_rolling_upgrade():
     kinit_command = format("{params.kinit_path_local} -kt {params.hdfs_user_keytab} {params.hdfs_principal_name}") 
     Execute(kinit_command, user=params.hdfs_user, logoutput=True)
 
-  finalize_cmd = "hdfs dfsadmin -rollingUpgrade finalize"
-  query_cmd = "hdfs dfsadmin -rollingUpgrade query"
+  finalize_cmd = ""
+  query_cmd = ""
+  if upgrade_type == "rolling":
+    finalize_cmd = format("{hdfs_binary} dfsadmin -rollingUpgrade finalize")
+    query_cmd = format("{hdfs_binary} dfsadmin -rollingUpgrade query")
+
+  elif upgrade_type == "nonrolling":
+    finalize_cmd = format("{hdfs_binary} dfsadmin -finalizeUpgrade")
+    query_cmd = format("{hdfs_binary} dfsadmin -rollingUpgrade query")
 
   Execute(query_cmd,
         user=params.hdfs_user,
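
finalize_upgrade() above now chooses its dfsadmin commands by upgrade type; only the finalize command differs, while the query command is the same in both branches. A minimal sketch of that selection (hdfs_binary is whatever get_hdfs_binary() resolved to):

def finalize_commands(upgrade_type, hdfs_binary):
    if upgrade_type == "rolling":
        finalize_cmd = "%s dfsadmin -rollingUpgrade finalize" % hdfs_binary
    elif upgrade_type == "nonrolling":
        finalize_cmd = "%s dfsadmin -finalizeUpgrade" % hdfs_binary
    else:
        raise ValueError("unknown upgrade type: %s" % upgrade_type)
    query_cmd = "%s dfsadmin -rollingUpgrade query" % hdfs_binary
    return finalize_cmd, query_cmd

print(finalize_commands("nonrolling", "hdfs")[0])  # hdfs dfsadmin -finalizeUpgrade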

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
index be6f0d5..df5569e 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
@@ -41,7 +41,7 @@ class NFSGateway(Script):
 
     self.install_packages(env, params.exclude_packages)
 
-  def pre_rolling_restart(self, env):
+  def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
     env.set_params(params)
 
@@ -49,14 +49,14 @@ class NFSGateway(Script):
       conf_select.select(params.stack_name, "hadoop", params.version)
       hdp_select.select("hadoop-hdfs-nfs3", params.version)
 
-  def start(self, env, rolling_restart=False):
+  def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)
 
     self.configure(env)
     nfsgateway(action="start")
 
-  def stop(self, env, rolling_restart=False):
+  def stop(self, env, upgrade_type=None):
     import params
     env.set_params(params)
 


[28/50] [abbrv] ambari git commit: AMBARI-13421. Blueprints: install for Ranger Components (ranger-admin, ranger-usersync, ranger-kms) (Sebastian Toader via smohanty)

Posted by nc...@apache.org.
AMBARI-13421. Blueprints: install for Ranger Components (ranger-admin, ranger-usersync, ranger-kms) (Sebastian Toader via smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c4c83384
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c4c83384
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c4c83384

Branch: refs/heads/branch-dev-patch-upgrade
Commit: c4c833842978d321c0d08c319b2f308d5861e323
Parents: 1ff22df
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Thu Oct 22 10:17:07 2015 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Thu Oct 22 10:17:07 2015 -0700

----------------------------------------------------------------------
 .../libraries/functions/ranger_functions.py     |   8 +-
 .../libraries/functions/ranger_functions_v2.py  |  39 +++++--
 .../libraries/functions/setup_ranger_plugin.py  |   6 +-
 .../functions/setup_ranger_plugin_xml.py        |   6 +-
 .../java/org/apache/ambari/server/Role.java     |   6 ++
 .../HBASE/0.96.0.2.0/package/scripts/params.py  |   3 +-
 .../package/scripts/setup_ranger_hbase.py       |   7 +-
 .../HDFS/2.1.0.2.0/package/scripts/params.py    |   1 +
 .../package/scripts/setup_ranger_hdfs.py        |   7 +-
 .../HIVE/0.12.0.2.0/package/scripts/params.py   |   1 +
 .../package/scripts/setup_ranger_hive.py        |   9 +-
 .../KAFKA/0.8.1.2.2/package/scripts/params.py   |   1 +
 .../package/scripts/setup_ranger_kafka.py       |   8 +-
 .../KNOX/0.5.0.2.2/package/scripts/params.py    |   3 +-
 .../package/scripts/setup_ranger_knox.py        |   9 +-
 .../RANGER_KMS/0.5.0.2.3/package/scripts/kms.py |   1 -
 .../STORM/0.9.1.2.1/package/scripts/params.py   |   2 +-
 .../package/scripts/setup_ranger_storm.py       |   7 +-
 .../YARN/2.1.0.2.0/package/scripts/params.py    |   2 +-
 .../package/scripts/setup_ranger_yarn.py        |  10 +-
 .../stacks/HDP/2.2/role_command_order.json      |  13 ++-
 .../ambari/server/stack/StackManagerTest.java   | 105 +++++++++++++++++++
 22 files changed, 218 insertions(+), 36 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c4c83384/ambari-common/src/main/python/resource_management/libraries/functions/ranger_functions.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/ranger_functions.py b/ambari-common/src/main/python/resource_management/libraries/functions/ranger_functions.py
index de58976..dcf59c1 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/ranger_functions.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/ranger_functions.py
@@ -30,7 +30,7 @@ import re
 class Rangeradmin:
   sInstance = None
 
-  def __init__(self, url='http://localhost:6080'):
+  def __init__(self, url='http://localhost:6080', skip_if_rangeradmin_down = True):
 
     self.baseUrl = url
     self.urlLogin = self.baseUrl + '/login.jsp'
@@ -41,6 +41,10 @@ class Rangeradmin:
     self.urlGroups = self.baseUrl + '/service/xusers/groups'
     self.urlUsers = self.baseUrl + '/service/xusers/users'
     self.urlSecUsers = self.baseUrl + '/service/xusers/secure/users'
+    self.skip_if_rangeradmin_down = skip_if_rangeradmin_down
+
+    if self.skip_if_rangeradmin_down:
+      Logger.info("Rangeradmin: Skip ranger admin if it's down !")
 
   def get_repository_by_name_urllib2(self, name, component, status, usernamepassword):
     """
@@ -121,6 +125,8 @@ class Rangeradmin:
                 raise Fail('{0} Repository creation failed in Ranger admin'.format(component.title()))
       else:
         raise Fail('Ambari admin user creation failed')
+    elif not self.skip_if_rangeradmin_down:
+      raise Fail("Connection failed to Ranger Admin !")
           
   def create_repository_urllib2(self, data, usernamepassword, policy_user):
     """

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4c83384/ambari-common/src/main/python/resource_management/libraries/functions/ranger_functions_v2.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/ranger_functions_v2.py b/ambari-common/src/main/python/resource_management/libraries/functions/ranger_functions_v2.py
index 81658bf..b79f6d8 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/ranger_functions_v2.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/ranger_functions_v2.py
@@ -27,12 +27,10 @@ from resource_management.core.exceptions import Fail
 from resource_management.libraries.functions.format import format
 import re
+import time
 
-
 class RangeradminV2:
   sInstance = None
 
-  def __init__(self, url='http://localhost:6080'):
-    
+  def __init__(self, url='http://localhost:6080', skip_if_rangeradmin_down = True):
     self.base_url = url
     self.url_login = self.base_url + '/login.jsp'
     self.url_login_post = self.base_url + '/j_spring_security_check'
@@ -42,6 +40,10 @@ class RangeradminV2:
     self.url_groups = self.base_url + '/service/xusers/groups'
     self.url_users = self.base_url + '/service/xusers/users'
     self.url_sec_users = self.base_url + '/service/xusers/secure/users'
+    self.skip_if_rangeradmin_down = skip_if_rangeradmin_down
+
+    if self.skip_if_rangeradmin_down:
+      Logger.info("RangeradminV2: Skip ranger admin if it's down !")
 
   def get_repository_by_name_urllib2(self, name, component, status, usernamepassword):
     """
@@ -88,21 +90,36 @@ class RangeradminV2:
     ambari_ranger_password = unicode(ambari_ranger_password)
     admin_password = unicode(admin_password)
     ambari_username_password_for_ranger = format('{ambari_ranger_admin}:{ambari_ranger_password}')
+
     
     if response_code is not None and response_code == 200:
       user_resp_code = self.create_ambari_admin_user(ambari_ranger_admin, ambari_ranger_password, format("{admin_uname}:{admin_password}"))
       if user_resp_code is not None and user_resp_code == 200:
-        repo = self.get_repository_by_name_urllib2(repo_name, component, 'true', ambari_username_password_for_ranger)
-        if repo is not None:
-          Logger.info('{0} Repository {1} exist'.format(component.title(), repo['name']))
-        else:
-          response = self.create_repository_urllib2(repo_data, ambari_username_password_for_ranger)
-          if response is not None:
-            Logger.info('{0} Repository created in Ranger admin'.format(component.title()))
+        retryCount = 0
+        while retryCount <= 5:
+          repo = self.get_repository_by_name_urllib2(repo_name, component, 'true', ambari_username_password_for_ranger)
+          if repo is not None:
+            Logger.info('{0} Repository {1} exists'.format(component.title(), repo['name']))
+            break
           else:
-            Logger.error('{0} Repository creation failed in Ranger admin'.format(component.title()))
+            response = self.create_repository_urllib2(repo_data, ambari_username_password_for_ranger)
+            if response is not None:
+              Logger.info('{0} Repository created in Ranger admin'.format(component.title()))
+              break
+            else:
+              if retryCount < 5:
+                Logger.info("Retry Repository Creation is being called")
+                time.sleep(30) # delay for 30 seconds
+                retryCount += 1
+              else:
+                Logger.error('{0} Repository creation failed in Ranger admin'.format(component.title()))
+                raise Fail('{0} Repository creation failed in Ranger admin'.format(component.title()))
       else:
         Logger.error('Ambari admin user creation failed')
+        raise Fail('Ambari admin user creation failed')
+    elif not self.skip_if_rangeradmin_down:
+      raise Fail("Connection failed to Ranger Admin !")
+
           
   def create_repository_urllib2(self, data, usernamepassword):
     """

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4c83384/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin.py b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin.py
index e4a19aa..e5e4266 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin.py
@@ -37,7 +37,7 @@ def setup_ranger_plugin(component_select_name, service_name,
                         repo_name, plugin_repo_dict, 
                         ranger_env_properties, plugin_properties,
                         policy_user, policymgr_mgr_url,
-                        plugin_enabled, component_user, component_group, api_version=None, **kwargs):
+                        plugin_enabled, component_user, component_group, api_version=None, skip_if_rangeradmin_down = True, **kwargs):
   File(downloaded_custom_connector,
       content = DownloadSource(driver_curl_source),
       mode = 0644
@@ -68,9 +68,9 @@ def setup_ranger_plugin(component_select_name, service_name,
   if plugin_enabled:
     cmd = (format('enable-{service_name}-plugin.sh'),)
     if api_version == 'v2' and api_version is not None:
-      ranger_adm_obj = RangeradminV2(url=policymgr_mgr_url)
+      ranger_adm_obj = RangeradminV2(url=policymgr_mgr_url, skip_if_rangeradmin_down = skip_if_rangeradmin_down)
     else:
-      ranger_adm_obj = Rangeradmin(url=policymgr_mgr_url)
+      ranger_adm_obj = Rangeradmin(url=policymgr_mgr_url, skip_if_rangeradmin_down = skip_if_rangeradmin_down)
 
     ranger_adm_obj.create_ranger_repository(service_name, repo_name, plugin_repo_dict,
                                             ranger_env_properties['ranger_admin_username'], ranger_env_properties['ranger_admin_password'], 

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4c83384/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
index 0b404a9..29ffe0d 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
@@ -44,7 +44,7 @@ def setup_ranger_plugin(component_select_name, service_name,
                         plugin_policymgr_ssl_properties, plugin_policymgr_ssl_attributes,
                         component_list, audit_db_is_enabled, credential_file, 
                         xa_audit_db_password, ssl_truststore_password,
-                        ssl_keystore_password, api_version=None, hdp_version_override = None):
+                        ssl_keystore_password, api_version=None, hdp_version_override = None, skip_if_rangeradmin_down = True):
 
   if audit_db_is_enabled:
     File(component_downloaded_custom_connector,
@@ -68,9 +68,9 @@ def setup_ranger_plugin(component_select_name, service_name,
   if plugin_enabled:
 
     if api_version == 'v2' and api_version is not None:
-      ranger_adm_obj = RangeradminV2(url=policymgr_mgr_url)
+      ranger_adm_obj = RangeradminV2(url=policymgr_mgr_url, skip_if_rangeradmin_down=skip_if_rangeradmin_down)
     else:
-      ranger_adm_obj = Rangeradmin(url=policymgr_mgr_url)
+      ranger_adm_obj = Rangeradmin(url=policymgr_mgr_url, skip_if_rangeradmin_down=skip_if_rangeradmin_down)
 
     ranger_adm_obj.create_ranger_repository(service_name, repo_name, plugin_repo_dict,
                                             ranger_env_properties['ranger_admin_username'], ranger_env_properties['ranger_admin_password'],

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4c83384/ambari-server/src/main/java/org/apache/ambari/server/Role.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/Role.java b/ambari-server/src/main/java/org/apache/ambari/server/Role.java
index df60988..f72cc5b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/Role.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/Role.java
@@ -113,6 +113,12 @@ public class Role {
   public static final Role METRICS_MONITOR = valueOf("METRICS_MONITOR");
   public static final Role AMS_SERVICE_CHECK = valueOf("AMBARI_METRICS_SERVICE_CHECK");
   public static final Role ACCUMULO_CLIENT = valueOf("ACCUMULO_CLIENT");
+  public static final Role RANGER_ADMIN  = valueOf("RANGER_ADMIN");
+  public static final Role RANGER_USERSYNC = valueOf("RANGER_USERSYNC");
+  public static final Role KNOX_GATEWAY = valueOf("KNOX_GATEWAY");
+  public static final Role KAFKA_BROKER = valueOf("KAFKA_BROKER");
+  public static final Role NIMBUS = valueOf("NIMBUS");
+  public static final Role RANGER_KMS_SERVER = valueOf("RANGER_KMS_SERVER");
   public static final Role INSTALL_PACKAGES = valueOf("install_packages");
   public static final Role UPDATE_REPO = valueOf("update_repo");
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4c83384/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params.py
index 36dd07f..f3208ce 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params.py
@@ -25,4 +25,5 @@ if OSCheck.is_windows_family():
 else:
   from params_linux import *
 
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
\ No newline at end of file
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+retryAble = default("/commandParams/command_retry_enabled", False)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4c83384/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/setup_ranger_hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/setup_ranger_hbase.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/setup_ranger_hbase.py
index 6b4dfaa..8f4a6d0 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/setup_ranger_hbase.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/setup_ranger_hbase.py
@@ -34,6 +34,11 @@ def setup_ranger_hbase(rolling_upgrade = False):
     if rolling_upgrade:
       hdp_version = params.version
 
+    if params.retryAble:
+      Logger.info("HBase: Setup ranger: command retry enables thus retrying if ranger admin is down !")
+    else:
+      Logger.info("HBase: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
+
     setup_ranger_plugin('hbase-client', 'hbase', 
                         params.downloaded_custom_connector, params.driver_curl_source,
                         params.driver_curl_target, params.java64_home,
@@ -48,6 +53,6 @@ def setup_ranger_hbase(rolling_upgrade = False):
                         component_list=['hbase-client', 'hbase-master', 'hbase-regionserver'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
                         credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password, 
                         ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
-                        hdp_version_override = hdp_version)                 
+                        hdp_version_override = hdp_version, skip_if_rangeradmin_down= not params.retryAble)
   else:
     Logger.info('Ranger admin not installed')
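
Every plugin setup script follows the same wiring: retryAble is read from /commandParams/command_retry_enabled and negated into skip_if_rangeradmin_down, so enabling command retry turns a down Ranger admin into a retriable failure instead of a silent skip. A self-contained sketch of that wiring (the default() stand-in below only mimics the real helper's path lookup; the command_json literal is invented):

command_json = {"commandParams": {"command_retry_enabled": True}}

def default(path, fallback):
    node = command_json
    for key in path.strip("/").split("/"):
        if not isinstance(node, dict) or key not in node:
            return fallback
        node = node[key]
    return node

retryAble = default("/commandParams/command_retry_enabled", False)
skip_if_rangeradmin_down = not retryAble
print(skip_if_rangeradmin_down)  # False: fail and retry instead of skipping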

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4c83384/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
index b89eefd..7514918 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
@@ -26,3 +26,4 @@ else:
 
 host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
 nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
+retryAble = default("/commandParams/command_retry_enabled", False)

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4c83384/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
index 6a64b2f..bd158ec 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
@@ -34,6 +34,11 @@ def setup_ranger_hdfs(upgrade_type=None):
     if upgrade_type is not None:
       hdp_version = params.version
 
+    if params.retryAble:
+        Logger.info("HDFS: Setup ranger: command retry enables thus retrying if ranger admin is down !")
+    else:
+      Logger.info("HDFS: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
+
     setup_ranger_plugin('hadoop-client', 'hdfs',
                         params.downloaded_custom_connector, params.driver_curl_source,
                         params.driver_curl_target, params.java_home,
@@ -48,6 +53,6 @@ def setup_ranger_hdfs(upgrade_type=None):
                         component_list=['hadoop-client'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
                         credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password, 
                         ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
-                        hdp_version_override = hdp_version)
+                        hdp_version_override = hdp_version, skip_if_rangeradmin_down= not params.retryAble)
   else:
     Logger.info('Ranger admin not installed')

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4c83384/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params.py
index 36f7983..f10a3f3 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params.py
@@ -26,3 +26,4 @@ else:
   from params_linux import *
 
 host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+retryAble = default("/commandParams/command_retry_enabled", False)

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4c83384/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py
index b0f0c3f..5fdaa70 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py
@@ -32,7 +32,12 @@ def setup_ranger_hive(rolling_upgrade = False):
     hdp_version = None
     if rolling_upgrade:
       hdp_version = params.version
-    
+
+    if params.retryAble:
+      Logger.info("Hive: Setup ranger: command retry enables thus retrying if ranger admin is down !")
+    else:
+      Logger.info("Hive: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
+
     setup_ranger_plugin('hive-server2', 'hive', 
                         params.ranger_downloaded_custom_connector, params.ranger_driver_curl_source,
                         params.ranger_driver_curl_target, params.java64_home,
@@ -47,6 +52,6 @@ def setup_ranger_hive(rolling_upgrade = False):
                         component_list=['hive-client', 'hive-metastore', 'hive-server2'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
                         credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password, 
                         ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
-                        hdp_version_override = hdp_version)                 
+                        hdp_version_override = hdp_version, skip_if_rangeradmin_down= not params.retryAble)
   else:
     Logger.info('Ranger admin not installed')

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4c83384/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
index dc0c087..da8333a 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
@@ -32,6 +32,7 @@ import status_params
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 stack_name = default("/hostLevelParams/stack_name", None)
+retryAble = default("/commandParams/command_retry_enabled", False)
 
 version = default("/commandParams/version", None)
 host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4c83384/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/setup_ranger_kafka.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/setup_ranger_kafka.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/setup_ranger_kafka.py
index 540bb9a..c210791 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/setup_ranger_kafka.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/setup_ranger_kafka.py
@@ -24,6 +24,12 @@ def setup_ranger_kafka():
   if params.has_ranger_admin:
 
     from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
+
+    if params.retryAble:
+      Logger.info("Kafka: Setup ranger: command retry enables thus retrying if ranger admin is down !")
+    else:
+      Logger.info("Kafka: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
+
     setup_ranger_plugin('kafka-broker', 'kafka', 
                         params.downloaded_custom_connector, params.driver_curl_source,
                         params.driver_curl_target, params.java64_home,
@@ -38,7 +44,7 @@ def setup_ranger_kafka():
                         component_list=['kafka-broker'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
                         credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password, 
                         ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
-                        api_version = 'v2')
+                        api_version = 'v2', skip_if_rangeradmin_down= not params.retryAble)
     
     if params.enable_ranger_kafka: 
       Execute(('cp', '--remove-destination', params.setup_ranger_env_sh_source, params.setup_ranger_env_sh_target),

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4c83384/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params.py
index 8fe1028..14e021d 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params.py
@@ -26,4 +26,5 @@ if OSCheck.is_windows_family():
 else:
   from params_linux import *
 
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
\ No newline at end of file
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+retryAble = default("/commandParams/command_retry_enabled", False)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4c83384/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/setup_ranger_knox.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/setup_ranger_knox.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/setup_ranger_knox.py
index f1319b3..1efe9e0 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/setup_ranger_knox.py
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/setup_ranger_knox.py
@@ -33,7 +33,12 @@ def setup_ranger_knox(rolling_upgrade = False):
     if rolling_upgrade:
       hdp_version = params.version
 
-    setup_ranger_plugin('knox-server', 'knox', 
+    if params.retryAble:
+      Logger.info("Knox: Setup ranger: command retry enables thus retrying if ranger admin is down !")
+    else:
+      Logger.info("Knox: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
+
+    setup_ranger_plugin('knox-server', 'knox',
                         params.downloaded_custom_connector, params.driver_curl_source,
                         params.driver_curl_target, params.java_home,
                         params.repo_name, params.knox_ranger_plugin_repo,
@@ -47,6 +52,6 @@ def setup_ranger_knox(rolling_upgrade = False):
                         component_list=['knox-server'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
                         credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password, 
                         ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
-                        hdp_version_override = hdp_version)
+                        hdp_version_override = hdp_version, skip_if_rangeradmin_down = not params.retryAble)
   else:
     Logger.info('Ranger admin not installed')
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4c83384/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py
index 570b2b7..4c5bd1a 100755
--- a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py
@@ -264,7 +264,6 @@ def enable_kms_plugin():
   import params
 
   if params.has_ranger_admin:
-
     ranger_adm_obj = Rangeradmin(url=params.policymgr_mgr_url)
     ambari_username_password_for_ranger = format("{ambari_ranger_admin}:{ambari_ranger_password}")
     response_code = ranger_adm_obj.check_ranger_login_urllib2(params.policymgr_mgr_url)

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4c83384/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params.py
index 1e591f4..f10a3f3 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params.py
@@ -26,4 +26,4 @@ else:
   from params_linux import *
 
 host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
+retryAble = default("/commandParams/command_retry_enabled", False)

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4c83384/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/setup_ranger_storm.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/setup_ranger_storm.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/setup_ranger_storm.py
index d874ba3..5d90f5b 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/setup_ranger_storm.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/setup_ranger_storm.py
@@ -33,6 +33,11 @@ def setup_ranger_storm(rolling_upgrade = False):
     if rolling_upgrade:
       hdp_version = params.version
 
+    if params.retryAble:
+      Logger.info("Storm: Setup ranger: command retry enables thus retrying if ranger admin is down !")
+    else:
+      Logger.info("Storm: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
+
     setup_ranger_plugin('storm-nimbus', 'storm',
                         params.downloaded_custom_connector, params.driver_curl_source,
                         params.driver_curl_target, params.java64_home,
@@ -47,6 +52,6 @@ def setup_ranger_storm(rolling_upgrade = False):
                         component_list=['storm-client', 'storm-nimbus'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
                         credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password, 
                         ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
-                        hdp_version_override = hdp_version)
+                        hdp_version_override = hdp_version, skip_if_rangeradmin_down = not params.retryAble)
   else:
     Logger.info('Ranger admin not installed')

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4c83384/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params.py
index 5695e83..073e84f 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params.py
@@ -28,4 +28,4 @@ else:
   from params_linux import *
 
 host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
+retryAble = default("/commandParams/command_retry_enabled", False)

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4c83384/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/setup_ranger_yarn.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/setup_ranger_yarn.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/setup_ranger_yarn.py
index c8b12df..5db65d0d 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/setup_ranger_yarn.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/setup_ranger_yarn.py
@@ -22,8 +22,12 @@ def setup_ranger_yarn():
   if params.has_ranger_admin:
 
     from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
-    
-    
+
+    if params.retryAble:
+      Logger.info("YARN: Setup ranger: command retry enables thus retrying if ranger admin is down !")
+    else:
+      Logger.info("YARN: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
+
     setup_ranger_plugin('hadoop-yarn-resourcemanager', 'yarn', 
                         params.downloaded_custom_connector, params.driver_curl_source,
                         params.driver_curl_target, params.java64_home,
@@ -38,7 +42,7 @@ def setup_ranger_yarn():
                         component_list=['hadoop-yarn-resourcemanager'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
                         credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password, 
                         ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
-                        api_version = 'v2'
+                        api_version = 'v2', skip_if_rangeradmin_down = not params.retryAble
       )                 
   else:
     Logger.info('Ranger admin not installed')
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4c83384/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
index 3571a6c..3beed16 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
@@ -21,7 +21,16 @@
     "FLUME_SERVICE_CHECK-SERVICE_CHECK": ["FLUME_HANDLER-START"],
     "FALCON_SERVICE_CHECK-SERVICE_CHECK": ["FALCON_SERVER-START"],
     "SLIDER_SERVICE_CHECK-SERVICE_CHECK" : ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
-    "KAFKA_BROKER-START" : ["ZOOKEEPER_SERVER-START"],
-    "KAFKA_SERVICE_CHECK-SERVICE_CHECK": ["KAFKA_BROKER-START"]
+    "KAFKA_BROKER-START" : ["ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START"],
+    "KAFKA_SERVICE_CHECK-SERVICE_CHECK": ["KAFKA_BROKER-START"],
+    "RANGER_USERSYNC-START" : ["RANGER_ADMIN-START", "RANGER_KMS_SERVER-START"],
+    "ZOOKEEPER_SERVER-START" : ["RANGER_USERSYNC-START"],
+    "DATANODE-START" : ["RANGER_USERSYNC-START"],
+    "NAMENODE-START" : ["RANGER_USERSYNC-START"],
+    "KNOX_GATEWAY-START": ["RANGER_USERSYNC-START"],
+    "RESOURCEMANAGER-START" : ["RANGER_USERSYNC-START"],
+    "NIMBUS-START": ["RANGER_USERSYNC-START"],
+    "HBASE_MASTER-START": ["RANGER_USERSYNC-START"],
+    "HIVE_SERVER-START" : ["RANGER_USERSYNC-START"]
   }
 }

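In role_command_order.json each key is a role-command and its value lists the role-commands that must finish first, so the entries added above make the listed START commands wait on RANGER_USERSYNC-START, which in turn waits on RANGER_ADMIN-START and RANGER_KMS_SERVER-START. A toy resolver showing the dependency semantics (not Ambari's actual scheduler; the deps map is a trimmed example):

# Each entry maps a command to its blockers; depth-first emit blockers first.
general_deps = {
    "KAFKA_BROKER-START": ["ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START"],
    "RANGER_USERSYNC-START": ["RANGER_ADMIN-START", "RANGER_KMS_SERVER-START"],
    "ZOOKEEPER_SERVER-START": ["RANGER_USERSYNC-START"],
}

def schedule(deps):
    ordered, seen = [], set()
    def visit(cmd):
        if cmd in seen:
            return
        seen.add(cmd)
        for blocker in deps.get(cmd, []):
            visit(blocker)
        ordered.append(cmd)
    for cmd in deps:
        visit(cmd)
    return ordered

print(schedule(general_deps))
# RANGER_ADMIN-START and RANGER_KMS_SERVER-START come first, then
# RANGER_USERSYNC-START, then the commands that depend on it.
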
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4c83384/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
index be8b073..6a13ab2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
@@ -41,6 +41,8 @@ import java.util.Map;
 import com.google.gson.Gson;
 import com.google.gson.reflect.TypeToken;
 import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.Role;
+import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.metadata.ActionMetadata;
 import org.apache.ambari.server.orm.dao.MetainfoDAO;
@@ -54,6 +56,7 @@ import org.apache.ambari.server.state.ServiceOsSpecific;
 import org.apache.ambari.server.state.StackInfo;
 import org.apache.ambari.server.state.stack.MetricDefinition;
 import org.apache.ambari.server.state.stack.OsFamily;
+import org.apache.ambari.server.state.stack.StackRoleCommandOrder;
 import org.apache.commons.lang.StringUtils;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -667,5 +670,107 @@ public class StackManagerTest {
     }
   }
 
+  @Test
+  public void testServicesWithRangerPluginRoleCommandOrder() throws AmbariException {
+    // Given
+    String stackRoot = ClassLoader.getSystemClassLoader().getResource("stacks").getPath().replace("test-classes","classes");
+    String commonServices = ClassLoader.getSystemClassLoader().getResource("common-services").getPath().replace("test-classes","classes");
+
+    MetainfoDAO metaInfoDao = createNiceMock(MetainfoDAO.class);
+    StackDAO stackDao = createNiceMock(StackDAO.class);
+    ActionMetadata actionMetadata = createNiceMock(ActionMetadata.class);
+    Configuration config = createNiceMock(Configuration.class);
+
+    expect(config.getSharedResourcesDirPath()).andReturn(
+      ClassLoader.getSystemClassLoader().getResource("").getPath()).anyTimes();
+
+    replay(config, metaInfoDao, stackDao, actionMetadata);
+
+    OsFamily osFamily = new OsFamily(config);
+
+    StackManager stackManager = new StackManager(new File(stackRoot), new File(commonServices), osFamily, metaInfoDao, actionMetadata, stackDao);
+
+    String rangerUserSyncRoleCommand = Role.RANGER_USERSYNC + "-" + RoleCommand.START;
+    String rangerAdminRoleCommand = Role.RANGER_ADMIN + "-" + RoleCommand.START;
+
+    // When
+    StackInfo hdp = stackManager.getStack("HDP", "2.3");
+    Map<String, Object> rco = hdp.getRoleCommandOrder().getContent();
+
+    // Then
+    // verify that services that have ranger plugin are after ranger admin in the role command order sequence
+    // as these services require ranger admin and ranger user sync to be up beforehand
+    Map<String, Object> generalDeps = (Map<String, Object>)rco.get("general_deps");
+
+    // HDFS
+    String nameNodeRoleCommand  = Role.NAMENODE +  "-" + RoleCommand.START;
+    ArrayList<String> nameNodeBlockers = (ArrayList<String>)generalDeps.get(nameNodeRoleCommand);
+
+    assertTrue(nameNodeRoleCommand + " should be dependent on " + rangerUserSyncRoleCommand, nameNodeBlockers.contains(rangerUserSyncRoleCommand));
+
+    String dataNodeRoleCommand = Role.DATANODE +  "-" + RoleCommand.START;
+    ArrayList<String> dataNodeBlockers = (ArrayList<String>)generalDeps.get(dataNodeRoleCommand);
+
+    assertTrue(dataNodeRoleCommand + " should be dependent on " + rangerUserSyncRoleCommand, dataNodeBlockers.contains(rangerUserSyncRoleCommand));
+
+    // YARN
+    String resourceManagerCommandRoleCommand = Role.RESOURCEMANAGER +  "-" + RoleCommand.START;
+    ArrayList<String> resourceManagerBlockers = (ArrayList<String>)generalDeps.get(resourceManagerCommandRoleCommand);
+
+    assertTrue(resourceManagerCommandRoleCommand + " should be dependent on " + rangerUserSyncRoleCommand, resourceManagerBlockers.contains(rangerUserSyncRoleCommand));
+
+
+    // HBase
+    String hbaseRoleCommand = Role.HBASE_MASTER +  "-" + RoleCommand.START;
+    ArrayList<String> hbaseBlockers = (ArrayList<String>)generalDeps.get(hbaseRoleCommand);
+
+    assertTrue(hbaseRoleCommand + " should be dependent on " + rangerUserSyncRoleCommand, hbaseBlockers.contains(rangerUserSyncRoleCommand));
+
+    // Knox
+    String knoxRoleCommand = Role.KNOX_GATEWAY +  "-" + RoleCommand.START;
+    ArrayList<String> knoxBlockers = (ArrayList<String>)generalDeps.get(knoxRoleCommand);
+
+    assertTrue(knoxRoleCommand + " should be dependent on " + rangerUserSyncRoleCommand, knoxBlockers.contains(rangerUserSyncRoleCommand));
+
+    // Kafka
+    String kafkaRoleCommand = Role.KAFKA_BROKER +  "-" + RoleCommand.START;
+    ArrayList<String> kafkaBlockers = (ArrayList<String>)generalDeps.get(kafkaRoleCommand);
+
+    assertTrue(kafkaRoleCommand + " should be dependent on " + rangerUserSyncRoleCommand, kafkaBlockers.contains(rangerUserSyncRoleCommand));
+
+    // Hive
+    String hiveRoleCommand = Role.HIVE_SERVER +  "-" + RoleCommand.START;
+    ArrayList<String> hiveBlockers = (ArrayList<String>)generalDeps.get(hiveRoleCommand);
+
+    assertTrue(hiveRoleCommand + " should be dependent on " + rangerUserSyncRoleCommand, hiveBlockers.contains(rangerUserSyncRoleCommand));
+
+    // Storm
+    String stormRoleCommand = Role.NIMBUS +  "-" + RoleCommand.START;
+    ArrayList<String> stormBlockers = (ArrayList<String>)generalDeps.get(stormRoleCommand);
+
+    assertTrue(stormRoleCommand + " should be dependent on " + rangerUserSyncRoleCommand, stormBlockers.contains(rangerUserSyncRoleCommand));
+
+    // Ranger KMS
+    String kmsRoleCommand = Role.RANGER_KMS_SERVER +  "-" + RoleCommand.START;
+    ArrayList<String> rangerKmsBlockers = (ArrayList<String>)generalDeps.get(kmsRoleCommand);
+
+    assertTrue(kmsRoleCommand + " should be dependent on " + rangerAdminRoleCommand, rangerKmsBlockers.contains(rangerAdminRoleCommand));
+
+    // Ranger User Sync
+    ArrayList<String> rangerUserSyncBlockers = (ArrayList<String>)generalDeps.get(rangerUserSyncRoleCommand);
+
+    assertTrue(rangerUserSyncRoleCommand + " should be dependent on " + rangerAdminRoleCommand, rangerUserSyncBlockers.contains(rangerAdminRoleCommand));
+    assertTrue(rangerUserSyncRoleCommand + " should be dependent on " + kmsRoleCommand, rangerUserSyncBlockers.contains(kmsRoleCommand));
+
+    // Zookeeper Server
+    String zookeeperServerRoleCommand = Role.ZOOKEEPER_SERVER + "-" + RoleCommand.START;
+    ArrayList<String> zookeeperBlockers = (ArrayList<String>)generalDeps.get(zookeeperServerRoleCommand);
+
+    assertTrue(zookeeperServerRoleCommand + " should be dependent on " + rangerUserSyncRoleCommand, zookeeperBlockers.contains(rangerUserSyncRoleCommand));
+  }
+
   //todo: component override assertions
 }


[44/50] [abbrv] ambari git commit: AMBARI-13528 Ambari-server upgrade from 2.1.2 to 2.1.3 fails (dsen)

Posted by nc...@apache.org.
AMBARI-13528 Ambari-server upgrade from 2.1.2 to 2.1.3 fails (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d30b5f03
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d30b5f03
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d30b5f03

Branch: refs/heads/branch-dev-patch-upgrade
Commit: d30b5f0369672bd62a246f37d07bfddc50df8c7a
Parents: e7f77b6
Author: Dmytro Sen <ds...@apache.org>
Authored: Fri Oct 23 12:59:58 2015 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Fri Oct 23 12:59:58 2015 +0300

----------------------------------------------------------------------
 .../server/upgrade/UpgradeCatalog213.java       | 406 ++++++++++++++++++-
 .../server/upgrade/UpgradeCatalog213Test.java   | 304 +++++++++++---
 2 files changed, 647 insertions(+), 63 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d30b5f03/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
index 97cfb3a..965689a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
@@ -18,8 +18,12 @@
 
 package org.apache.ambari.server.upgrade;
 
+import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
+import java.text.MessageFormat;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -29,20 +33,39 @@ import java.util.Set;
 import java.util.UUID;
 
 import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
 import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
+import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.dao.ClusterVersionDAO;
 import org.apache.ambari.server.orm.dao.DaoUtils;
+import org.apache.ambari.server.orm.dao.HostVersionDAO;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.dao.StackDAO;
 import org.apache.ambari.server.orm.dao.UpgradeDAO;
 import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
+import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
+import org.apache.ambari.server.orm.entities.HostEntity;
+import org.apache.ambari.server.orm.entities.HostVersionEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.orm.entities.UpgradeEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.RepositoryVersionState;
 import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.StackInfo;
 import org.apache.ambari.server.state.alert.SourceType;
+import org.apache.ambari.server.state.stack.upgrade.Direction;
+import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper;
+import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
 import org.apache.ambari.server.utils.VersionUtils;
+import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -50,6 +73,7 @@ import com.google.gson.JsonObject;
 import com.google.gson.JsonParser;
 import com.google.inject.Inject;
 import com.google.inject.Injector;
+import com.google.inject.persist.Transactional;
 
 /**
  * Upgrade catalog for version 2.1.3.
@@ -68,16 +92,22 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
   private static final String HADOOP_ENV_CONFIG = "hadoop-env";
   private static final String CONTENT_PROPERTY = "content";
   private static final String HADOOP_ENV_CONTENT_TO_APPEND = "\n{% if is_datanode_max_locked_memory_set %}\n" +
-                                    "# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n" +
-                                    "# Makes sense to fix only when runing DN as root \n" +
-                                    "if [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_SECURE_DN_USER\" ]; then\n" +
-                                    "  ulimit -l {{datanode_max_locked_memory}}\n" +
-                                    "fi\n" +
-                                    "{% endif %};\n";
+    "# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n" +
+    "# Makes sense to fix only when runing DN as root \n" +
+    "if [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_SECURE_DN_USER\" ]; then\n" +
+    "  ulimit -l {{datanode_max_locked_memory}}\n" +
+    "fi\n" +
+    "{% endif %};\n";
 
   private static final String DOWNGRADE_ALLOWED_COLUMN = "downgrade_allowed";
   private static final String UPGRADE_SKIP_FAILURE_COLUMN = "skip_failures";
   private static final String UPGRADE_SKIP_SC_FAILURE_COLUMN = "skip_sc_failures";
+  public static final String UPGRADE_PACKAGE_COL = "upgrade_package";
+  public static final String UPGRADE_TYPE_COL = "upgrade_type";
+  public static final String REPO_VERSION_TABLE = "repo_version";
+
+  private static final String HOST_ROLE_COMMAND_TABLE = "host_role_command";
+  private static final String HOST_ID_COL = "host_id";
 
   private static final String KERBEROS_DESCRIPTOR_TABLE = "kerberos_descriptor";
   private static final String KERBEROS_DESCRIPTOR_NAME_COLUMN = "kerberos_descriptor_name";
@@ -91,6 +121,11 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
   @Inject
   DaoUtils daoUtils;
 
+  @Inject
+  private RepositoryVersionDAO repositoryVersionDAO;
+
+  @Inject
+  private ClusterDAO clusterDAO;
 
   // ----- Constructors ------------------------------------------------------
 
@@ -102,8 +137,7 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
   @Inject
   public UpgradeCatalog213(Injector injector) {
     super(injector);
-
-    daoUtils = injector.getInstance(DaoUtils.class);
+    this.injector = injector;
   }
 
   // ----- UpgradeCatalog ----------------------------------------------------
@@ -132,6 +166,10 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
   @Override
   protected void executeDDLUpdates() throws AmbariException, SQLException {
     executeUpgradeDDLUpdates();
+
+    // Alter the host_role_command table to allow host_id to be nullable
+    dbAccessor.alterColumn(HOST_ROLE_COMMAND_TABLE, new DBColumnInfo(HOST_ID_COL, Long.class, null, null, true));
+
     addKerberosDescriptorTable();
   }
 
@@ -153,6 +191,13 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
    */
   @Override
   protected void executePreDMLUpdates() throws AmbariException, SQLException {
+    // execute DDL updates
+    executeStackUpgradeDDLUpdates();
+
+    // DDL and DML mixed code, double check here
+    bootstrapRepoVersionForHDP21();
+
+    // execute DML updates, no DDL things after this line
     executeUpgradePreDMLUpdates();
   }
 
@@ -174,16 +219,18 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
     for (UpgradeEntity upgrade: upgrades){
       if (upgrade.isDowngradeAllowed() == null) {
         upgrade.setDowngradeAllowed(true);
-        upgradeDAO.merge(upgrade);
       }
 
       // ensure that these are set to false for existing upgrades
       upgrade.setAutoSkipComponentFailures(false);
       upgrade.setAutoSkipServiceCheckFailures(false);
 
+      // apply changes
+      upgradeDAO.merge(upgrade);
+
       LOG.info(String.format("Updated upgrade id %s, upgrade pack %s from version %s to %s",
-          upgrade.getId(), upgrade.getUpgradePackage(), upgrade.getFromVersion(),
-          upgrade.getToVersion()));
+        upgrade.getId(), upgrade.getUpgradePackage(), upgrade.getFromVersion(),
+        upgrade.getToVersion()));
     }
 
     // make the columns nullable now that they have defaults
@@ -206,6 +253,331 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
   }
 
   /**
+   * Move the upgrade_package column from the repo_version table to the upgrade table as follows,
+   * add column upgrade_package to upgrade table as String 255 and nullable
+   * populate column in the upgrade table
+   * drop the column in the repo_version table
+   * make the column in the upgrade table non-nullable.
+   * This has to be called as part of DML and not DDL since the persistence service has to be started.
+   * @throws AmbariException
+   * @throws SQLException
+   */
+  @Transactional
+  protected void executeStackUpgradeDDLUpdates() throws SQLException, AmbariException {
+    final Configuration.DatabaseType databaseType = configuration.getDatabaseType();
+
+    // Add columns
+    if (!dbAccessor.tableHasColumn(UPGRADE_TABLE, UPGRADE_PACKAGE_COL)) {
+      LOG.info("Adding upgrade_package column to upgrade table.");
+      dbAccessor.addColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_PACKAGE_COL, String.class, 255, null, true));
+    }
+    if (!dbAccessor.tableHasColumn(UPGRADE_TABLE, UPGRADE_TYPE_COL)) {
+      LOG.info("Adding upgrade_type column to upgrade table.");
+      dbAccessor.addColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_TYPE_COL, String.class, 32, null, true));
+    }
+
+    // Populate values in upgrade table.
+    boolean success = populateUpgradeTable();
+
+    if (!success) {
+      throw new AmbariException("Errors found while populating the upgrade table with values for columns upgrade_type and upgrade_package.");
+    }
+
+    if (dbAccessor.tableHasColumn(REPO_VERSION_TABLE, UPGRADE_PACKAGE_COL)) {
+      LOG.info("Dropping upgrade_package column from repo_version table.");
+      dbAccessor.dropColumn(REPO_VERSION_TABLE, UPGRADE_PACKAGE_COL);
+
+      // Now, make the added column non-nullable
+      // Make the hosts id non-null after all the values are populated
+      LOG.info("Making upgrade_package column in the upgrade table non-nullable.");
+      if (databaseType == Configuration.DatabaseType.DERBY) {
+        // This is a workaround for UpgradeTest.java unit test
+        dbAccessor.executeQuery("ALTER TABLE " + UPGRADE_TABLE + " ALTER column " + UPGRADE_PACKAGE_COL + " NOT NULL");
+      } else {
+        dbAccessor.alterColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_PACKAGE_COL, String.class, 255, null, false));
+      }
+    }
+
+    if (dbAccessor.tableHasColumn(REPO_VERSION_TABLE, UPGRADE_TYPE_COL)) {
+      // Now, make the added column non-nullable
+      // Make the hosts id non-null after all the values are populated
+      LOG.info("Making upgrade_type column in the upgrade table non-nullable.");
+      if (databaseType == Configuration.DatabaseType.DERBY) {
+        // This is a workaround for UpgradeTest.java unit test
+        dbAccessor.executeQuery("ALTER TABLE " + UPGRADE_TABLE + " ALTER column " + UPGRADE_TYPE_COL + " NOT NULL");
+      } else {
+        dbAccessor.alterColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_TYPE_COL, String.class, 32, null, false));
+      }
+    }
+  }
+
+  /**
+   * Populate the upgrade table with values for the columns upgrade_type and upgrade_package.
+   * The upgrade_type will default to {@code org.apache.ambari.server.state.stack.upgrade.UpgradeType.ROLLING}
+   * whereas the upgrade_package will be calculated.
+   * @return {@code true} on success, and {@code false} otherwise.
+   */
+  private boolean populateUpgradeTable() {
+    boolean success = true;
+    Statement statement = null;
+    ResultSet rs = null;
+    try {
+      statement = dbAccessor.getConnection().createStatement();
+      if (statement != null) {
+        // Need to use SQL since the schema is changing and some of the columns have not yet been added.
+        rs = statement.executeQuery("SELECT upgrade_id, cluster_id, from_version, to_version, direction, upgrade_package, upgrade_type FROM upgrade");
+        if (rs != null) {
+          try {
+            while (rs.next()) {
+              final long upgradeId = rs.getLong("upgrade_id");
+              final long clusterId = rs.getLong("cluster_id");
+              final String fromVersion = rs.getString("from_version");
+              final String toVersion = rs.getString("to_version");
+              final Direction direction = Direction.valueOf(rs.getString("direction"));
+              // These two values are likely null.
+              String upgradePackage = rs.getString("upgrade_package");
+              String upgradeType = rs.getString("upgrade_type");
+
+              LOG.info(MessageFormat.format("Populating rows for the upgrade table record with " +
+                  "upgrade_id: {0,number,#}, cluster_id: {1,number,#}, from_version: {2}, to_version: {3}, direction: {4}",
+                upgradeId, clusterId, fromVersion, toVersion, direction));
+
+              // Set all upgrades that have been done so far to type "rolling"
+              if (StringUtils.isEmpty(upgradeType)) {
+                LOG.info("Updating the record's upgrade_type to " + UpgradeType.ROLLING);
+                dbAccessor.executeQuery("UPDATE upgrade SET upgrade_type = '" + UpgradeType.ROLLING + "' WHERE upgrade_id = " + upgradeId);
+              }
+
+              if (StringUtils.isEmpty(upgradePackage)) {
+                String version = null;
+                StackEntity stack = null;
+
+                if (direction == Direction.UPGRADE) {
+                  version = toVersion;
+                } else if (direction == Direction.DOWNGRADE) {
+                  // TODO AMBARI-12698, this is going to be a problem.
+                  // During a downgrade, the "to_version" is overwritten to the source version, but the "from_version"
+                  // doesn't swap. E.g.,
+                  //  upgrade_id | from_version |  to_version  | direction
+                  // ------------+--------------+--------------+----------
+                  //           1 | 2.2.6.0-2800 | 2.3.0.0-2557 | UPGRADE
+                  //           2 | 2.2.6.0-2800 | 2.2.6.0-2800 | DOWNGRADE
+                  version = fromVersion;
+                }
+
+                ClusterEntity cluster = clusterDAO.findById(clusterId);
+
+                if (null != cluster) {
+                  stack = cluster.getDesiredStack();
+                  upgradePackage = calculateUpgradePackage(stack, version);
+                } else {
+                  LOG.error("Could not find a cluster with cluster_id " + clusterId);
+                }
+
+                if (!StringUtils.isEmpty(upgradePackage)) {
+                  LOG.info("Updating the record's upgrade_package to " + upgradePackage);
+                  dbAccessor.executeQuery("UPDATE upgrade SET upgrade_package = '" + upgradePackage + "' WHERE upgrade_id = " + upgradeId);
+                } else {
+                  success = false;
+                  LOG.error("Unable to populate column upgrade_package for record in table upgrade with id " + upgradeId);
+                }
+              }
+            }
+          } catch (Exception e) {
+            success = false;
+            e.printStackTrace();
+            LOG.error("Unable to populate the upgrade_type and upgrade_package columns of the upgrade table. " + e);
+          }
+        }
+      }
+    } catch (Exception e) {
+      success = false;
+      e.printStackTrace();
+      LOG.error("Failed to retrieve records from the upgrade table to populate the upgrade_type and upgrade_package columns. Exception: " + e);
+    } finally {
+      try {
+        if (rs != null) {
+          rs.close();
+        }
+        if (statement != null) {
+          statement.close();
+        }
+      } catch (SQLException e) {
+        ;
+      }
+    }
+    return success;
+  }
+
+  /**
+   * Find the single Repo Version for the given stack and version, and return its upgrade_package column.
+   * Because the upgrade_package column is going to be removed from this entity, must use raw SQL
+   * instead of the entity class.
+   * @param stack Stack
+   * @param version Stack version
+   * @return The value of the upgrade_package column, or null if not found.
+   */
+
+  private String calculateUpgradePackage(StackEntity stack, String version) {
+    String upgradePackage = null;
+    // Find the corresponding repo_version, and extract its upgrade_package
+    if (null != version && null != stack) {
+      RepositoryVersionEntity repoVersion = repositoryVersionDAO.findByStackNameAndVersion(stack.getStackName(), version);
+
+      Statement statement = null;
+      ResultSet rs = null;
+      try {
+        statement = dbAccessor.getConnection().createStatement();
+        if (statement != null) {
+          // Need to use SQL since the schema is changing and the entity will no longer have the upgrade_package column.
+          rs = statement.executeQuery("SELECT upgrade_package FROM repo_version WHERE repo_version_id = " + repoVersion.getId());
+          if (rs != null && rs.next()) {
+            upgradePackage = rs.getString("upgrade_package");
+          }
+        }
+      } catch (Exception e) {
+        LOG.error("Failed to retrieve upgrade_package for repo_version record with id " + repoVersion.getId() + ". Exception: " + e.getMessage());
+      } finally {
+        try {
+          if (rs != null) {
+            rs.close();
+          }
+          if (statement != null) {
+            statement.close();
+          }
+        } catch (SQLException e) {
+          ;
+        }
+      }
+    }
+    return upgradePackage;
+  }
+
+  /**
+   * If still on HDP 2.1, then no repo versions exist, so we need to bootstrap the HDP 2.1 repo version,
+   * and mark it as CURRENT in the cluster_version table for the cluster, as well as the host_version table
+   * for all hosts.
+   */
+  @Transactional
+  public void bootstrapRepoVersionForHDP21() throws AmbariException, SQLException {
+    final String hardcodedInitialVersion = "2.1.0.0-0001";
+    AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
+    AmbariMetaInfo ambariMetaInfo = amc.getAmbariMetaInfo();
+    StackDAO stackDAO = injector.getInstance(StackDAO.class);
+    RepositoryVersionHelper repositoryVersionHelper = injector.getInstance(RepositoryVersionHelper.class);
+    RepositoryVersionDAO repositoryVersionDAO = injector.getInstance(RepositoryVersionDAO.class);
+    ClusterVersionDAO clusterVersionDAO = injector.getInstance(ClusterVersionDAO.class);
+    HostVersionDAO hostVersionDAO = injector.getInstance(HostVersionDAO.class);
+
+    Clusters clusters = amc.getClusters();
+    if (clusters == null) {
+      LOG.error("Unable to get Clusters entity.");
+      return;
+    }
+
+    for (Cluster cluster : clusters.getClusters().values()) {
+      ClusterEntity clusterEntity = clusterDAO.findByName(cluster.getClusterName());
+      final StackId stackId = cluster.getCurrentStackVersion();
+      LOG.info(MessageFormat.format("Analyzing cluster {0}, currently at stack {1} and version {2}",
+        cluster.getClusterName(), stackId.getStackName(), stackId.getStackVersion()));
+
+      if (stackId.getStackName().equalsIgnoreCase("HDP") && stackId.getStackVersion().equalsIgnoreCase("2.1")) {
+        final StackInfo stackInfo = ambariMetaInfo.getStack(stackId.getStackName(), stackId.getStackVersion());
+        StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
+
+        LOG.info("Bootstrapping the versions since using HDP-2.1");
+
+        // The actual value is not known, so use this.
+        String displayName = stackId.getStackName() + "-" + hardcodedInitialVersion;
+
+        // However, the Repo URLs should be correct.
+        String operatingSystems = repositoryVersionHelper.serializeOperatingSystems(stackInfo.getRepositories());
+
+        // Create the Repo Version if it doesn't already exist.
+        RepositoryVersionEntity repoVersionEntity = repositoryVersionDAO.findByDisplayName(displayName);
+        if (null != repoVersionEntity) {
+          LOG.info(MessageFormat.format("A Repo Version already exists with Display Name: {0}", displayName));
+        } else {
+          final long repoVersionIdSeq = repositoryVersionDAO.findMaxId("id");
+          // Safe to attempt to add the sequence if it doesn't exist already.
+          addSequence("repo_version_id_seq", repoVersionIdSeq, false);
+
+          repoVersionEntity = repositoryVersionDAO.create(
+            stackEntity, hardcodedInitialVersion, displayName, operatingSystems);
+          LOG.info(MessageFormat.format("Created Repo Version with ID: {0,number,#}\n, Display Name: {1}, Repo URLs: {2}\n",
+            repoVersionEntity.getId(), displayName, operatingSystems));
+        }
+
+        // Create the Cluster Version if it doesn't already exist.
+        ClusterVersionEntity clusterVersionEntity = clusterVersionDAO.findByClusterAndStackAndVersion(cluster.getClusterName(),
+          stackId, hardcodedInitialVersion);
+
+        if (null != clusterVersionEntity) {
+          LOG.info(MessageFormat.format("A Cluster Version version for cluster: {0}, version: {1}, already exists; its state is {2}.",
+            cluster.getClusterName(), clusterVersionEntity.getRepositoryVersion().getVersion(), clusterVersionEntity.getState()));
+
+          // If there are no CURRENT cluster versions, make this one the CURRENT one.
+          if (clusterVersionEntity.getState() != RepositoryVersionState.CURRENT &&
+            clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.CURRENT).isEmpty()) {
+            clusterVersionEntity.setState(RepositoryVersionState.CURRENT);
+            clusterVersionDAO.merge(clusterVersionEntity);
+          }
+        } else {
+          final long clusterVersionIdSeq = clusterVersionDAO.findMaxId("id");
+          // Safe to attempt to add the sequence if it doesn't exist already.
+          addSequence("cluster_version_id_seq", clusterVersionIdSeq, false);
+
+          clusterVersionEntity = clusterVersionDAO.create(clusterEntity, repoVersionEntity, RepositoryVersionState.CURRENT,
+            System.currentTimeMillis(), System.currentTimeMillis(), "admin");
+          LOG.info(MessageFormat.format("Created Cluster Version with ID: {0,number,#}, cluster: {1}, version: {2}, state: {3}.",
+            clusterVersionEntity.getId(), cluster.getClusterName(), clusterVersionEntity.getRepositoryVersion().getVersion(),
+            clusterVersionEntity.getState()));
+        }
+
+        // Create the Host Versions if they don't already exist.
+        Collection<HostEntity> hosts = clusterEntity.getHostEntities();
+        boolean addedAtLeastOneHost = false;
+        if (null != hosts && !hosts.isEmpty()) {
+          for (HostEntity hostEntity : hosts) {
+            HostVersionEntity hostVersionEntity = hostVersionDAO.findByClusterStackVersionAndHost(cluster.getClusterName(),
+              stackId, hardcodedInitialVersion, hostEntity.getHostName());
+
+            if (null != hostVersionEntity) {
+              LOG.info(MessageFormat.format("A Host Version version for cluster: {0}, version: {1}, host: {2}, already exists; its state is {3}.",
+                cluster.getClusterName(), hostVersionEntity.getRepositoryVersion().getVersion(),
+                hostEntity.getHostName(), hostVersionEntity.getState()));
+
+              if (hostVersionEntity.getState() != RepositoryVersionState.CURRENT &&
+                hostVersionDAO.findByClusterHostAndState(cluster.getClusterName(), hostEntity.getHostName(),
+                  RepositoryVersionState.CURRENT).isEmpty()) {
+                hostVersionEntity.setState(RepositoryVersionState.CURRENT);
+                hostVersionDAO.merge(hostVersionEntity);
+              }
+            } else {
+              // This should only be done the first time.
+              if (!addedAtLeastOneHost) {
+                final long hostVersionIdSeq = hostVersionDAO.findMaxId("id");
+                // Safe to attempt to add the sequence if it doesn't exist already.
+                addSequence("host_version_id_seq", hostVersionIdSeq, false);
+                addedAtLeastOneHost = true;
+              }
+
+              hostVersionEntity = new HostVersionEntity(hostEntity, repoVersionEntity, RepositoryVersionState.CURRENT);
+              hostVersionDAO.create(hostVersionEntity);
+              LOG.info(MessageFormat.format("Created Host Version with ID: {0,number,#}, cluster: {1}, version: {2}, host: {3}, state: {4}.",
+                hostVersionEntity.getId(), cluster.getClusterName(), hostVersionEntity.getRepositoryVersion().getVersion(),
+                hostEntity.getHostName(), hostVersionEntity.getState()));
+            }
+          }
+        } else {
+          LOG.info(MessageFormat.format("Not inserting any Host Version records since cluster {0} does not have any hosts.",
+            cluster.getClusterName()));
+        }
+      }
+    }
+  }
+
+  /**
    * Adds the following columns to the {@value #UPGRADE_TABLE} table:
    * <ul>
    * <li>{@value #DOWNGRADE_ALLOWED_COLUMN}</li>
@@ -234,7 +606,7 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
     Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
     for (final Cluster cluster : clusterMap.values()) {
       final AlertDefinitionEntity alertDefinitionEntity = alertDefinitionDAO.findByName(
-          cluster.getClusterId(), "journalnode_process");
+        cluster.getClusterId(), "journalnode_process");
 
       if (alertDefinitionEntity != null) {
         String source = alertDefinitionEntity.getSource();
@@ -275,16 +647,16 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
 
     rootJson.getAsJsonObject("reporting").getAsJsonObject("ok").remove("text");
     rootJson.getAsJsonObject("reporting").getAsJsonObject("ok").addProperty(
-            "text", "HTTP {0} response in {2:.3f}s");
+      "text", "HTTP {0} response in {2:.3f}s");
 
     rootJson.getAsJsonObject("reporting").getAsJsonObject("warning").remove("text");
     rootJson.getAsJsonObject("reporting").getAsJsonObject("warning").addProperty(
-            "text", "HTTP {0} response from {1} in {2:.3f}s ({3})");
+      "text", "HTTP {0} response from {1} in {2:.3f}s ({3})");
     rootJson.getAsJsonObject("reporting").getAsJsonObject("warning").remove("value");
 
     rootJson.getAsJsonObject("reporting").getAsJsonObject("critical").remove("text");
     rootJson.getAsJsonObject("reporting").getAsJsonObject("critical").addProperty("text",
-            "Connection failed to {1} ({3})");
+      "Connection failed to {1} ({3})");
     rootJson.getAsJsonObject("reporting").getAsJsonObject("critical").remove("value");
 
     return rootJson.toString();
@@ -308,7 +680,7 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
 
   protected void updateHDFSConfigs() throws AmbariException {
     AmbariManagementController ambariManagementController = injector.getInstance(
-            AmbariManagementController.class);
+      AmbariManagementController.class);
     Map<String, Cluster> clusterMap = getCheckedClusterMap(ambariManagementController.getClusters());
 
     for (final Cluster cluster : clusterMap.values()) {
@@ -357,7 +729,7 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
     for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
       StackId stackId = cluster.getCurrentStackVersion();
       if (stackId != null && stackId.getStackName().equals("HDP") &&
-               VersionUtils.compareVersions(stackId.getStackVersion(), "2.2") >= 0) {
+        VersionUtils.compareVersions(stackId.getStackVersion(), "2.2") >= 0) {
         Config hbaseEnvConfig = cluster.getDesiredConfigByType(HBASE_ENV_CONFIG);
         if (hbaseEnvConfig != null) {
           String content = hbaseEnvConfig.getProperties().get(CONTENT_PROPERTY);

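populateUpgradeTable() above defaults upgrade_type to ROLLING and backfills upgrade_package row by row over raw JDBC, because the JPA entities are mid-migration and no longer match the schema. A rough, self-contained sketch of that backfill logic using Python's sqlite3 (the table contents and the package lookup are illustrative, not Ambari's real schema bootstrap):

import sqlite3

# Illustrative schema: only the columns the catalog reads and writes.
conn = sqlite3.connect(":memory:")
conn.execute("""CREATE TABLE upgrade (
    upgrade_id INTEGER, cluster_id INTEGER,
    from_version TEXT, to_version TEXT, direction TEXT,
    upgrade_package TEXT, upgrade_type TEXT)""")
conn.execute("INSERT INTO upgrade VALUES "
             "(1, 2, '2.2.6.0-2800', '2.3.0.0-2557', 'UPGRADE', NULL, NULL)")

def calculate_upgrade_package(cluster_id, version):
    # Stand-in for the repo_version lookup done in calculateUpgradePackage().
    return "upgrade-2.3"

rows = conn.execute("SELECT * FROM upgrade").fetchall()
for upgrade_id, cluster_id, from_v, to_v, direction, pkg, utype in rows:
    if not utype:
        # Every pre-existing upgrade is treated as a rolling upgrade.
        conn.execute("UPDATE upgrade SET upgrade_type = 'ROLLING' "
                     "WHERE upgrade_id = ?", (upgrade_id,))
    if not pkg:
        # Upgrades key off to_version; downgrades fall back to from_version.
        version = to_v if direction == "UPGRADE" else from_v
        conn.execute("UPDATE upgrade SET upgrade_package = ? WHERE upgrade_id = ?",
                     (calculate_upgrade_package(cluster_id, version), upgrade_id))

print(conn.execute("SELECT upgrade_type, upgrade_package FROM upgrade").fetchall())
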
http://git-wip-us.apache.org/repos/asf/ambari/blob/d30b5f03/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog213Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog213Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog213Test.java
index 69e1287..3918ec6 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog213Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog213Test.java
@@ -18,6 +18,7 @@
 
 package org.apache.ambari.server.upgrade;
 
+import static org.easymock.EasyMock.anyLong;
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.capture;
 import static org.easymock.EasyMock.createMockBuilder;
@@ -32,6 +33,10 @@ import static org.easymock.EasyMock.verify;
 
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -45,16 +50,29 @@ import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.dao.ClusterVersionDAO;
 import org.apache.ambari.server.orm.dao.DaoUtils;
+import org.apache.ambari.server.orm.dao.HostVersionDAO;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
 import org.apache.ambari.server.orm.dao.StackDAO;
+import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
+import org.apache.ambari.server.orm.entities.HostEntity;
+import org.apache.ambari.server.orm.entities.HostVersionEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
+import org.apache.ambari.server.stack.StackManagerFactory;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.RepositoryVersionState;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.StackInfo;
 import org.apache.ambari.server.state.stack.OsFamily;
+import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper;
 import org.easymock.Capture;
 import org.easymock.EasyMock;
 import org.easymock.EasyMockSupport;
@@ -71,6 +89,7 @@ import com.google.inject.Injector;
 import com.google.inject.Module;
 import com.google.inject.Provider;
 import com.google.inject.persist.PersistService;
+import java.lang.reflect.Field;
 
 /**
  * {@link org.apache.ambari.server.upgrade.UpgradeCatalog213} unit tests.
@@ -81,6 +100,13 @@ public class UpgradeCatalog213Test {
   private EntityManager entityManager = createNiceMock(EntityManager.class);
   private UpgradeCatalogHelper upgradeCatalogHelper;
   private StackEntity desiredStackEntity;
+  private AmbariManagementController amc = createNiceMock(AmbariManagementController.class);
+  private AmbariMetaInfo metaInfo = createNiceMock(AmbariMetaInfo.class);
+  private StackDAO stackDAO = createNiceMock(StackDAO.class);
+  private RepositoryVersionDAO repositoryVersionDAO = createNiceMock(RepositoryVersionDAO.class);
+  private ClusterVersionDAO clusterVersionDAO = createNiceMock(ClusterVersionDAO.class);
+  private HostVersionDAO hostVersionDAO = createNiceMock(HostVersionDAO.class);
+  private ClusterDAO clusterDAO = createNiceMock(ClusterDAO.class);
 
   private IMocksControl mocksControl = EasyMock.createControl();
 
@@ -118,6 +144,7 @@ public class UpgradeCatalog213Test {
       public void configure(Binder binder) {
         binder.bind(DBAccessor.class).toInstance(dbAccessor);
         binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+        binder.bind(EntityManager.class).toInstance(entityManager);
       }
     };
 
@@ -129,10 +156,46 @@ public class UpgradeCatalog213Test {
 
   @Test
   public void testExecuteDMLUpdates() throws Exception {
+    // TODO AMBARI-13001, re-add unit test section.
+    /*
+    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
+    Configuration configuration = createNiceMock(Configuration.class);
+    Connection connection = createNiceMock(Connection.class);
+    Statement statement = createNiceMock(Statement.class);
+    ResultSet resultSet = createNiceMock(ResultSet.class);
+    expect(configuration.getDatabaseUrl()).andReturn(Configuration.JDBC_IN_MEMORY_URL).anyTimes();
+    dbAccessor.getConnection();
+    expectLastCall().andReturn(connection).anyTimes();
+    connection.createStatement();
+    expectLastCall().andReturn(statement).anyTimes();
+    statement.executeQuery(anyObject(String.class));
+    expectLastCall().andReturn(resultSet).anyTimes();
+
+    // Technically, this is a DDL, but it has to be ran during the DML portion
+    // because it requires the persistence layer to be started.
+    UpgradeSectionDDL upgradeSectionDDL = new UpgradeSectionDDL();
+
+    // Execute any DDL schema changes
+    upgradeSectionDDL.execute(dbAccessor);
+
+    // Begin DML verifications
+    verifyBootstrapHDP21();
+
+    // Replay main sections
+    replay(dbAccessor, configuration, resultSet, connection, statement);
+
+
+    AbstractUpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor);
+    Class<?> c = AbstractUpgradeCatalog.class;
+    Field f = c.getDeclaredField("configuration");
+    f.setAccessible(true);
+    f.set(upgradeCatalog, configuration);
+    */
+
+    Method updateStormConfigs = UpgradeCatalog213.class.getDeclaredMethod("updateStormConfigs");
     Method updateAMSConfigs = UpgradeCatalog213.class.getDeclaredMethod("updateAMSConfigs");
     Method updateHDFSConfigs = UpgradeCatalog213.class.getDeclaredMethod("updateHDFSConfigs");
     Method updateKafkaConfigs = UpgradeCatalog213.class.getDeclaredMethod("updateKafkaConfigs");
-    Method updateStormConfigs = UpgradeCatalog213.class.getDeclaredMethod("updateStormConfigs");
     Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
     Method updateHbaseEnvConfig = UpgradeCatalog213.class.getDeclaredMethod("updateHbaseEnvConfig");
     Method updateZookeeperLog4j = UpgradeCatalog213.class.getDeclaredMethod("updateZookeeperLog4j");
@@ -140,16 +203,16 @@ public class UpgradeCatalog213Test {
     Method updateAlertDefinitions = UpgradeCatalog213.class.getDeclaredMethod("updateAlertDefinitions");
 
     UpgradeCatalog213 upgradeCatalog213 = createMockBuilder(UpgradeCatalog213.class)
-        .addMockedMethod(updateAMSConfigs)
-        .addMockedMethod(updateHDFSConfigs)
-        .addMockedMethod(updateStormConfigs)
-        .addMockedMethod(addNewConfigurationsFromXml)
-        .addMockedMethod(updateHbaseEnvConfig)
-        .addMockedMethod(updateAlertDefinitions)
-        .addMockedMethod(updateKafkaConfigs)
-        .addMockedMethod(updateZookeeperLog4j)
-        .addMockedMethod(updateHadoopEnvConfig)
-        .createMock();
+      .addMockedMethod(updateAMSConfigs)
+      .addMockedMethod(updateHDFSConfigs)
+      .addMockedMethod(updateStormConfigs)
+      .addMockedMethod(addNewConfigurationsFromXml)
+      .addMockedMethod(updateHbaseEnvConfig)
+      .addMockedMethod(updateAlertDefinitions)
+      .addMockedMethod(updateKafkaConfigs)
+      .addMockedMethod(updateZookeeperLog4j)
+      .addMockedMethod(updateHadoopEnvConfig)
+      .createMock();
 
     upgradeCatalog213.updateHbaseEnvConfig();
     expectLastCall().once();
@@ -177,11 +240,98 @@ public class UpgradeCatalog213Test {
     verify(upgradeCatalog213);
   }
 
+  /**
+   * Verify that when bootstrapping HDP 2.1, records get inserted into the
+   * repo_version, cluster_version, and host_version tables.
+   * @throws AmbariException
+   */
+  private void verifyBootstrapHDP21() throws Exception, AmbariException {
+    final String stackName = "HDP";
+    final String stackVersion = "2.1";
+    final String stackNameAndVersion = stackName + "-" + stackVersion;
+    final String buildNumber = "2.1.0.0-0001";
+    final String stackAndBuild = stackName + "-" + buildNumber;
+    final String clusterName = "c1";
+
+    expect(amc.getAmbariMetaInfo()).andReturn(metaInfo);
+
+    // Mock the actions to bootstrap if using HDP 2.1
+    Clusters clusters = createNiceMock(Clusters.class);
+    expect(amc.getClusters()).andReturn(clusters);
+
+    Map<String, Cluster> clusterHashMap = new HashMap<String, Cluster>();
+    Cluster cluster = createNiceMock(Cluster.class);
+    clusterHashMap.put(clusterName, cluster);
+    expect(clusters.getClusters()).andReturn(clusterHashMap);
+
+    StackId stackId = new StackId(stackNameAndVersion);
+    expect(cluster.getCurrentStackVersion()).andReturn(stackId);
+
+    StackInfo stackInfo = new StackInfo();
+    stackInfo.setVersion(buildNumber);
+    expect(metaInfo.getStack(stackName, stackVersion)).andReturn(stackInfo);
+
+    StackEntity stackEntity = createNiceMock(StackEntity.class);
+    expect(stackEntity.getStackName()).andReturn(stackName);
+    expect(stackEntity.getStackVersion()).andReturn(stackVersion);
+
+    expect(stackDAO.find(stackName, stackVersion)).andReturn(stackEntity);
+
+    replay(amc, metaInfo, clusters, cluster, stackEntity, stackDAO);
+
+    // Mock more function calls
+    // Repository Version
+    RepositoryVersionEntity repositoryVersionEntity = createNiceMock(RepositoryVersionEntity.class);
+    expect(repositoryVersionDAO.findByDisplayName(stackAndBuild)).andReturn(null);
+    expect(repositoryVersionDAO.findMaxId("id")).andReturn(0L);
+    expect(repositoryVersionDAO.findAll()).andReturn(Collections.<RepositoryVersionEntity>emptyList());
+    expect(repositoryVersionDAO.create(anyObject(StackEntity.class), anyObject(String.class), anyObject(String.class), anyObject(String.class))).andReturn(repositoryVersionEntity);
+    expect(repositoryVersionEntity.getId()).andReturn(1L);
+    expect(repositoryVersionEntity.getVersion()).andReturn(buildNumber);
+    replay(repositoryVersionDAO, repositoryVersionEntity);
+
+    // Cluster Version
+    ClusterVersionEntity clusterVersionEntity = createNiceMock(ClusterVersionEntity.class);
+    expect(clusterVersionEntity.getId()).andReturn(1L);
+    expect(clusterVersionEntity.getState()).andReturn(RepositoryVersionState.CURRENT);
+    expect(clusterVersionEntity.getRepositoryVersion()).andReturn(repositoryVersionEntity);
+
+    expect(clusterVersionDAO.findByClusterAndStackAndVersion(anyObject(String.class), anyObject(StackId.class), anyObject(String.class))).andReturn(null);
+    expect(clusterVersionDAO.findMaxId("id")).andReturn(0L);
+    expect(clusterVersionDAO.findAll()).andReturn(Collections.<ClusterVersionEntity>emptyList());
+    expect(clusterVersionDAO.create(anyObject(ClusterEntity.class), anyObject(RepositoryVersionEntity.class), anyObject(RepositoryVersionState.class), anyLong(), anyLong(), anyObject(String.class))).andReturn(clusterVersionEntity);
+    replay(clusterVersionDAO, clusterVersionEntity);
+
+    // Host Version
+    ClusterEntity clusterEntity = createNiceMock(ClusterEntity.class);
+    expect(clusterEntity.getClusterName()).andReturn(clusterName).anyTimes();
+    expect(clusterDAO.findByName(anyObject(String.class))).andReturn(clusterEntity);
+
+    Collection<HostEntity> hostEntities = new ArrayList<HostEntity>();
+    HostEntity hostEntity1 = createNiceMock(HostEntity.class);
+    HostEntity hostEntity2 = createNiceMock(HostEntity.class);
+    expect(hostEntity1.getHostName()).andReturn("host1");
+    expect(hostEntity2.getHostName()).andReturn("host2");
+    hostEntities.add(hostEntity1);
+    hostEntities.add(hostEntity2);
+    expect(clusterEntity.getHostEntities()).andReturn(hostEntities);
+
+    expect(hostVersionDAO.findByClusterStackVersionAndHost(anyObject(String.class), anyObject(StackId.class), anyObject(String.class), anyObject(String.class))).andReturn(null);
+    expect(hostVersionDAO.findMaxId("id")).andReturn(0L);
+    expect(hostVersionDAO.findAll()).andReturn(Collections.<HostVersionEntity>emptyList());
+
+    replay(clusterEntity, clusterDAO, hostVersionDAO, hostEntity1, hostEntity2);
+  }
+
   @Test
   public void testExecuteUpgradePreDMLUpdates() throws Exception {
     Method executeStackPreDMLUpdates = UpgradeCatalog213.class.getDeclaredMethod("executeUpgradePreDMLUpdates");
+    Method executeStackUpgradeDDLUpdates = UpgradeCatalog213.class.getDeclaredMethod("executeStackUpgradeDDLUpdates");
+    Method bootstrapRepoVersionForHDP21 = UpgradeCatalog213.class.getDeclaredMethod("bootstrapRepoVersionForHDP21");
 
     final UpgradeCatalog213 upgradeCatalog213 = createMockBuilder(UpgradeCatalog213.class)
+      .addMockedMethod(executeStackUpgradeDDLUpdates)
+      .addMockedMethod(bootstrapRepoVersionForHDP21)
       .addMockedMethod(executeStackPreDMLUpdates).createMock();
 
     final Injector mockInjector = Guice.createInjector(new AbstractModule() {
@@ -190,12 +340,19 @@ public class UpgradeCatalog213Test {
         bind(UpgradeCatalog213.class).toInstance(upgradeCatalog213);
         bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
         bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+        bind(EntityManager.class).toInstance(entityManager);
       }
     });
 
     upgradeCatalog213.executeUpgradePreDMLUpdates();
     expectLastCall().once();
 
+    upgradeCatalog213.executeStackUpgradeDDLUpdates();
+    expectLastCall().once();
+
+    upgradeCatalog213.bootstrapRepoVersionForHDP21();
+    expectLastCall().once();
+
     replay(upgradeCatalog213);
     mockInjector.getInstance(UpgradeCatalog213.class).executePreDMLUpdates();
 
@@ -225,6 +382,7 @@ public class UpgradeCatalog213Test {
         bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
         bind(ConfigHelper.class).toInstance(mockConfigHelper);
         bind(Clusters.class).toInstance(mockClusters);
+        bind(EntityManager.class).toInstance(entityManager);
 
         bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
         bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
@@ -264,6 +422,7 @@ public class UpgradeCatalog213Test {
       protected void configure() {
         bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
         bind(Clusters.class).toInstance(mockClusters);
+        bind(EntityManager.class).toInstance(entityManager);
 
         bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
         bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
@@ -307,6 +466,7 @@ public class UpgradeCatalog213Test {
         bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
         bind(ConfigHelper.class).toInstance(mockConfigHelper);
         bind(Clusters.class).toInstance(mockClusters);
+        bind(EntityManager.class).toInstance(entityManager);
 
         bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
         bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
@@ -332,29 +492,29 @@ public class UpgradeCatalog213Test {
     Method updateAmsHbaseEnvContent = UpgradeCatalog213.class.getDeclaredMethod("updateAmsHbaseEnvContent", String.class);
     UpgradeCatalog213 upgradeCatalog213 = new UpgradeCatalog213(injector);
     String oldContent = "export HBASE_CLASSPATH=${HBASE_CLASSPATH}\n" +
-        "\n" +
-        "# The maximum amount of heap to use, in MB. Default is 1000.\n" +
-        "export HBASE_HEAPSIZE={{hbase_heapsize}}\n" +
-        "\n" +
-        "{% if java_version &lt; 8 %}\n" +
-        "export HBASE_MASTER_OPTS=\" -XX:PermSize=64m -XX:MaxPermSize={{hbase_master_maxperm_size}} -Xms{{hbase_heapsize}} -Xmx{{hbase_heapsize}} -Xmn{{hbase_master_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly\"\n" +
-        "export HBASE_REGIONSERVER_OPTS=\"-XX:MaxPermSize=128m -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n" +
-        "{% else %}\n" +
-        "export HBASE_MASTER_OPTS=\" -Xms{{hbase_heapsize}} -Xmx{{hbase_heapsize}} -Xmn{{hbase_master_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly\"\n" +
-        "export HBASE_REGIONSERVER_OPTS=\" -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n" +
-        "{% endif %}\n";
+      "\n" +
+      "# The maximum amount of heap to use, in MB. Default is 1000.\n" +
+      "export HBASE_HEAPSIZE={{hbase_heapsize}}\n" +
+      "\n" +
+      "{% if java_version &lt; 8 %}\n" +
+      "export HBASE_MASTER_OPTS=\" -XX:PermSize=64m -XX:MaxPermSize={{hbase_master_maxperm_size}} -Xms{{hbase_heapsize}} -Xmx{{hbase_heapsize}} -Xmn{{hbase_master_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly\"\n" +
+      "export HBASE_REGIONSERVER_OPTS=\"-XX:MaxPermSize=128m -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n" +
+      "{% else %}\n" +
+      "export HBASE_MASTER_OPTS=\" -Xms{{hbase_heapsize}} -Xmx{{hbase_heapsize}} -Xmn{{hbase_master_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly\"\n" +
+      "export HBASE_REGIONSERVER_OPTS=\" -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n" +
+      "{% endif %}\n";
     String expectedContent = "export HBASE_CLASSPATH=${HBASE_CLASSPATH}\n" +
-        "\n" +
-        "# The maximum amount of heap to use, in MB. Default is 1000.\n" +
-        "export HBASE_HEAPSIZE={{hbase_heapsize}}m\n" +
-        "\n" +
-        "{% if java_version &lt; 8 %}\n" +
-        "export HBASE_MASTER_OPTS=\" -XX:PermSize=64m -XX:MaxPermSize={{hbase_master_maxperm_size}}m -Xms{{hbase_heapsize}}m -Xmx{{hbase_heapsize}}m -Xmn{{hbase_master_xmn_size}}m -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly\"\n" +
-        "export HBASE_REGIONSERVER_OPTS=\"-XX:MaxPermSize=128m -Xmn{{regionserver_xmn_size}}m -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{regionserver_heapsize}}m -Xmx{{regionserver_heapsize}}m\"\n" +
-        "{% else %}\n" +
-        "export HBASE_MASTER_OPTS=\" -Xms{{hbase_heapsize}}m -Xmx{{hbase_heapsize}}m -Xmn{{hbase_master_xmn_size}}m -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly\"\n" +
-        "export HBASE_REGIONSERVER_OPTS=\" -Xmn{{regionserver_xmn_size}}m -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{regionserver_heapsize}}m -Xmx{{regionserver_heapsize}}m\"\n" +
-        "{% endif %}\n";
+      "\n" +
+      "# The maximum amount of heap to use, in MB. Default is 1000.\n" +
+      "export HBASE_HEAPSIZE={{hbase_heapsize}}m\n" +
+      "\n" +
+      "{% if java_version &lt; 8 %}\n" +
+      "export HBASE_MASTER_OPTS=\" -XX:PermSize=64m -XX:MaxPermSize={{hbase_master_maxperm_size}}m -Xms{{hbase_heapsize}}m -Xmx{{hbase_heapsize}}m -Xmn{{hbase_master_xmn_size}}m -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly\"\n" +
+      "export HBASE_REGIONSERVER_OPTS=\"-XX:MaxPermSize=128m -Xmn{{regionserver_xmn_size}}m -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{regionserver_heapsize}}m -Xmx{{regionserver_heapsize}}m\"\n" +
+      "{% else %}\n" +
+      "export HBASE_MASTER_OPTS=\" -Xms{{hbase_heapsize}}m -Xmx{{hbase_heapsize}}m -Xmn{{hbase_master_xmn_size}}m -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly\"\n" +
+      "export HBASE_REGIONSERVER_OPTS=\" -Xmn{{regionserver_xmn_size}}m -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{regionserver_heapsize}}m -Xmx{{regionserver_heapsize}}m\"\n" +
+      "{% endif %}\n";
     String result = (String) updateAmsHbaseEnvContent.invoke(upgradeCatalog213, oldContent);
     Assert.assertEquals(expectedContent, result);
   }
@@ -364,9 +524,9 @@ public class UpgradeCatalog213Test {
     Method updateAmsEnvContent = UpgradeCatalog213.class.getDeclaredMethod("updateAmsEnvContent", String.class);
     UpgradeCatalog213 upgradeCatalog213 = new UpgradeCatalog213(injector);
     String oldContent = "# AMS Collector heapsize\n" +
-        "export AMS_COLLECTOR_HEAPSIZE={{metrics_collector_heapsize}}\n";
+      "export AMS_COLLECTOR_HEAPSIZE={{metrics_collector_heapsize}}\n";
     String expectedContent = "# AMS Collector heapsize\n" +
-        "export AMS_COLLECTOR_HEAPSIZE={{metrics_collector_heapsize}}m\n";
+      "export AMS_COLLECTOR_HEAPSIZE={{metrics_collector_heapsize}}m\n";
     String result = (String) updateAmsEnvContent.invoke(upgradeCatalog213, oldContent);
     Assert.assertEquals(expectedContent, result);
   }
@@ -403,6 +563,7 @@ public class UpgradeCatalog213Test {
         bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
         bind(ConfigHelper.class).toInstance(mockConfigHelper);
         bind(Clusters.class).toInstance(mockClusters);
+        bind(EntityManager.class).toInstance(entityManager);
 
         bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
         bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
@@ -428,18 +589,18 @@ public class UpgradeCatalog213Test {
   public void testModifyJournalnodeProcessAlertSource() throws Exception {
     UpgradeCatalog213 upgradeCatalog213 = new UpgradeCatalog213(injector);
     String alertSource = "{\"uri\":\"{{hdfs-site/dfs.journalnode.http-address}}\",\"default_port\":8480," +
-        "\"type\":\"PORT\",\"reporting\":{\"ok\":{\"text\":\"TCP OK - {0:.3f}s response on port {1}\"}," +
-        "\"warning\":{\"text\":\"TCP OK - {0:.3f}s response on port {1}\",\"value\":1.5}," +
-        "\"critical\":{\"text\":\"Connection failed: {0} to {1}:{2}\",\"value\":5.0}}}";
+      "\"type\":\"PORT\",\"reporting\":{\"ok\":{\"text\":\"TCP OK - {0:.3f}s response on port {1}\"}," +
+      "\"warning\":{\"text\":\"TCP OK - {0:.3f}s response on port {1}\",\"value\":1.5}," +
+      "\"critical\":{\"text\":\"Connection failed: {0} to {1}:{2}\",\"value\":5.0}}}";
     String expected = "{\"reporting\":{\"ok\":{\"text\":\"HTTP {0} response in {2:.3f}s\"}," +
-        "\"warning\":{\"text\":\"HTTP {0} response from {1} in {2:.3f}s ({3})\"}," +
-        "\"critical\":{\"text\":\"Connection failed to {1} ({3})\"}},\"type\":\"WEB\"," +
-        "\"uri\":{\"http\":\"{{hdfs-site/dfs.journalnode.http-address}}\"," +
-        "\"https\":\"{{hdfs-site/dfs.journalnode.https-address}}\"," +
-        "\"kerberos_keytab\":\"{{hdfs-site/dfs.web.authentication.kerberos.keytab}}\"," +
-        "\"kerberos_principal\":\"{{hdfs-site/dfs.web.authentication.kerberos.principal}}\"," +
-        "\"https_property\":\"{{hdfs-site/dfs.http.policy}}\"," +
-        "\"https_property_value\":\"HTTPS_ONLY\",\"connection_timeout\":5.0}}";
+      "\"warning\":{\"text\":\"HTTP {0} response from {1} in {2:.3f}s ({3})\"}," +
+      "\"critical\":{\"text\":\"Connection failed to {1} ({3})\"}},\"type\":\"WEB\"," +
+      "\"uri\":{\"http\":\"{{hdfs-site/dfs.journalnode.http-address}}\"," +
+      "\"https\":\"{{hdfs-site/dfs.journalnode.https-address}}\"," +
+      "\"kerberos_keytab\":\"{{hdfs-site/dfs.web.authentication.kerberos.keytab}}\"," +
+      "\"kerberos_principal\":\"{{hdfs-site/dfs.web.authentication.kerberos.principal}}\"," +
+      "\"https_property\":\"{{hdfs-site/dfs.http.policy}}\"," +
+      "\"https_property_value\":\"HTTPS_ONLY\",\"connection_timeout\":5.0}}";
     Assert.assertEquals(expected, upgradeCatalog213.modifyJournalnodeProcessAlertSource(alertSource));
   }
 
@@ -471,6 +632,7 @@ public class UpgradeCatalog213Test {
         bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
         bind(ConfigHelper.class).toInstance(mockConfigHelper);
         bind(Clusters.class).toInstance(mockClusters);
+        bind(EntityManager.class).toInstance(entityManager);
 
         bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
         bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
@@ -502,6 +664,17 @@ public class UpgradeCatalog213Test {
         binder.bind(DBAccessor.class).toInstance(dbAccessor);
         binder.bind(EntityManager.class).toInstance(entityManager);
         binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+        binder.bind(DaoUtils.class).toInstance(createNiceMock(DaoUtils.class));
+        binder.bind(ClusterDAO.class).toInstance(clusterDAO);
+        binder.bind(RepositoryVersionHelper.class).toInstance(createNiceMock(RepositoryVersionHelper.class));
+        binder.bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
+        binder.bind(AmbariManagementController.class).toInstance(amc);
+        binder.bind(AmbariMetaInfo.class).toInstance(metaInfo);
+        binder.bind(StackManagerFactory.class).toInstance(createNiceMock(StackManagerFactory.class));
+        binder.bind(StackDAO.class).toInstance(stackDAO);
+        binder.bind(RepositoryVersionDAO.class).toInstance(repositoryVersionDAO);
+        binder.bind(ClusterVersionDAO.class).toInstance(clusterVersionDAO);
+        binder.bind(HostVersionDAO.class).toInstance(hostVersionDAO);
       }
     };
 
@@ -564,6 +737,43 @@ public class UpgradeCatalog213Test {
     Assert.assertEquals("2.1.3", upgradeCatalog.getTargetVersion());
   }
 
+  // *********** Inner Classes that represent sections of the DDL ***********
+  // ************************************************************************
+
+  /**
+   * Verify that the upgrade table has two columns added to it.
+   */
+  class UpgradeSectionDDL implements SectionDDL {
+
+    Capture<DBAccessor.DBColumnInfo> upgradeTablePackageNameColumnCapture = new Capture<DBAccessor.DBColumnInfo>();
+    Capture<DBAccessor.DBColumnInfo> upgradeTableUpgradeTypeColumnCapture = new Capture<DBAccessor.DBColumnInfo>();
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public void execute(DBAccessor dbAccessor) throws SQLException {
+      // Add columns
+      dbAccessor.addColumn(eq("upgrade"), capture(upgradeTablePackageNameColumnCapture));
+      dbAccessor.addColumn(eq("upgrade"), capture(upgradeTableUpgradeTypeColumnCapture));
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public void verify(DBAccessor dbAccessor) throws SQLException {
+      // Verification section
+      DBAccessor.DBColumnInfo packageNameCol = upgradeTablePackageNameColumnCapture.getValue();
+      Assert.assertEquals(String.class, packageNameCol.getType());
+      Assert.assertEquals("upgrade_package", packageNameCol.getName());
+
+      DBAccessor.DBColumnInfo upgradeTypeCol = upgradeTableUpgradeTypeColumnCapture.getValue();
+      Assert.assertEquals(String.class, upgradeTypeCol.getType());
+      Assert.assertEquals("upgrade_type", upgradeTypeCol.getName());
+    }
+  }
+
   @Test
   public void testShouldDDLsBeExecutedOnUpgrade() throws Exception {
     // GIVEN
@@ -577,6 +787,7 @@ public class UpgradeCatalog213Test {
     Capture<String> capturedPKColumn = EasyMock.newCapture();
     Capture<List<DBAccessor.DBColumnInfo>> capturedColumns = EasyMock.newCapture();
     Capture<DBAccessor.DBColumnInfo> capturedColumn = EasyMock.newCapture();
+    Capture<DBAccessor.DBColumnInfo> capturedHostRoleCommandColumn = EasyMock.newCapture();
 
     EasyMock.expect(mockedInjector.getInstance(DaoUtils.class)).andReturn(mockedDaoUtils);
     mockedInjector.injectMembers(anyObject(UpgradeCatalog.class));
@@ -595,6 +806,7 @@ public class UpgradeCatalog213Test {
 
     // addKerberosDescriptorTable
     mockedDbAccessor.createTable(capture(capturedTableName), capture(capturedColumns), capture(capturedPKColumn));
+    mockedDbAccessor.alterColumn(eq("host_role_command"), capture(capturedHostRoleCommandColumn));
 
     mocksControl.replay();
 


[24/50] [abbrv] ambari git commit: AMBARI-13518 Host cleanup should remove only stack directories from /usr/hdp (dsen)

Posted by nc...@apache.org.
AMBARI-13518 Host cleanup should remove only stack directories from /usr/hdp (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a8445c9a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a8445c9a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a8445c9a

Branch: refs/heads/branch-dev-patch-upgrade
Commit: a8445c9aa699622be486695fcfc6a1c9cf10a518
Parents: b9bb0d3
Author: Dmytro Sen <ds...@apache.org>
Authored: Thu Oct 22 17:18:18 2015 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Thu Oct 22 17:18:18 2015 +0300

----------------------------------------------------------------------
 .../ambari_agent/HostCheckReportFileHandler.py  | 44 ++++++++++--
 .../TestHostCheckReportFileHandler.py           | 72 ++++++++++++++------
 2 files changed, 89 insertions(+), 27 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a8445c9a/ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py b/ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py
index 794e427..1f87a73 100644
--- a/ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py
+++ b/ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 
-'''
+"""
 Licensed to the Apache Software Foundation (ASF) under one
 or more contributor license agreements.  See the NOTICE file
 distributed with this work for additional information
@@ -16,15 +16,19 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
-'''
+"""
 
 import datetime
 import os.path
 import logging
+import re
 import traceback
 from AmbariConfig import AmbariConfig
-import ConfigParser;
+import ConfigParser
 
+HADOOP_ROOT_DIR = "/usr/hdp"
+HADOOP_PERM_REMOVE_LIST = ["current"]
+HADOOP_ITEMDIR_REGEX = "(\d\.){3}\d-\d{4}"
 logger = logging.getLogger(__name__)
 
 class HostCheckReportFileHandler:
@@ -83,6 +87,37 @@ class HostCheckReportFileHandler:
       logger.error("Can't write host check file at %s :%s " % (self.hostCheckCustomActionsFilePath, err.message))
       traceback.print_exc()
 
+  def _hdp_list_directory(self):
+    """
+    Return the filtered list of /usr/hdp entries that are allowed to be removed
+    :rtype: list
+    """
+
+    if not os.path.exists(HADOOP_ROOT_DIR):
+      return []
+
+    matcher = re.compile(HADOOP_ITEMDIR_REGEX)  # pre-compile regexp
+    folder_content = os.listdir(HADOOP_ROOT_DIR)
+    remove_list = []
+
+    remlist_items_count = 0
+
+    for item in folder_content:
+      full_path = "%s%s%s" % (HADOOP_ROOT_DIR, os.path.sep, item)
+      if item in HADOOP_PERM_REMOVE_LIST:
+        remove_list.append(full_path)
+        remlist_items_count += 1
+
+      if matcher.match(item) is not None:
+        remove_list.append(full_path)
+        remlist_items_count += 1
+
+    # if the remove list has the same length as the folder contents, assume they are identical
+    if remlist_items_count == len(folder_content):
+      remove_list.append(HADOOP_ROOT_DIR)
+
+    return remove_list
+
   def writeHostCheckFile(self, hostInfo):
     if self.hostCheckFilePath is None:
       return
@@ -117,8 +152,7 @@ class HostCheckReportFileHandler:
         items = []
         for itemDetail in hostInfo['stackFoldersAndFiles']:
           items.append(itemDetail['name'])
-        if os.path.exists('/usr/hdp'):
-          items.append('/usr/hdp')
+        items += self._hdp_list_directory()
         config.add_section('directories')
         config.set('directories', 'dir_list', ','.join(items))
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a8445c9a/ambari-agent/src/test/python/ambari_agent/TestHostCheckReportFileHandler.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestHostCheckReportFileHandler.py b/ambari-agent/src/test/python/ambari_agent/TestHostCheckReportFileHandler.py
index d56ad8f..c595082 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestHostCheckReportFileHandler.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestHostCheckReportFileHandler.py
@@ -20,6 +20,7 @@ limitations under the License.
 
 from unittest import TestCase
 import unittest
+from mock.mock import patch
 import os
 import tempfile
 from ambari_agent.HostCheckReportFileHandler import HostCheckReportFileHandler
@@ -38,8 +39,8 @@ class TestHostCheckReportFileHandler(TestCase):
     config.set('agent', 'prefix', os.path.dirname(tmpfile))
 
     handler = HostCheckReportFileHandler(config)
-    dict = {}
-    handler.writeHostCheckFile(dict)
+    mydict = {}
+    handler.writeHostCheckFile(mydict)
 
     configValidator = ConfigParser.RawConfigParser()
     configPath = os.path.join(os.path.dirname(tmpfile), HostCheckReportFileHandler.HOST_CHECK_FILE)
@@ -56,16 +57,16 @@ class TestHostCheckReportFileHandler(TestCase):
     config.set('agent', 'prefix', os.path.dirname(tmpfile))
 
     handler = HostCheckReportFileHandler(config)
-    dict = {}
-    dict['hostHealth'] = {}
-    dict['existingUsers'] = []
-    dict['alternatives'] = []
-    dict['stackFoldersAndFiles'] = []
-    dict['hostHealth']['activeJavaProcs'] = []
-    dict['installedPackages'] = []
-    dict['existingRepos'] = []
+    mydict = {}
+    mydict['hostHealth'] = {}
+    mydict['existingUsers'] = []
+    mydict['alternatives'] = []
+    mydict['stackFoldersAndFiles'] = []
+    mydict['hostHealth']['activeJavaProcs'] = []
+    mydict['installedPackages'] = []
+    mydict['existingRepos'] = []
 
-    handler.writeHostCheckFile(dict)
+    handler.writeHostCheckFile(mydict)
 
     configValidator = ConfigParser.RawConfigParser()
     configPath = os.path.join(os.path.dirname(tmpfile), HostCheckReportFileHandler.HOST_CHECK_FILE)
@@ -96,19 +97,19 @@ class TestHostCheckReportFileHandler(TestCase):
 
     handler = HostCheckReportFileHandler(config)
 
-    dict = {}
-    dict['hostHealth'] = {}
-    dict['existingUsers'] = [{'name':'user1', 'homeDir':'/var/log', 'status':'Exists'}]
-    dict['alternatives'] = [
+    mydict = {}
+    mydict['hostHealth'] = {}
+    mydict['existingUsers'] = [{'name':'user1', 'homeDir':'/var/log', 'status':'Exists'}]
+    mydict['alternatives'] = [
       {'name':'/etc/alternatives/hadoop-conf', 'target':'/etc/hadoop/conf.dist'},
       {'name':'/etc/alternatives/hbase-conf', 'target':'/etc/hbase/conf.1'}
     ]
-    dict['stackFoldersAndFiles'] = [{'name':'/a/b', 'type':'directory'},{'name':'/a/b.txt', 'type':'file'}]
-    dict['hostHealth']['activeJavaProcs'] = [
+    mydict['stackFoldersAndFiles'] = [{'name':'/a/b', 'type':'directory'},{'name':'/a/b.txt', 'type':'file'}]
+    mydict['hostHealth']['activeJavaProcs'] = [
       {'pid':355,'hadoop':True,'command':'some command','user':'root'},
       {'pid':455,'hadoop':True,'command':'some command','user':'hdfs'}
     ]
-    handler.writeHostCheckFile(dict)
+    handler.writeHostCheckFile(mydict)
 
     configValidator = ConfigParser.RawConfigParser()
     configPath = os.path.join(os.path.dirname(tmpfile), HostCheckReportFileHandler.HOST_CHECK_FILE)
@@ -130,13 +131,13 @@ class TestHostCheckReportFileHandler(TestCase):
     self.chkItemsEqual(procs, ['455', '355'])
 
 
-    dict['installed_packages'] = [
+    mydict['installed_packages'] = [
       {'name':'hadoop','version':'3.2.3','repoName':'HDP'},
       {'name':'hadoop-lib','version':'3.2.3','repoName':'HDP'}
     ]
-    dict['existing_repos'] = ['HDP', 'HDP-epel']
+    mydict['existing_repos'] = ['HDP', 'HDP-epel']
     
-    handler.writeHostChecksCustomActionsFile(dict)
+    handler.writeHostChecksCustomActionsFile(mydict)
     configValidator = ConfigParser.RawConfigParser()
     configPath_ca = os.path.join(os.path.dirname(tmpfile), HostCheckReportFileHandler.HOST_CHECK_CUSTOM_ACTIONS_FILE)
     configValidator.read(configPath_ca)
@@ -150,13 +151,40 @@ class TestHostCheckReportFileHandler(TestCase):
     time = configValidator.get('metadata', 'created')
     self.assertTrue(time != None)
 
+  @patch("os.path.exists")
+  @patch("os.listdir")
+  def test_write_host_stack_list(self, list_mock, exists_mock):
+    exists_mock.return_value = True
+    list_mock.return_value = ["1.1.1.1-1234", "current", "test"]
+
+    tmpfile = tempfile.mktemp()
+
+    config = ConfigParser.RawConfigParser()
+    config.add_section('agent')
+    config.set('agent', 'prefix', os.path.dirname(tmpfile))
+
+    handler = HostCheckReportFileHandler(config)
+
+    mydict = {}
+    mydict['hostHealth'] = {}
+    mydict['stackFoldersAndFiles'] = [{'name':'/a/b', 'type':'directory'},{'name':'/a/b.txt', 'type':'file'}]
+
+    handler.writeHostCheckFile(mydict)
+
+    configValidator = ConfigParser.RawConfigParser()
+    configPath = os.path.join(os.path.dirname(tmpfile), HostCheckReportFileHandler.HOST_CHECK_FILE)
+    configValidator.read(configPath)
+
+    paths = configValidator.get('directories', 'dir_list')
+    self.chkItemsEqual(paths, ['/a/b', '/a/b.txt', '/usr/hdp/1.1.1.1-1234', '/usr/hdp/current'])
+
   def chkItemsEqual(self, commaDelimited, items):
     items1 = commaDelimited.split(',')
     items1.sort()
     items.sort()
     items1Str = ','.join(items1)
     items2Str = ','.join(items)
-    self.assertEquals(items1Str, items2Str)
+    self.assertEquals(items2Str, items1Str)
 
 if __name__ == "__main__":
   unittest.main(verbosity=2)
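
The new test leans on a mock detail worth spelling out: stacked @patch decorators
are applied bottom-up, so the bottom-most patch supplies the first mock argument.
A standalone sketch of the same isolation pattern (the function name list_hdp is
hypothetical); neither call ever touches the real filesystem:

import os
from mock.mock import patch

@patch("os.path.exists")
@patch("os.listdir")
def list_hdp(list_mock, exists_mock):
  # Bottom decorator (os.listdir) arrives first in the argument list.
  exists_mock.return_value = True
  list_mock.return_value = ["2.3.4.0-1234", "current", "share"]
  return os.listdir("/usr/hdp") if os.path.exists("/usr/hdp") else []

print(list_hdp())  # ['2.3.4.0-1234', 'current', 'share']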


[23/50] [abbrv] ambari git commit: AMBARI-13504 Enable Ranger plugin properties should be validated to have the same value as in the Ranger service (dsen)

Posted by nc...@apache.org.
AMBARI-13504 Enable Ranger plugin properties should be validated to have the same value as in the Ranger service (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b9bb0d35
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b9bb0d35
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b9bb0d35

Branch: refs/heads/branch-dev-patch-upgrade
Commit: b9bb0d3518c592b07e4b23db53905c1de20c8529
Parents: 381c49f
Author: Dmytro Sen <ds...@apache.org>
Authored: Thu Oct 22 17:09:48 2015 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Thu Oct 22 17:09:48 2015 +0300

----------------------------------------------------------------------
 .../stacks/HDP/2.0.6/services/stack_advisor.py  |  11 +-
 .../stacks/HDP/2.2/services/stack_advisor.py    | 109 +++++++-
 .../stacks/2.0.6/common/test_stack_advisor.py   |  43 ++++
 .../stacks/2.2/common/test_stack_advisor.py     | 247 ++++++++++++++++++-
 4 files changed, 403 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b9bb0d35/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index 3db5bfd..d62f44b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -549,7 +549,7 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
 
     if include_zookeeper:
       zookeeper_hosts = self.getHostNamesWithComponent("ZOOKEEPER", "ZOOKEEPER_SERVER", services)
-      zookeeper_port = 2181     #default port
+      zookeeper_port = '2181'     #default port
       if 'zoo.cfg' in services['configurations'] and ('clientPort' in services['configurations']['zoo.cfg']['properties']):
         zookeeper_port = services['configurations']['zoo.cfg']['properties']['clientPort']
 
@@ -1190,6 +1190,15 @@ def getSiteProperties(configurations, siteName):
     return None
   return siteConfig.get("properties")
 
+def getServicesSiteProperties(services, siteName):
+  configurations = services.get("configurations")
+  if not configurations:
+    return None
+  siteConfig = configurations.get(siteName)
+  if siteConfig is None:
+    return None
+  return siteConfig.get("properties")
+
 def to_number(s):
   try:
     return int(re.sub("\D", "", s))
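
The new getServicesSiteProperties helper mirrors getSiteProperties but walks the
services payload instead of the configurations dict. A small standalone usage
sketch (the function body is copied from the patch; the sample payload is
illustrative only):

def getServicesSiteProperties(services, siteName):
  configurations = services.get("configurations")
  if not configurations:
    return None
  siteConfig = configurations.get(siteName)
  if siteConfig is None:
    return None
  return siteConfig.get("properties")

services = {"configurations": {"ranger-env": {"properties": {"ranger-hdfs-plugin-enabled": "Yes"}}}}
print(getServicesSiteProperties(services, "ranger-env"))    # {'ranger-hdfs-plugin-enabled': 'Yes'}
print(getServicesSiteProperties(services, "missing-site"))  # None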

http://git-wip-us.apache.org/repos/asf/ambari/blob/b9bb0d35/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index 3192ca2..cf9c91e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -751,13 +751,19 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
     parentValidators = super(HDP22StackAdvisor, self).getServiceConfigurationValidators()
     childValidators = {
       "HDFS": {"hdfs-site": self.validateHDFSConfigurations,
-               "hadoop-env": self.validateHDFSConfigurationsEnv},
-      "YARN": {"yarn-env": self.validateYARNEnvConfigurations},
+               "hadoop-env": self.validateHDFSConfigurationsEnv,
+               "ranger-hdfs-plugin-properties": self.validateHDFSRangerPluginConfigurations},
+      "YARN": {"yarn-env": self.validateYARNEnvConfigurations,
+               "ranger-yarn-plugin-properties": self.validateYARNRangerPluginConfigurations},
       "HIVE": {"hiveserver2-site": self.validateHiveServer2Configurations,
                "hive-site": self.validateHiveConfigurations,
                "hive-env": self.validateHiveConfigurationsEnv},
       "HBASE": {"hbase-site": self.validateHBASEConfigurations,
-                "hbase-env": self.validateHBASEEnvConfigurations},
+                "hbase-env": self.validateHBASEEnvConfigurations,
+                "ranger-hbase-plugin-properties": self.validateHBASERangerPluginConfigurations},
+      "KNOX": {"ranger-knox-plugin-properties": self.validateKnoxRangerPluginConfigurations},
+      "KAFKA": {"ranger-kafka-plugin-properties": self.validateKafkaRangerPluginConfigurations},
+      "STORM": {"ranger-storm-plugin-properties": self.validateStormRangerPluginConfigurations},
       "MAPREDUCE2": {"mapred-site": self.validateMapReduce2Configurations},
       "TEZ": {"tez-site": self.validateTezConfigurations}
     }
@@ -859,7 +865,22 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
                         {"config-name": 'namenode_opt_newsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_opt_newsize')},
                         {"config-name": 'namenode_opt_maxnewsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_opt_maxnewsize')}]
     return self.toConfigurationValidationProblems(validationItems, "hadoop-env")
-  
+
+  def validateHDFSRangerPluginConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+    ranger_plugin_properties = getSiteProperties(configurations, "ranger-hdfs-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-hdfs-plugin-enabled'] if ranger_plugin_properties else 'No'
+    if ranger_plugin_enabled.lower() == 'yes':
+      # ranger-hdfs-plugin must be enabled in ranger-env
+      ranger_env = getServicesSiteProperties(services, 'ranger-env')
+      if not ranger_env or not 'ranger-hdfs-plugin-enabled' in ranger_env or \
+          ranger_env['ranger-hdfs-plugin-enabled'].lower() != 'yes':
+        validationItems.append({"config-name": 'ranger-hdfs-plugin-enabled',
+                                "item": self.getWarnItem(
+                                  "ranger-hdfs-plugin-properties/ranger-hdfs-plugin-enabled must correspond ranger-env/ranger-hdfs-plugin-enabled")})
+    return self.toConfigurationValidationProblems(validationItems, "ranger-hdfs-plugin-properties")
+
+
   def validateHDFSConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
     # We can not access property hadoop.security.authentication from the
     # other config (core-site). That's why we are using another heuristics here
@@ -1061,7 +1082,15 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
       authorization_item = self.getErrorItem("hive_security_authorization should not be None "
                                              "if hive.security.authorization.enabled is set")
       validationItems.append({"config-name": "hive_security_authorization", "item": authorization_item})
-
+    if "hive_security_authorization" in hive_env and \
+        str(hive_env["hive_security_authorization"]).lower() == "ranger":
+      # ranger-hive-plugin must be enabled in ranger-env
+      ranger_env = getServicesSiteProperties(services, 'ranger-env')
+      if not ranger_env or not 'ranger-hive-plugin-enabled' in ranger_env or \
+          ranger_env['ranger-hive-plugin-enabled'].lower() != 'yes':
+        validationItems.append({"config-name": 'hive_security_authorization',
+                                "item": self.getWarnItem(
+                                  "ranger-env/ranger-hive-plugin-enabled must be enabled when hive_security_authorization is set to Ranger")})
     return self.toConfigurationValidationProblems(validationItems, "hive-env")
 
   def validateHiveConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
@@ -1185,6 +1214,62 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
 
     return self.toConfigurationValidationProblems(validationItems, "hbase-env")
 
+  def validateHBASERangerPluginConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+    ranger_plugin_properties = getSiteProperties(configurations, "ranger-hbase-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-hbase-plugin-enabled'] if ranger_plugin_properties else 'No'
+    if ranger_plugin_enabled.lower() == 'yes':
+      # ranger-hbase-plugin must be enabled in ranger-env
+      ranger_env = getServicesSiteProperties(services, 'ranger-env')
+      if not ranger_env or not 'ranger-hbase-plugin-enabled' in ranger_env or \
+          ranger_env['ranger-hbase-plugin-enabled'].lower() != 'yes':
+        validationItems.append({"config-name": 'ranger-hbase-plugin-enabled',
+                                "item": self.getWarnItem(
+                                  "ranger-hbase-plugin-properties/ranger-hbase-plugin-enabled must correspond ranger-env/ranger-hbase-plugin-enabled")})
+    return self.toConfigurationValidationProblems(validationItems, "ranger-hbase-plugin-properties")
+
+  def validateKnoxRangerPluginConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+    ranger_plugin_properties = getSiteProperties(configurations, "ranger-knox-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-knox-plugin-enabled'] if ranger_plugin_properties else 'No'
+    if ranger_plugin_enabled.lower() == 'yes':
+      # ranger-knox-plugin must be enabled in ranger-env
+      ranger_env = getServicesSiteProperties(services, 'ranger-env')
+      if not ranger_env or not 'ranger-knox-plugin-enabled' in ranger_env or \
+          ranger_env['ranger-knox-plugin-enabled'].lower() != 'yes':
+        validationItems.append({"config-name": 'ranger-knox-plugin-enabled',
+                                "item": self.getWarnItem(
+                                  "ranger-knox-plugin-properties/ranger-knox-plugin-enabled must correspond ranger-env/ranger-knox-plugin-enabled")})
+    return self.toConfigurationValidationProblems(validationItems, "ranger-knox-plugin-properties")
+
+  def validateKafkaRangerPluginConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+    ranger_plugin_properties = getSiteProperties(configurations, "ranger-kafka-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-kafka-plugin-enabled'] if ranger_plugin_properties else 'No'
+    if ranger_plugin_enabled.lower() == 'yes':
+      # ranger-kafka-plugin must be enabled in ranger-env
+      ranger_env = getServicesSiteProperties(services, 'ranger-env')
+      if not ranger_env or not 'ranger-kafka-plugin-enabled' in ranger_env or \
+          ranger_env['ranger-kafka-plugin-enabled'].lower() != 'yes':
+        validationItems.append({"config-name": 'ranger-kafka-plugin-enabled',
+                                "item": self.getWarnItem(
+                                  "ranger-kafka-plugin-properties/ranger-kafka-plugin-enabled must correspond ranger-env/ranger-kafka-plugin-enabled")})
+    return self.toConfigurationValidationProblems(validationItems, "ranger-kafka-plugin-properties")
+
+  def validateStormRangerPluginConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+    ranger_plugin_properties = getSiteProperties(configurations, "ranger-storm-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-storm-plugin-enabled'] if ranger_plugin_properties else 'No'
+    if ranger_plugin_enabled.lower() == 'yes':
+      # ranger-storm-plugin must be enabled in ranger-env
+      ranger_env = getServicesSiteProperties(services, 'ranger-env')
+      if not ranger_env or not 'ranger-storm-plugin-enabled' in ranger_env or \
+          ranger_env['ranger-storm-plugin-enabled'].lower() != 'yes':
+        validationItems.append({"config-name": 'ranger-storm-plugin-enabled',
+                                "item": self.getWarnItem(
+                                  "ranger-storm-plugin-properties/ranger-storm-plugin-enabled must correspond ranger-env/ranger-storm-plugin-enabled")})
+    return self.toConfigurationValidationProblems(validationItems, "ranger-storm-plugin-properties")
+
   def validateYARNEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
     validationItems = []
     if "yarn_cgroups_enabled" in properties:
@@ -1198,6 +1283,20 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
                               "item": self.getWarnItem("CPU Isolation should only be enabled if security is enabled")})
     return self.toConfigurationValidationProblems(validationItems, "yarn-env")
 
+  def validateYARNRangerPluginConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+    ranger_plugin_properties = getSiteProperties(configurations, "ranger-yarn-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-yarn-plugin-enabled'] if ranger_plugin_properties else 'No'
+    if ranger_plugin_enabled.lower() == 'yes':
+      # ranger-yarn-plugin must be enabled in ranger-env
+      ranger_env = getServicesSiteProperties(services, 'ranger-env')
+      if not ranger_env or not 'ranger-yarn-plugin-enabled' in ranger_env or \
+          ranger_env['ranger-yarn-plugin-enabled'].lower() != 'yes':
+        validationItems.append({"config-name": 'ranger-yarn-plugin-enabled',
+                                "item": self.getWarnItem(
+                                  "ranger-yarn-plugin-properties/ranger-yarn-plugin-enabled must correspond ranger-env/ranger-yarn-plugin-enabled")})
+    return self.toConfigurationValidationProblems(validationItems, "ranger-yarn-plugin-properties")
+
   def getMastersWithMultipleInstances(self):
     result = super(HDP22StackAdvisor, self).getMastersWithMultipleInstances()
     result.extend(['METRICS_COLLECTOR'])
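
The five plugin validators added above differ only in the property and config-type
names. As a standalone sketch of the shared check (every name below is a
hypothetical condensation for illustration, not the committed API):

def ranger_plugin_mismatch(configurations, services, plugin_property, plugin_site):
  # Returns the offending property name when the plugin is enabled locally
  # but not in ranger-env; returns None when the two settings agree.
  site = (configurations.get(plugin_site) or {}).get("properties") or {}
  if site.get(plugin_property, "No").lower() != "yes":
    return None  # plugin disabled locally; nothing to cross-check
  ranger_env = (services.get("configurations", {}).get("ranger-env") or {}).get("properties") or {}
  if ranger_env.get(plugin_property, "").lower() != "yes":
    return plugin_property  # enabled here, disabled (or absent) in ranger-env: WARN
  return None

configurations = {"ranger-hdfs-plugin-properties": {"properties": {"ranger-hdfs-plugin-enabled": "Yes"}}}
services = {"configurations": {"ranger-env": {"properties": {"ranger-hdfs-plugin-enabled": "No"}}}}
print(ranger_plugin_mismatch(configurations, services,
                             "ranger-hdfs-plugin-enabled", "ranger-hdfs-plugin-properties"))
# ranger-hdfs-plugin-enabled ; exactly the mismatch each new WARN item reports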

http://git-wip-us.apache.org/repos/asf/ambari/blob/b9bb0d35/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
index 85d6436..0b8430d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
@@ -1619,3 +1619,46 @@ class TestHDP206StackAdvisor(TestCase):
     recommendedDefaults = {}
     expected = {'level': 'ERROR', 'message': 'Value should be recommended for property1'}
     self.assertEquals(self.stackAdvisor.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "property1"), expected)
+
+  def test_getServicesSiteProperties(self):
+    import imp, os
+    testDirectory = os.path.dirname(os.path.abspath(__file__))
+    hdp206StackAdvisorPath = os.path.join(testDirectory, '../../../../../main/resources/stacks/HDP/2.0.6/services/stack_advisor.py')
+    stack_advisor = imp.load_source('stack_advisor', hdp206StackAdvisorPath)
+    services = {
+      "services":  [
+        {
+          "StackServices": {
+            "service_name": "RANGER"
+          },
+          "components": [
+            {
+              "StackServiceComponents": {
+                "component_name": "RANGER_ADMIN",
+                "hostnames": ["host1"]
+              }
+            }
+          ]
+        },
+        ],
+      "configurations": {
+        "admin-properties": {
+          "properties": {
+            "DB_FLAVOR": "NOT_EXISTING",
+            }
+        },
+        "ranger-admin-site": {
+          "properties": {
+            "ranger.service.http.port": "7777",
+            "ranger.service.http.enabled": "true",
+            }
+        }
+      }
+    }
+    expected = {
+      "ranger.service.http.port": "7777",
+      "ranger.service.http.enabled": "true",
+    }
+    siteProperties = stack_advisor.getServicesSiteProperties(services, "ranger-admin-site")
+    self.assertEquals(siteProperties, expected)
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/b9bb0d35/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
index 3362c94..2ce1cee 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
@@ -2916,7 +2916,35 @@ class TestHDP22StackAdvisor(TestCase):
     res = self.stackAdvisor.validateHiveConfigurationsEnv(properties, {}, configurations, {}, {})
     self.assertEquals(res, res_expected)
 
-    pass
+    # 2) fail: hive_security_authorization=Ranger but ranger plugin is disabled in ranger-env
+    properties = {"hive_security_authorization": "Ranger"}
+    configurations = {
+      "ranger-env":{
+        "properties":{
+          "ranger-hive-plugin-enabled":"No",
+        }
+      },
+      "hive-env":{
+        "properties":{
+          "hive_security_authorization": "Ranger",
+        }
+      }
+    }
+    services = {
+      "configurations": configurations
+    }
+    res_expected = []
+
+    services['configurations']['ranger-env']['properties']['ranger-hive-plugin-enabled'] = 'No'
+    res_expected = [{'config-type': 'hive-env',
+                     'message': 'ranger-env/ranger-hive-plugin-enabled must be enabled when hive_security_authorization is set to Ranger',
+                     'type': 'configuration',
+                     'config-name': 'hive_security_authorization',
+                     'level': 'WARN'}]
+
+    res = self.stackAdvisor.validateHiveConfigurationsEnv(properties, {}, configurations, services, {})
+    self.assertEquals(res, res_expected)
+
 
   def test_validateHiveConfigurations(self):
     properties = {"hive_security_authorization": "None",
@@ -3137,3 +3165,220 @@ class TestHDP22StackAdvisor(TestCase):
     }
     self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, hosts)
     self.assertEquals(configurations, expected)
+
+  def test_validateHDFSRangerPluginConfigurations(self):
+    configurations = {}
+    # 1) ok: ranger plugin is enabled in ranger-env and ranger-hdfs-plugin-properties
+    recommendedDefaults = {}
+    properties = {}
+    configurations = {
+      "ranger-env":{
+        "properties":{
+          "ranger-hdfs-plugin-enabled":"Yes",
+          }
+      },
+      "ranger-hdfs-plugin-properties":{
+        "properties":{
+          "ranger-hdfs-plugin-enabled":"Yes",
+        }
+      }
+    }
+    services = {
+      "configurations": configurations
+    }
+    res_expected = []
+
+    res = self.stackAdvisor.validateHDFSRangerPluginConfigurations(properties, recommendedDefaults, configurations, services, {})
+    self.assertEquals(res, res_expected)
+
+    # 2) fail: ranger plugin is disabled in ranger-env
+    services['configurations']['ranger-env']['properties']['ranger-hdfs-plugin-enabled'] = 'No'
+    res_expected = [{'config-type': 'ranger-hdfs-plugin-properties',
+                     'message': 'ranger-hdfs-plugin-properties/ranger-hdfs-plugin-enabled must correspond ranger-env/ranger-hdfs-plugin-enabled',
+                     'type': 'configuration',
+                     'config-name': 'ranger-hdfs-plugin-enabled',
+                     'level': 'WARN'}]
+
+    res = self.stackAdvisor.validateHDFSRangerPluginConfigurations(properties, recommendedDefaults, configurations, services, {})
+    self.assertEquals(res, res_expected)
+
+  def test_validateYARNRangerPluginConfigurations(self):
+    configurations = {}
+    # 1) ok: ranger plugin is enabled in ranger-env and ranger-yarn-plugin-properties
+    recommendedDefaults = {}
+    properties = {}
+    configurations = {
+      "ranger-env":{
+        "properties":{
+          "ranger-yarn-plugin-enabled":"Yes",
+          }
+      },
+      "ranger-yarn-plugin-properties":{
+        "properties":{
+          "ranger-yarn-plugin-enabled":"Yes",
+          }
+      }
+    }
+    services = {
+      "configurations": configurations
+    }
+    res_expected = []
+
+    res = self.stackAdvisor.validateYARNRangerPluginConfigurations(properties, recommendedDefaults, configurations, services, {})
+    self.assertEquals(res, res_expected)
+
+    # 2) fail: ranger plugin is disabled in ranger-env
+    services['configurations']['ranger-env']['properties']['ranger-yarn-plugin-enabled'] = 'No'
+    res_expected = [{'config-type': 'ranger-yarn-plugin-properties',
+                     'message': 'ranger-yarn-plugin-properties/ranger-yarn-plugin-enabled must correspond ranger-env/ranger-yarn-plugin-enabled',
+                     'type': 'configuration',
+                     'config-name': 'ranger-yarn-plugin-enabled',
+                     'level': 'WARN'}]
+
+    res = self.stackAdvisor.validateYARNRangerPluginConfigurations(properties, recommendedDefaults, configurations, services, {})
+    self.assertEquals(res, res_expected)
+
+  def test_validateHBASERangerPluginConfigurations(self):
+    configurations = {}
+    # 1) ok: ranger plugin is enabled in ranger-env and ranger-hbase-plugin-properties
+    recommendedDefaults = {}
+    properties = {}
+    configurations = {
+      "ranger-env":{
+        "properties":{
+          "ranger-hbase-plugin-enabled":"Yes",
+          }
+      },
+      "ranger-hbase-plugin-properties":{
+        "properties":{
+          "ranger-hbase-plugin-enabled":"Yes",
+          }
+      }
+    }
+    services = {
+      "configurations": configurations
+    }
+    res_expected = []
+
+    res = self.stackAdvisor.validateHBASERangerPluginConfigurations(properties, recommendedDefaults, configurations, services, {})
+    self.assertEquals(res, res_expected)
+
+    # 2) fail: ranger plugin is disabled in ranger-env
+    services['configurations']['ranger-env']['properties']['ranger-hbase-plugin-enabled'] = 'No'
+    res_expected = [{'config-type': 'ranger-hbase-plugin-properties',
+                     'message': 'ranger-hbase-plugin-properties/ranger-hbase-plugin-enabled must correspond ranger-env/ranger-hbase-plugin-enabled',
+                     'type': 'configuration',
+                     'config-name': 'ranger-hbase-plugin-enabled',
+                     'level': 'WARN'}]
+
+    res = self.stackAdvisor.validateHBASERangerPluginConfigurations(properties, recommendedDefaults, configurations, services, {})
+    self.assertEquals(res, res_expected)
+
+  def test_validateKnoxRangerPluginConfigurations(self):
+    configurations = {}
+    # 1) ok: ranger plugin is enabled in ranger-env and ranger-knox-plugin-properties
+    recommendedDefaults = {}
+    properties = {}
+    configurations = {
+      "ranger-env":{
+        "properties":{
+          "ranger-knox-plugin-enabled":"Yes",
+          }
+      },
+      "ranger-knox-plugin-properties":{
+        "properties":{
+          "ranger-knox-plugin-enabled":"Yes",
+          }
+      }
+    }
+    services = {
+      "configurations": configurations
+    }
+    res_expected = []
+
+    res = self.stackAdvisor.validateKnoxRangerPluginConfigurations(properties, recommendedDefaults, configurations, services, {})
+    self.assertEquals(res, res_expected)
+
+    # 2) fail: ranger plugin is disabled in ranger-env
+    services['configurations']['ranger-env']['properties']['ranger-knox-plugin-enabled'] = 'No'
+    res_expected = [{'config-type': 'ranger-knox-plugin-properties',
+                     'message': 'ranger-knox-plugin-properties/ranger-knox-plugin-enabled must correspond ranger-env/ranger-knox-plugin-enabled',
+                     'type': 'configuration',
+                     'config-name': 'ranger-knox-plugin-enabled',
+                     'level': 'WARN'}]
+
+    res = self.stackAdvisor.validateKnoxRangerPluginConfigurations(properties, recommendedDefaults, configurations, services, {})
+    self.assertEquals(res, res_expected)
+
+  def test_validateKafkaRangerPluginConfigurations(self):
+    configurations = {}
+    # 1) ok: ranger plugin is enabled in ranger-env and ranger-kafka-plugin-properties
+    recommendedDefaults = {}
+    properties = {}
+    configurations = {
+      "ranger-env":{
+        "properties":{
+          "ranger-kafka-plugin-enabled":"Yes",
+          }
+      },
+      "ranger-kafka-plugin-properties":{
+        "properties":{
+          "ranger-kafka-plugin-enabled":"Yes",
+          }
+      }
+    }
+    services = {
+      "configurations": configurations
+    }
+    res_expected = []
+
+    res = self.stackAdvisor.validateKafkaRangerPluginConfigurations(properties, recommendedDefaults, configurations, services, {})
+    self.assertEquals(res, res_expected)
+
+    # 2) fail: ranger plugin is disabled in ranger-env
+    services['configurations']['ranger-env']['properties']['ranger-kafka-plugin-enabled'] = 'No'
+    res_expected = [{'config-type': 'ranger-kafka-plugin-properties',
+                     'message': 'ranger-kafka-plugin-properties/ranger-kafka-plugin-enabled must correspond ranger-env/ranger-kafka-plugin-enabled',
+                     'type': 'configuration',
+                     'config-name': 'ranger-kafka-plugin-enabled',
+                     'level': 'WARN'}]
+
+    res = self.stackAdvisor.validateKafkaRangerPluginConfigurations(properties, recommendedDefaults, configurations, services, {})
+    self.assertEquals(res, res_expected)
+
+  def test_validateStormRangerPluginConfigurations(self):
+    configurations = {}
+    # 1) ok: ranger plugin is enabled in ranger-env and ranger-storm-plugin-properties
+    recommendedDefaults = {}
+    properties = {}
+    configurations = {
+      "ranger-env":{
+        "properties":{
+          "ranger-storm-plugin-enabled":"Yes",
+          }
+      },
+      "ranger-storm-plugin-properties":{
+        "properties":{
+          "ranger-storm-plugin-enabled":"Yes",
+          }
+      }
+    }
+    services = {
+      "configurations": configurations
+    }
+    res_expected = []
+
+    res = self.stackAdvisor.validateStormRangerPluginConfigurations(properties, recommendedDefaults, configurations, services, {})
+    self.assertEquals(res, res_expected)
+
+    # 2) fail: ranger plugin is disabled in ranger-env
+    services['configurations']['ranger-env']['properties']['ranger-storm-plugin-enabled'] = 'No'
+    res_expected = [{'config-type': 'ranger-storm-plugin-properties',
+                     'message': 'ranger-storm-plugin-properties/ranger-storm-plugin-enabled must correspond ranger-env/ranger-storm-plugin-enabled',
+                     'type': 'configuration',
+                     'config-name': 'ranger-storm-plugin-enabled',
+                     'level': 'WARN'}]
+
+    res = self.stackAdvisor.validateStormRangerPluginConfigurations(properties, recommendedDefaults, configurations, services, {})
+    self.assertEquals(res, res_expected)
+


[50/50] [abbrv] ambari git commit: Merge remote-tracking branch 'origin/trunk' into branch-dev-patch-upgrade

Posted by nc...@apache.org.
Merge remote-tracking branch 'origin/trunk' into branch-dev-patch-upgrade


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b25c8a84
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b25c8a84
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b25c8a84

Branch: refs/heads/branch-dev-patch-upgrade
Commit: b25c8a8480044b125f077df6c308059b42270fb3
Parents: 3265d0b 2ac1744
Author: Nate Cole <nc...@hortonworks.com>
Authored: Fri Oct 23 10:46:42 2015 -0400
Committer: Nate Cole <nc...@hortonworks.com>
Committed: Fri Oct 23 10:46:42 2015 -0400

----------------------------------------------------------------------
 .../app/scripts/controllers/mainCtrl.js         |   49 +-
 .../ui/admin-web/app/scripts/services/Auth.js   |    5 +
 .../app/views/modals/TimeoutWarning.html        |   28 +
 .../views/stackVersions/stackVersionPage.html   |    6 +-
 .../ambari_agent/HostCheckReportFileHandler.py  |   44 +-
 .../TestHostCheckReportFileHandler.py           |   72 +-
 .../libraries/functions/copy_tarball.py         |    2 +
 .../libraries/functions/ranger_functions.py     |    8 +-
 .../libraries/functions/ranger_functions_v2.py  |   39 +-
 .../libraries/functions/setup_ranger_plugin.py  |    6 +-
 .../functions/setup_ranger_plugin_xml.py        |    6 +-
 .../libraries/script/script.py                  |   68 +-
 .../metrics2/sink/timeline/TimelineMetric.java  |    7 +-
 .../timeline/cache/TimelineMetricsCache.java    |    2 +-
 .../cache/TimelineMetricsCacheTest.java         |    2 +-
 .../ambari-metrics-timelineservice/pom.xml      |   49 +-
 .../metrics/loadsimulator/LoadRunner.java       |    4 +-
 .../loadsimulator/MetricsLoadSimulator.java     |   17 +
 .../metrics/loadsimulator/data/AppID.java       |    6 +-
 .../jmetertest/AMSJMeterLoadTest.java           |  202 +++
 .../loadsimulator/jmetertest/AppGetMetric.java  |   57 +
 .../jmetertest/GetMetricRequestInfo.java        |   61 +
 .../jmetertest/JmeterTestPlanTask.java          |  269 ++++
 .../loadsimulator/net/RestMetricsSender.java    |    2 +-
 .../timeline/HBaseTimelineMetricStore.java      |    2 +-
 .../metrics/timeline/PhoenixHBaseAccessor.java  |   23 +-
 .../aggregators/TimelineMetricReadHelper.java   |    2 +-
 .../src/main/resources/loadsimulator/README     |   65 +
 .../loadsimulator/ams-jmeter.properties         |   56 +
 .../resources/loadsimulator/amsJmeterGraph.jmx  |  104 ++
 .../resources/loadsimulator/jmeter.properties   | 1172 ++++++++++++++++++
 .../loadsimulator/saveservice.properties        |  381 ++++++
 .../main/resources/metrics_def/AMS-HBASE.dat    |   18 +
 .../resources/metrics_def/FLUME_HANDLER.dat     |   40 +
 .../main/resources/metrics_def/KAFKA_BROKER.dat | 1104 +++++++++++++++++
 .../src/main/resources/metrics_def/NIMBUS.dat   |    7 +
 .../main/resources/ui_metrics_def/AMS-HBASE.dat |   26 +
 .../main/resources/ui_metrics_def/DATANODE.dat  |    4 +
 .../resources/ui_metrics_def/FLUME_HANDLER.dat  |   63 +
 .../src/main/resources/ui_metrics_def/HBASE.dat |   47 +
 .../src/main/resources/ui_metrics_def/HOST.dat  |   79 ++
 .../resources/ui_metrics_def/KAFKA_BROKER.dat   |   16 +
 .../main/resources/ui_metrics_def/NAMENODE.dat  |   30 +
 .../main/resources/ui_metrics_def/NIMBUS.dat    |   28 +
 .../resources/ui_metrics_def/NODEMANAGER.dat    |   33 +
 .../ui_metrics_def/RESOURCEMANAGER.dat          |   11 +
 .../metrics/timeline/ITClusterAggregator.java   |    5 +-
 .../metrics/timeline/ITMetricAggregator.java    |    3 +-
 .../metrics/timeline/MetricTestHelper.java      |    3 +-
 .../timeline/TestTimelineMetricStore.java       |    5 +-
 ambari-server/conf/unix/ambari.properties       |    1 +
 ambari-server/pom.xml                           |    3 +
 .../java/org/apache/ambari/server/Role.java     |    6 +
 .../ambari/server/bootstrap/BSRunner.java       |   28 +-
 .../ambari/server/bootstrap/SshHostInfo.java    |    7 +
 .../server/checks/ClientRetryPropertyCheck.java |    2 +-
 .../checks/HostsMasterMaintenanceCheck.java     |    4 +
 .../checks/HostsRepositoryVersionCheck.java     |    4 +
 .../server/configuration/Configuration.java     |   20 +
 .../AmbariManagementControllerImpl.java         |   48 +-
 .../ambari/server/controller/AmbariServer.java  |   26 +
 .../controller/ConfigurationResponse.java       |   43 +-
 .../ClusterStackVersionResourceProvider.java    |   13 +-
 .../HostStackVersionResourceProvider.java       |   13 +-
 .../PreUpgradeCheckResourceProvider.java        |   12 +-
 .../internal/UpgradeResourceProvider.java       |  203 ++-
 .../metrics/MetricsPaddingMethod.java           |   10 +-
 .../cache/TimelineMetricCacheProvider.java      |   29 +-
 .../cache/TimelineMetricsCacheSizeOfEngine.java |  137 ++
 .../ambari/server/orm/DBAccessorImpl.java       |   26 +-
 .../server/orm/dao/HostRoleCommandDAO.java      |   67 +
 .../orm/entities/HostRoleCommandEntity.java     |    4 +-
 .../server/orm/entities/UpgradeEntity.java      |   65 +-
 .../ambari/server/stack/MasterHostResolver.java |   23 +
 .../org/apache/ambari/server/state/Cluster.java |   19 +-
 .../org/apache/ambari/server/state/Config.java  |    4 +
 .../apache/ambari/server/state/ConfigImpl.java  |   24 +
 .../ambari/server/state/UpgradeHelper.java      |  104 +-
 .../server/state/cluster/ClusterImpl.java       |   60 +-
 .../ambari/server/state/stack/UpgradePack.java  |   11 +-
 .../state/stack/upgrade/ClusterGrouping.java    |    5 +-
 .../state/stack/upgrade/ColocatedGrouping.java  |    8 +-
 .../server/state/stack/upgrade/Grouping.java    |   30 +-
 .../stack/upgrade/ServiceCheckGrouping.java     |    2 +-
 .../state/stack/upgrade/StageWrapper.java       |   40 +-
 .../stack/upgrade/StageWrapperBuilder.java      |    5 +-
 .../server/state/stack/upgrade/TaskWrapper.java |   27 +-
 .../state/stack/upgrade/TaskWrapperBuilder.java |   10 +-
 .../server/upgrade/UpgradeCatalog210.java       |   10 +-
 .../server/upgrade/UpgradeCatalog213.java       |  526 +++++++-
 .../ambari/server/utils/SecretReference.java    |   77 ++
 ambari-server/src/main/python/bootstrap.py      |   66 +-
 .../main/resources/Ambari-DDL-MySQL-CREATE.sql  |  132 +-
 .../main/resources/Ambari-DDL-Oracle-CREATE.sql |    2 +
 .../resources/Ambari-DDL-Postgres-CREATE.sql    |    2 +
 .../Ambari-DDL-Postgres-EMBEDDED-CREATE.sql     |    2 +
 .../resources/Ambari-DDL-SQLAnywhere-CREATE.sql |    2 +
 .../resources/Ambari-DDL-SQLServer-CREATE.sql   |    2 +
 .../1.6.1.2.2.0/configuration/accumulo-env.xml  |   16 +
 .../1.6.1.2.2.0/configuration/accumulo-site.xml |   24 +
 .../0.1.0/configuration/ams-site.xml            |   39 +
 .../0.5.0.2.1/configuration/falcon-env.xml      |    9 +
 .../configuration/falcon-startup.properties.xml |   24 +
 .../FLUME/1.4.0.2.0/configuration/flume-env.xml |    5 +
 .../HAWQ/2.0.0.0/configuration/gpcheck-env.xml  |   86 ++
 .../2.0.0.0/configuration/hawq-limits-env.xml   |   46 +
 .../HAWQ/2.0.0.0/configuration/hawq-site.xml    |  167 +++
 .../2.0.0.0/configuration/hawq-sysctl-env.xml   |  247 ++++
 .../common-services/HAWQ/2.0.0.0/metainfo.xml   |  129 ++
 .../HAWQ/2.0.0.0/package/scripts/common.py      |  283 +++++
 .../HAWQ/2.0.0.0/package/scripts/constants.py   |   61 +
 .../HAWQ/2.0.0.0/package/scripts/hawqmaster.py  |   55 +
 .../HAWQ/2.0.0.0/package/scripts/hawqsegment.py |  102 ++
 .../HAWQ/2.0.0.0/package/scripts/hawqstandby.py |   58 +
 .../HAWQ/2.0.0.0/package/scripts/hawqstatus.py  |   64 +
 .../2.0.0.0/package/scripts/master_helper.py    |  194 +++
 .../HAWQ/2.0.0.0/package/scripts/params.py      |   92 ++
 .../2.0.0.0/package/scripts/service_check.py    |  102 ++
 .../HAWQ/2.0.0.0/package/scripts/utils.py       |  108 ++
 .../2.0.0.0/package/templates/hawq-hosts.j2     |    5 +
 .../package/templates/hawq-profile.sh.j2        |    8 +
 .../HAWQ/2.0.0.0/package/templates/slaves.j2    |    3 +
 .../0.96.0.2.0/configuration/hbase-env.xml      |    6 +
 .../0.96.0.2.0/configuration/hbase-site.xml     |   16 +
 .../HBASE/0.96.0.2.0/package/scripts/params.py  |    3 +-
 .../package/scripts/setup_ranger_hbase.py       |    7 +-
 .../HDFS/2.1.0.2.0/configuration/core-site.xml  |    3 +
 .../HDFS/2.1.0.2.0/configuration/hadoop-env.xml |    3 +
 .../HDFS/2.1.0.2.0/configuration/hdfs-site.xml  |   20 +
 .../HDFS/2.1.0.2.0/package/scripts/datanode.py  |   40 +-
 .../package/scripts/datanode_upgrade.py         |   27 +-
 .../2.1.0.2.0/package/scripts/hdfs_client.py    |    6 +-
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  |  124 +-
 .../2.1.0.2.0/package/scripts/journalnode.py    |   15 +-
 .../package/scripts/journalnode_upgrade.py      |    2 +-
 .../HDFS/2.1.0.2.0/package/scripts/namenode.py  |  115 +-
 .../package/scripts/namenode_ha_state.py        |   27 +
 .../package/scripts/namenode_upgrade.py         |  166 ++-
 .../2.1.0.2.0/package/scripts/nfsgateway.py     |    6 +-
 .../HDFS/2.1.0.2.0/package/scripts/params.py    |    1 +
 .../2.1.0.2.0/package/scripts/params_linux.py   |    8 +-
 .../package/scripts/setup_ranger_hdfs.py        |   11 +-
 .../HDFS/2.1.0.2.0/package/scripts/snamenode.py |    8 +-
 .../HDFS/2.1.0.2.0/package/scripts/utils.py     |   30 +
 .../2.1.0.2.0/package/scripts/zkfc_slave.py     |    4 +-
 .../HIVE/0.12.0.2.0/configuration/hive-env.xml  |    4 +
 .../HIVE/0.12.0.2.0/configuration/hive-site.xml |    9 +
 .../0.12.0.2.0/configuration/webhcat-site.xml   |    3 +
 .../HIVE/0.12.0.2.0/package/scripts/params.py   |    1 +
 .../package/scripts/setup_ranger_hive.py        |    9 +-
 .../0.8.1.2.2/configuration/kafka-broker.xml    |    6 +
 .../KAFKA/0.8.1.2.2/configuration/kafka-env.xml |    1 +
 .../0.8.1.2.2/configuration/kafka-log4j.xml     |    6 +
 .../KAFKA/0.8.1.2.2/package/scripts/params.py   |    1 +
 .../package/scripts/setup_ranger_kafka.py       |    8 +-
 .../1.10.3-10/configuration/kerberos-env.xml    |   15 +
 .../1.10.3-10/configuration/krb5-conf.xml       |    3 +
 .../KNOX/0.5.0.2.2/configuration/knox-env.xml   |    1 +
 .../ranger-knox-plugin-properties.xml           |    7 +
 .../KNOX/0.5.0.2.2/configuration/topology.xml   |    6 +
 .../KNOX/0.5.0.2.2/package/scripts/params.py    |    3 +-
 .../package/scripts/setup_ranger_knox.py        |    9 +-
 .../OOZIE/4.0.0.2.0/configuration/oozie-env.xml |    4 +
 .../common-services/PXF/3.0.0.0/metainfo.xml    |   71 ++
 .../PXF/3.0.0.0/package/scripts/pxfservice.py   |   41 +
 .../RANGER_KMS/0.5.0.2.3/package/scripts/kms.py |    1 -
 .../0.60.0.2.2/package/scripts/params_linux.py  |   50 +-
 .../0.60.0.2.2/package/scripts/service_check.py |    5 +
 .../SLIDER/0.60.0.2.2/package/scripts/slider.py |    5 +
 .../STORM/0.9.1.2.1/configuration/storm-env.xml |   12 +
 .../0.9.1.2.1/configuration/storm-site.xml      |   78 ++
 .../STORM/0.9.1.2.1/package/scripts/params.py   |    2 +-
 .../0.9.1.2.1/package/scripts/params_linux.py   |    5 +-
 .../package/scripts/setup_ranger_storm.py       |    7 +-
 .../STORM/0.9.1.2.1/package/scripts/storm.py    |   12 +-
 .../TEZ/0.4.0.2.1/configuration/tez-site.xml    |   18 +
 .../configuration-mapred/mapred-env.xml         |    3 +
 .../YARN/2.1.0.2.0/configuration/yarn-env.xml   |    8 +
 .../YARN/2.1.0.2.0/configuration/yarn-site.xml  |    9 +
 .../scripts/application_timeline_server.py      |    8 +-
 .../2.1.0.2.0/package/scripts/historyserver.py  |   14 +-
 .../package/scripts/mapreduce2_client.py        |    2 +-
 .../2.1.0.2.0/package/scripts/nodemanager.py    |   12 +-
 .../YARN/2.1.0.2.0/package/scripts/params.py    |    2 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |    2 +-
 .../package/scripts/resourcemanager.py          |    8 +-
 .../package/scripts/setup_ranger_yarn.py        |   10 +-
 .../2.1.0.2.0/package/scripts/yarn_client.py    |    2 +-
 .../3.4.5.2.0/configuration/zoo.cfg.xml         |    7 +
 .../3.4.5.2.0/configuration/zookeeper-env.xml   |    2 +
 .../3.4.5.2.0/package/scripts/zookeeper.py      |    6 +-
 .../package/scripts/zookeeper_client.py         |    8 +-
 .../package/scripts/zookeeper_server.py         |   25 +-
 .../package/scripts/zookeeper_service.py        |    4 +-
 .../BIGTOP/0.8/configuration/cluster-env.xml    |    7 +-
 .../services/YARN/configuration/yarn-site.xml   |   15 +
 .../ZOOKEEPER/configuration/zookeeper-log4j.xml |    4 +-
 .../HDP/2.0.6/configuration/cluster-env.xml     |    4 +-
 .../stacks/HDP/2.0.6/services/stack_advisor.py  |   29 +-
 .../services/HIVE/configuration/hive-site.xml   |    8 +
 .../services/YARN/configuration/yarn-env.xml    |    1 +
 .../services/YARN/configuration/yarn-site.xml   |   15 +
 .../HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml |  133 +-
 .../stacks/HDP/2.2/role_command_order.json      |   13 +-
 .../configuration/falcon-startup.properties.xml |    9 +
 .../services/HBASE/configuration/hbase-site.xml |   77 ++
 .../ranger-hbase-plugin-properties.xml          |   11 +
 .../ranger-hdfs-plugin-properties.xml           |    7 +
 .../services/HIVE/configuration/hive-site.xml   |    8 +
 .../ranger-hive-plugin-properties.xml           |    9 +
 .../ranger-storm-plugin-properties.xml          |    7 +
 .../services/STORM/configuration/storm-site.xml |   15 +
 .../2.2/services/TEZ/configuration/tez-site.xml |    3 +
 .../services/YARN/configuration/yarn-site.xml   |   12 +
 .../stacks/HDP/2.2/services/stack_advisor.py    |  213 +++-
 .../stacks/HDP/2.2/upgrades/config-upgrade.xml  |   11 +
 .../HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml |   13 +
 .../HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml |  692 +++++++++++
 .../stacks/HDP/2.2/upgrades/upgrade-2.2.xml     |    2 +-
 .../stacks/HDP/2.2/upgrades/upgrade-2.3.xml     |    8 +-
 .../services/HBASE/configuration/hbase-site.xml |    1 +
 .../services/HDFS/configuration/hadoop-env.xml  |    1 +
 .../services/HDFS/configuration/hdfs-site.xml   |   18 +
 .../KAFKA/configuration/kafka-broker.xml        |   13 +
 .../ranger-kafka-plugin-properties.xml          |    1 +
 .../RANGER/configuration/ranger-admin-site.xml  |    1 +
 .../services/STORM/configuration/storm-env.xml  |    5 +
 .../services/STORM/configuration/storm-site.xml |    1 +
 .../ranger-yarn-plugin-properties.xml           |    1 +
 .../services/YARN/configuration/yarn-site.xml   |   24 +
 .../stacks/HDP/2.3/services/stack_advisor.py    |  119 +-
 .../stacks/HDP/2.3/upgrades/config-upgrade.xml  |   13 +
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml |  519 ++++++++
 .../stacks/HDP/2.3/upgrades/upgrade-2.3.xml     |   18 +-
 .../HDPWIN/2.1/configuration/cluster-env.xml    |    2 +-
 .../catalog/UpgradeCatalog_2.1_to_2.3.json      |   21 +-
 .../catalog/UpgradeCatalog_2.2_to_2.3.json      |   21 +-
 .../checks/HostsMasterMaintenanceCheckTest.java |    4 +
 .../checks/HostsRepositoryVersionCheckTest.java |    5 +
 .../AmbariManagementControllerTest.java         |  145 +++
 .../internal/UpgradeResourceProviderTest.java   |   52 +-
 .../timeline/MetricsPaddingMethodTest.java      |    2 +-
 .../cache/TimelineMetricCacheSizingTest.java    |  110 ++
 .../timeline/cache/TimelineMetricCacheTest.java |    4 +-
 .../ambari/server/orm/DBAccessorImplTest.java   |   21 +
 .../server/orm/dao/HostRoleCommandDAOTest.java  |   40 +
 .../ambari/server/orm/dao/UpgradeDAOTest.java   |   31 +
 .../ambari/server/stack/StackManagerTest.java   |  105 ++
 .../server/state/stack/UpgradePackTest.java     |   50 +-
 .../stack/upgrade/StageWrapperBuilderTest.java  |    3 +-
 .../server/upgrade/UpgradeCatalog213Test.java   |  413 ++++--
 ambari-server/src/test/python/TestBootstrap.py  |  102 +-
 .../python/stacks/2.0.6/HDFS/test_datanode.py   |   22 +-
 .../stacks/2.0.6/HDFS/test_hdfs_client.py       |    8 +-
 .../stacks/2.0.6/HDFS/test_journalnode.py       |   18 +-
 .../python/stacks/2.0.6/HDFS/test_namenode.py   |   24 +-
 .../python/stacks/2.0.6/HDFS/test_nfsgateway.py |    4 +-
 .../stacks/2.0.6/YARN/test_historyserver.py     |   10 +-
 .../stacks/2.0.6/YARN/test_mapreduce2_client.py |    4 +-
 .../stacks/2.0.6/YARN/test_nodemanager.py       |   16 +-
 .../stacks/2.0.6/YARN/test_resourcemanager.py   |    4 +-
 .../stacks/2.0.6/YARN/test_yarn_client.py       |    4 +-
 .../2.0.6/ZOOKEEPER/test_zookeeper_client.py    |    8 +-
 .../2.0.6/ZOOKEEPER/test_zookeeper_server.py    |   12 +-
 .../stacks/2.0.6/common/test_stack_advisor.py   |   82 ++
 .../stacks/2.1/YARN/test_apptimelineserver.py   |    4 +-
 .../stacks/2.1/configs/default-storm-start.json |    3 +-
 .../test/python/stacks/2.1/configs/default.json |    3 +-
 .../stacks/2.1/configs/secured-storm-start.json |    3 +-
 .../test/python/stacks/2.1/configs/secured.json |    3 +-
 .../stacks/2.2/SLIDER/test_slider_client.py     |    8 +
 .../stacks/2.2/common/test_stack_advisor.py     |  375 +++++-
 .../stacks/2.3/common/test_stack_advisor.py     |  290 +++++
 .../services/HDFS/configuration/hadoop-env.xml  |    4 +-
 .../services/HDFS/configuration/hdfs-site.xml   |    6 +
 .../services/HDFS/configuration/hadoop-env.xml  |    4 +-
 ambari-web/app/assets/test/tests.js             |    6 +
 ambari-web/app/config.js                        |    1 +
 .../global/background_operations_controller.js  |   18 +-
 .../global/user_settings_controller.js          |   74 +-
 ambari-web/app/controllers/main.js              |   52 +-
 .../nameNode/step6_controller.js                |   69 +-
 .../main/admin/stack_and_upgrade_controller.js  |    6 +-
 .../alerts/manage_alert_groups_controller.js    |    2 +
 .../controllers/main/service/info/configs.js    |    2 +-
 ambari-web/app/controllers/wizard.js            |    7 +-
 .../app/controllers/wizard/step2_controller.js  |   25 +-
 .../app/controllers/wizard/step3_controller.js  |    2 +
 .../app/controllers/wizard/step6_controller.js  |   28 +-
 .../app/controllers/wizard/step7_controller.js  |   17 +-
 ambari-web/app/data/HDP2.2/site_properties.js   |   67 -
 ambari-web/app/data/HDP2.3/site_properties.js   |    9 +-
 .../app/data/HDP2/gluster_fs_properties.js      |   94 ++
 ambari-web/app/data/HDP2/site_properties.js     |  571 +--------
 .../mappers/alert_definition_summary_mapper.js  |   10 +-
 .../app/mappers/alert_definitions_mapper.js     |   16 +-
 .../configs/stack_config_properties_mapper.js   |    2 +-
 ambari-web/app/mappers/configs/themes_mapper.js |   64 +-
 ambari-web/app/messages.js                      |   14 +-
 .../mixins/common/configs/configs_comparator.js |    9 +-
 .../app/mixins/common/configs/configs_loader.js |    1 +
 .../mixins/common/configs/enhanced_configs.js   |   45 +-
 ambari-web/app/mixins/common/serverValidator.js |   13 +-
 ambari-web/app/models.js                        |    2 +-
 .../app/models/configs/config_condition.js      |   65 -
 ambari-web/app/models/configs/config_group.js   |    5 +-
 .../models/configs/objects/service_config.js    |    2 +-
 .../configs/objects/service_config_property.js  |   39 +-
 .../app/models/configs/theme/sub_section.js     |    9 +-
 .../app/models/configs/theme/sub_section_tab.js |    9 +-
 .../app/models/configs/theme/theme_condition.js |   76 ++
 ambari-web/app/models/quick_links.js            |    8 +-
 .../app/models/stack_service_component.js       |    4 +-
 ambari-web/app/styles/alerts.less               |    4 +-
 ambari-web/app/styles/application.less          |   21 +-
 ambari-web/app/styles/common.less               |    6 +-
 .../app/templates/common/chart/linear_time.hbs  |   49 +-
 .../common/configs/config_history_flow.hbs      |    6 +-
 .../common/configs/service_config_category.hbs  |    2 +-
 .../common/form/manage_credentilas_form.hbs     |   29 +-
 ambari-web/app/templates/common/modal_popup.hbs |    2 +-
 .../config_recommendation_popup.hbs             |    2 +-
 ambari-web/app/templates/common/settings.hbs    |    2 +-
 ambari-web/app/templates/main/admin.hbs         |    4 +-
 .../app/templates/main/admin/kerberos.hbs       |    6 +-
 .../admin/stack_upgrade/edit_repositories.hbs   |    6 +-
 .../admin/stack_upgrade/upgrade_version_box.hbs |    6 +-
 .../main/admin/stack_upgrade/versions.hbs       |    4 +-
 ambari-web/app/templates/main/alerts.hbs        |   10 +-
 .../alerts/manage_alert_notifications_popup.hbs |   26 +-
 .../templates/main/dashboard/config_history.hbs |   10 +-
 .../main/dashboard/widgets/hbase_links.hbs      |    8 +-
 .../main/dashboard/widgets/hdfs_links.hbs       |   12 +-
 .../main/dashboard/widgets/yarn_links.hbs       |    4 +-
 .../app/templates/main/host/addHost/step4.hbs   |    4 +-
 .../main/host/details/host_component.hbs        |    2 +-
 ambari-web/app/templates/main/host/summary.hbs  |    2 +-
 .../app/templates/main/service/info/configs.hbs |    4 +-
 .../app/templates/main/service/info/summary.hbs |    2 +-
 .../service/info/summary/client_components.hbs  |    2 +-
 .../service/info/summary/master_components.hbs  |    4 +-
 .../service/info/summary/slave_components.hbs   |    4 +-
 ambari-web/app/templates/main/service/item.hbs  |    2 +-
 .../manage_configuration_groups_popup.hbs       |    4 +-
 .../app/templates/main/service/reassign.hbs     |   12 +-
 .../templates/main/service/services/flume.hbs   |   20 +-
 .../templates/main/service/services/hbase.hbs   |   28 +-
 .../templates/main/service/services/hdfs.hbs    |   60 +-
 .../templates/main/service/services/ranger.hbs  |    4 +-
 .../templates/main/service/services/storm.hbs   |   24 +-
 .../templates/main/service/services/yarn.hbs    |   45 +-
 ambari-web/app/templates/wizard/step2.hbs       |   12 +
 ambari-web/app/templates/wizard/step4.hbs       |   10 +-
 ambari-web/app/templates/wizard/step6.hbs       |   10 +-
 ambari-web/app/templates/wizard/step9.hbs       |    4 +-
 ambari-web/app/utils/ajax/ajax.js               |   17 +
 ambari-web/app/utils/blueprint.js               |    8 +-
 ambari-web/app/utils/config.js                  |  126 +-
 .../app/utils/configs/config_property_helper.js |   17 +-
 .../configs/modification_handlers/hbase.js      |  107 --
 .../utils/configs/modification_handlers/hdfs.js |   55 -
 .../configs/modification_handlers/kafka.js      |   71 --
 .../utils/configs/modification_handlers/knox.js |   67 -
 .../configs/modification_handlers/storm.js      |   70 --
 .../utils/configs/modification_handlers/yarn.js |   71 --
 ambari-web/app/utils/credentials.js             |   22 +-
 ambari-web/app/utils/date/timezone.js           |   39 +-
 ambari-web/app/utils/helper.js                  |   91 +-
 ambari-web/app/utils/hosts.js                   |    2 +
 .../common/assign_master_components_view.js     |   19 +
 .../common/configs/compare_property_view.js     |    1 +
 .../common/configs/overriddenProperty_view.js   |    1 +
 .../configs/service_config_layout_tab_view.js   |    3 +-
 .../views/common/configs/service_config_view.js |    3 +-
 .../configs/service_configs_by_category_view.js |    2 +-
 .../configs/widgets/config_widget_view.js       |   31 +-
 .../common/form/manage_credentials_form_view.js |   17 +-
 ambari-web/app/views/common/modal_popup.js      |    3 +-
 .../dependent_configs_list_popup.js             |    1 +
 .../manage_kdc_credentials_popup.js             |   24 +-
 .../app/views/common/quick_view_link_view.js    |    5 +
 .../highAvailability/nameNode/step6_view.js     |   17 +-
 .../admin/stack_upgrade/upgrade_wizard_view.js  |    4 +-
 ambari-web/app/views/main/host/metrics/disk.js  |    2 +-
 .../info/metrics/flume/channel_size_mma.js      |   17 +-
 .../service/info/metrics/flume/channel_sum.js   |   10 +-
 .../info/metrics/flume/flume_incoming_mma.js    |   17 +-
 .../info/metrics/flume/flume_incoming_sum.js    |   10 +-
 .../info/metrics/flume/flume_outgoing_mma.js    |   17 +-
 .../info/metrics/flume/flume_outgoing_sum.js    |   10 +-
 .../wizard/step3/hostWarningPopupBody_view.js   |    3 +-
 .../global/user_settings_controller_test.js     |   30 +-
 .../admin/kerberos/step2_controller_test.js     |   13 +-
 .../admin/stack_and_upgrade_controller_test.js  |    4 +-
 .../main/service/info/config_test.js            |    8 +-
 .../test/controllers/wizard/step2_test.js       |   45 +
 .../test/controllers/wizard/step3_test.js       |    4 +
 .../test/data/HDP2.2/site_properties_test.js    |    9 +
 .../test/data/HDP2.3/site_properties_test.js    |   13 +-
 .../test/data/HDP2/site_properties_test.js      |    9 +
 .../kdc_credentials_controller_mixin_test.js    |   72 +-
 .../test/mock_data_setup/configs_mock_data.js   |    2 +-
 .../test/models/stack_service_component_test.js |   64 +-
 ambari-web/test/service_components.js           |  165 +++
 ambari-web/test/utils/config_test.js            |   17 +-
 .../configs/config_property_helper_test.js      |    8 +-
 ambari-web/test/utils/date/timezone_test.js     |   36 +-
 ambari-web/test/utils/helper_test.js            |   88 ++
 .../service_configs_by_category_view_test.js    |    2 +-
 .../form/manage_kdc_credentials_form_test.js    |    4 +-
 .../nameNode/step6_view_test.js                 |   23 +-
 .../stack_upgrade/upgrade_wizard_view_test.js   |    8 +-
 .../info/metrics/flume/channel_size_mma_test.js |  142 +++
 .../info/metrics/flume/channel_sum_test.js      |  108 ++
 .../metrics/flume/flume_incoming_mma_test.js    |  142 +++
 .../metrics/flume/flume_incoming_sum_test.js    |  108 ++
 .../metrics/flume/flume_outgoing_mma_test.js    |  142 +++
 .../metrics/flume/flume_outgoing_sum_test.js    |  108 ++
 .../main/resources/ui/app/controllers/queues.js |   36 +-
 .../ui/app/templates/capacityEditForm.hbs       |    4 +-
 .../app/templates/components/queueListItem.hbs  |    6 +
 .../ui/app/templates/schedulerPanel.hbs         |    2 +-
 contrib/views/hive/pom.xml                      |   12 +-
 .../ambari/view/hive/client/Connection.java     |  262 +++-
 .../view/hive/client/ConnectionFactory.java     |   11 +-
 .../hive/client/HttpBasicAuthInterceptor.java   |   55 +
 .../client/HttpKerberosRequestInterceptor.java  |   72 ++
 .../hive/client/HttpRequestInterceptorBase.java |   88 ++
 .../apache/ambari/view/hive/client/Utils.java   |   47 +-
 contrib/views/hive/src/main/resources/view.xml  |   30 +
 .../ambari/view/hive/client/ConnectionTest.java |    6 +-
 .../hive/resources/jobs/JobLDAPServiceTest.java |    5 +-
 .../resources/ui/scripts/init-ambari-view.js    |    6 +-
 433 files changed, 15758 insertions(+), 2682 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b25c8a84/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index df5d199,2a1431d..5f216db
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@@ -491,15 -472,9 +491,15 @@@ public class ClusterStackVersionResourc
      Set<String> servicesOnHost = new HashSet<String>();
      List<ServiceComponentHost> components = cluster.getServiceComponentHosts(host.getHostName());
      for (ServiceComponentHost component : components) {
 -      servicesOnHost.add(component.getServiceName());
 +      if (repoServices.isEmpty() || repoServices.contains(component.getServiceName())) {
 +        servicesOnHost.add(component.getServiceName());
 +      }
 +    }
 +
 +    if (servicesOnHost.isEmpty()) {
 +      return null;
      }
- 
+     List<String> blacklistedPackagePrefixes = configuration.getRollingUpgradeSkipPackagesPrefixes();
      for (String serviceName : servicesOnHost) {
        ServiceInfo info;
        try {

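In the merged hunk above, a host only counts a service if the (patch) repository actually carries it, and a host running none of the repository's services yields null, apparently so no distribution task is created for it. A minimal Python sketch of that filter, with hypothetical names:

    def services_to_install(host_services, repo_services):
        # Mirrors the filter above: an empty repo_services set means a
        # full (non-patch) repository, so every service qualifies.
        selected = [s for s in host_services
                    if not repo_services or s in repo_services]
        return selected or None  # None: skip this host entirely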
http://git-wip-us.apache.org/repos/asf/ambari/blob/b25c8a84/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index 8124326,279b31f..2c781ff
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@@ -1251,19 -1255,9 +1257,19 @@@ public class ClusterImpl implements Clu
        return RepositoryVersionState.INSTALLING;
      }
  
 +    if (totalNotRequired > 0) {
 +      if (totalInstalling + totalInstalled + totalNotRequired == totalHosts) {
 +        return RepositoryVersionState.INSTALLING;
 +      }
 +
 +      if (totalInstalled + totalNotRequired == totalHosts) {
 +        return RepositoryVersionState.INSTALLED;
 +      }
 +    }
 +
      // Also returns when have a mix of CURRENT and INSTALLING|INSTALLED|UPGRADING|UPGRADED
      LOG.warn("have a mix of CURRENT and INSTALLING|INSTALLED|UPGRADING|UPGRADED host versions, " +
-             "returning OUT_OF_SYNC as cluster version. Host version states: " + stateToHosts.toString());
+         "returning OUT_OF_SYNC as cluster version. Host version states: " + stateToHosts.toString());
      return RepositoryVersionState.OUT_OF_SYNC;
    }
  

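The merged ClusterImpl hunk adds NOT_REQUIRED hosts to the roll-up that turns per-host version states into one cluster-level state. A condensed Python sketch of the decision (the hunk tests the INSTALLING combination first; the checks are reordered here so both branches are reachable, but the intent, counting exempt hosts toward completion, is the same):

    def aggregate_state(installing, installed, not_required, total_hosts):
        # NOT_REQUIRED hosts now count toward completion, so a cluster
        # where every host is done or exempt resolves to a real state
        # instead of falling through to OUT_OF_SYNC.
        if not_required > 0:
            if installed + not_required == total_hosts:
                return "INSTALLED"
            if installing + installed + not_required == total_hosts:
                return "INSTALLING"
        return "OUT_OF_SYNC"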
http://git-wip-us.apache.org/repos/asf/ambari/blob/b25c8a84/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/b25c8a84/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/b25c8a84/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/b25c8a84/ambari-server/src/main/resources/Ambari-DDL-Postgres-EMBEDDED-CREATE.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/b25c8a84/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/b25c8a84/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
----------------------------------------------------------------------


[33/50] [abbrv] ambari git commit: AMBARI-13409. AMS Load Simulator updates. (Aravindan Vijayan via swagle)

Posted by nc...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/HBASE.dat
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/HBASE.dat b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/HBASE.dat
new file mode 100644
index 0000000..ae60458
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/HBASE.dat
@@ -0,0 +1,47 @@
+dfs.FSNamesystem.MissingReplOneBlocks
+dfs.FSNamesystem.TransactionsSinceLastCheckpoint
+dfs.FSNamesystem.MillisSinceLastLoadedEdits
+dfs.FSNamesystem.SnapshottableDirectories
+master.Master.QueueCallTime_median
+dfs.FSNamesystem.LastCheckpointTime
+dfs.FSNamesystem.TotalFiles
+dfs.FSNamesystem.ExpiredHeartbeats
+dfs.FSNamesystem.PostponedMisreplicatedBlocks
+dfs.FSNamesystem.LastWrittenTransactionId
+jvm.JvmMetrics.MemHeapCommittedM
+dfs.FSNamesystem.Snapshots
+dfs.FSNamesystem.TransactionsSinceLastLogRoll
+master.Server.averageLoad
+jvm.JvmMetrics.MemHeapUsedM
+master.AssignmentManger.ritCount
+dfs.FSNamesystem.PendingDataNodeMessageCount
+dfs.FSNamesystem.StaleDataNodes
+|hostname|
+regionserver.Server.percentFilesLocal
+||
+regionserver.Server.Append_num_ops._sum
+pkts_out._avg
+cpu_wio._sum
+regionserver.Server.Delete_num_ops._sum
+regionserver.Server.updatesBlockedTime._sum
+regionserver.Server.Delete_95th_percentile._max
+pkts_in._avg
+regionserver.Server.Get_num_ops._sum
+cpu_user._sum
+regionserver.RegionServer.numActiveHandler._sum
+cpu_system._sum
+regionserver.Server.Increment_num_ops._sum
+regionserver.RegionServer.numCallsInGeneralQueue._sum
+cpu_nice._sum
+regionserver.Server.Get_95th_percentile._max
+regionserver.Server.Mutate_num_ops._sum
+regionserver.Server.ScanNext_num_ops._sum
+cpu_idle._sum
+regionserver.Server.Increment_95th_percentile._max
+regionserver.Server.Mutate_95th_percentile._max
+read_bps._sum
+write_bps._sum
+regionserver.Server.Append_95th_percentile._max
+regionserver.RegionServer.numOpenConnections._sum
+regionserver.Server.ScanNext_95th_percentile._max
+|startTime|endTime|
\ No newline at end of file
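The ui_metrics_def .dat files added by this commit appear to share one layout: runs of metric names, each run closed by a pipe-delimited line (|hostname|, |startTime|endTime|, ...) naming the request parameters sent with that block, with || seemingly marking a block that takes none. Assuming that reading (the authoritative consumer is the JMeter load-test code added in this same commit), a toy reader could be:

    def read_metric_blocks(path):
        # Toy parser for the inferred layout: metric names accumulate
        # until a '|...|' separator, which lists the request parameters
        # for the block it closes.
        blocks, metrics = [], []
        with open(path) as fh:
            for raw in fh:
                line = raw.strip()
                if not line:
                    continue
                if line.startswith("|"):
                    params = [p for p in line.split("|") if p]
                    blocks.append((metrics, params))
                    metrics = []
                else:
                    metrics.append(line)
        return blocks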

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/HOST.dat
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/HOST.dat b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/HOST.dat
new file mode 100644
index 0000000..96e7ddd
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/HOST.dat
@@ -0,0 +1,79 @@
+cpu_wio
+|hostname|
+cpu_wio
+cpu_user
+mem_cached
+mem_free
+cpu_nice
+cpu_idle
+cpu_system
+mem_total
+|hostname|startTime|endTime|
+write_time._sum
+disk_free._min
+disk_free._avg
+read_count._max
+write_bytes._avg
+write_bytes._min
+disk_free
+load_one
+disk_total._max
+write_time._avg
+write_time._min
+write_count
+write_bytes._sum
+write_count._max
+read_bytes._min
+read_bytes._avg
+read_bytes._sum
+disk_free._sum
+read_count
+read_time._avg
+read_time._min
+cpu_user
+write_bytes
+read_time._sum
+write_time._max
+cpu_system
+write_count._avg
+write_count._min
+read_bytes
+mem_total
+read_time._max
+read_time
+disk_total
+write_count._sum
+read_count._avg
+read_count._min
+read_bytes._max
+disk_free._max
+mem_free
+disk_total._sum
+write_time
+read_count._sum
+write_bytes._max
+disk_total._min
+disk_total._avg
+|hostname|startTime|endTime|
+proc_total
+pkts_in
+cpu_wio
+cpu_user
+bytes_in
+mem_cached
+cpu_aidle
+load_five
+mem_buffers
+cpu_idle
+load_one
+cpu_system
+proc_run
+load_fifteen
+disk_total
+bytes_out
+mem_shared
+mem_free
+cpu_nice
+swap_free
+pkts_out
+|hostname|startTime|endTime|
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/KAFKA_BROKER.dat
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/KAFKA_BROKER.dat b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/KAFKA_BROKER.dat
new file mode 100644
index 0000000..c8bb64b
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/KAFKA_BROKER.dat
@@ -0,0 +1,16 @@
+kafka.controller.KafkaController.ActiveControllerCount
+|startTime|endTime|
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.1MinuteRate
+kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec.1MinuteRate
+|startTime|endTime|
+kafka.server.BrokerTopicMetrics.BytesInPerSec.1MinuteRate
+kafka.server.BrokerTopicMetrics.MessagesInPerSec.1MinuteRate
+kafka.server.BrokerTopicMetrics.BytesOutPerSec.1MinuteRate
+|startTime|endTime|
+kafka.server.ReplicaManager.UnderReplicatedPartitions
+kafka.server.ReplicaManager.PartitionCount
+|startTime|endTime|
+kafka.server.ReplicaFetcherManager.MaxLag.clientId.Replica
+|startTime|endTime|
+kafka.log.LogFlushStats.LogFlushRateAndTimeMs.1MinuteRate
+|startTime|endTime|
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/NAMENODE.dat
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/NAMENODE.dat b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/NAMENODE.dat
new file mode 100644
index 0000000..81360c5
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/NAMENODE.dat
@@ -0,0 +1,30 @@
+dfs.FSNamesystem.MissingReplOneBlocks
+dfs.FSNamesystem.TransactionsSinceLastCheckpoint
+dfs.FSNamesystem.MillisSinceLastLoadedEdits
+dfs.FSNamesystem.SnapshottableDirectories
+master.Master.QueueCallTime_median
+dfs.FSNamesystem.LastCheckpointTime
+dfs.FSNamesystem.TotalFiles
+dfs.FSNamesystem.ExpiredHeartbeats
+dfs.FSNamesystem.PostponedMisreplicatedBlocks
+dfs.FSNamesystem.LastWrittenTransactionId
+jvm.JvmMetrics.MemHeapCommittedM
+dfs.FSNamesystem.Snapshots
+dfs.FSNamesystem.TransactionsSinceLastLogRoll
+master.Server.averageLoad
+jvm.JvmMetrics.MemHeapUsedM
+master.AssignmentManger.ritCount
+dfs.FSNamesystem.PendingDataNodeMessageCount
+dfs.FSNamesystem.StaleDataNodes
+|hostname|
+dfs.FSNamesystem.UnderReplicatedBlocks
+||
+rpc.rpc.NumOpenConnections
+rpc.rpc.RpcQueueTimeAvgTime
+jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep
+jvm.JvmMetrics.GcCountConcurrentMarkSweep
+jvm.JvmMetrics.MemHeapCommittedM
+rpc.rpc.RpcProcessingTimeAvgTime
+jvm.JvmMetrics.MemHeapUsedM
+jvm.JvmMetrics.GcCount
+|hostname|startTime|endTime|
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/NIMBUS.dat
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/NIMBUS.dat b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/NIMBUS.dat
new file mode 100644
index 0000000..c407266
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/NIMBUS.dat
@@ -0,0 +1,28 @@
+dfs.FSNamesystem.MissingReplOneBlocks
+dfs.FSNamesystem.TransactionsSinceLastCheckpoint
+dfs.FSNamesystem.MillisSinceLastLoadedEdits
+dfs.FSNamesystem.SnapshottableDirectories
+master.Master.QueueCallTime_median
+dfs.FSNamesystem.LastCheckpointTime
+dfs.FSNamesystem.TotalFiles
+dfs.FSNamesystem.ExpiredHeartbeats
+dfs.FSNamesystem.PostponedMisreplicatedBlocks
+dfs.FSNamesystem.LastWrittenTransactionId
+jvm.JvmMetrics.MemHeapCommittedM
+dfs.FSNamesystem.Snapshots
+dfs.FSNamesystem.TransactionsSinceLastLogRoll
+master.Server.averageLoad
+jvm.JvmMetrics.MemHeapUsedM,master.AssignmentManger.ritCount
+dfs.FSNamesystem.PendingDataNodeMessageCount
+dfs.FSNamesystem.StaleDataNodes
+|hostname|
+Topologies
+|startTime|endTime|
+Used Slots
+Free Slots
+Total Slots
+|startTime|endTime|
+Total Executors
+|startTime|endTime|
+Total Tasks
+|startTime|endTime|
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/NODEMANAGER.dat
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/NODEMANAGER.dat b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/NODEMANAGER.dat
new file mode 100644
index 0000000..971c37f
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/NODEMANAGER.dat
@@ -0,0 +1,33 @@
+cpu_system._sum
+pkts_out._avg
+cpu_nice._sum
+mem_free._avg
+cpu_wio._sum
+yarn.NodeManagerMetrics.ContainersKilled._sum
+yarn.NodeManagerMetrics.ContainersRunning._sum
+pkts_in._avg
+cpu_idle._sum
+mem_cached._avg
+cpu_user._sum
+yarn.NodeManagerMetrics.ContainersFailed._sum
+yarn.NodeManagerMetrics.ContainersIniting._sum
+yarn.NodeManagerMetrics.ContainersCompleted._sum
+yarn.NodeManagerMetrics.ContainersLaunched._sum
+mem_total._avg
+|startTime|endTime|
+yarn.NodeManagerMetrics.ContainersIniting
+yarn.NodeManagerMetrics.ContainersRunning
+yarn.NodeManagerMetrics.ContainersKilled
+yarn.NodeManagerMetrics.ContainersCompleted
+yarn.NodeManagerMetrics.ContainersLaunched
+yarn.NodeManagerMetrics.ContainersFailed
+|hostname|
+yarn.NodeManagerMetrics.AllocatedContainers
+|hostname|
+yarn.NodeManagerMetrics.AllocatedGB
+|hostname|
+yarn.NodeManagerMetrics.AllocatedVCores
+|hostname|
+yarn.NodeManagerMetrics.AllocatedGB
+yarn.NodeManagerMetrics.AvailableGB
+|hostname|
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2f306d9/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/RESOURCEMANAGER.dat
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/RESOURCEMANAGER.dat b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/RESOURCEMANAGER.dat
new file mode 100644
index 0000000..8c4aec5
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/ui_metrics_def/RESOURCEMANAGER.dat
@@ -0,0 +1,11 @@
+yarn.QueueMetrics.Queue=root.AppsCompleted
+yarn.QueueMetrics.Queue=root.AppsRunning
+yarn.QueueMetrics.Queue=root.AppsFailed
+yarn.QueueMetrics.Queue=root.AllocatedMB
+yarn.QueueMetrics.Queue=root.AppsSubmitted
+yarn.QueueMetrics.Queue=root.AvailableVCores
+yarn.QueueMetrics.Queue=root.AppsPending
+yarn.QueueMetrics.Queue=root.AllocatedVCores
+yarn.QueueMetrics.Queue=root.AvailableMB
+yarn.QueueMetrics.Queue=root.AppsKilled
+|hostname|startTime|endTime|
\ No newline at end of file


[40/50] [abbrv] ambari git commit: AMBARI-12702: Express Upgrade: Parallelize restarts (jluniya)

Posted by nc...@apache.org.
AMBARI-12702: Express Upgrade: Parallelize restarts (jluniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b4468ce4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b4468ce4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b4468ce4

Branch: refs/heads/branch-dev-patch-upgrade
Commit: b4468ce44b28bc72a64681e6dc7617b4a2812792
Parents: 049934f
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Thu Oct 22 19:31:32 2015 -0700
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Thu Oct 22 19:31:32 2015 -0700

----------------------------------------------------------------------
 .../ambari/server/state/UpgradeHelper.java      | 38 +++++++++++---------
 .../state/stack/upgrade/ClusterGrouping.java    |  2 +-
 .../state/stack/upgrade/ColocatedGrouping.java  |  2 +-
 .../server/state/stack/upgrade/Grouping.java    | 19 +++++++---
 .../stack/upgrade/ServiceCheckGrouping.java     |  2 +-
 .../stack/upgrade/StageWrapperBuilder.java      |  2 +-
 .../stack/upgrade/StageWrapperBuilderTest.java  |  2 +-
 7 files changed, 40 insertions(+), 27 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b4468ce4/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
index fd92d21..ddfc36e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
@@ -286,10 +286,26 @@ public class UpgradeHelper {
         groupHolder.skippable = true;
       }
 
+      // Attempt to get the function of the group, during a NonRolling Upgrade
+      Task.Type functionName = null;
+      boolean scheduleInParallel = false;
       // NonRolling defaults to not performing service checks on a group.
       // Of course, a Service Check Group does indeed run them.
       if (upgradePack.getType() == UpgradeType.NON_ROLLING) {
         group.performServiceCheck = false;
+
+        if (RestartGrouping.class.isInstance(group)) {
+          functionName = ((RestartGrouping) group).getFunction();
+          scheduleInParallel = true;
+        }
+        if (StartGrouping.class.isInstance(group)) {
+          functionName = ((StartGrouping) group).getFunction();
+          scheduleInParallel = true;
+        }
+        if (StopGrouping.class.isInstance(group)) {
+          functionName = ((StopGrouping) group).getFunction();
+          scheduleInParallel = true;
+        }
       }
 
       StageWrapperBuilder builder = group.getBuilder();
@@ -311,19 +327,7 @@ public class UpgradeHelper {
         if (upgradePack.getType() == UpgradeType.ROLLING && !allTasks.containsKey(service.serviceName)) {
           continue;
         }
-        
-        // Attempt to get the function of the group, during a NonRolling Upgrade
-        Task.Type functionName = null;
 
-        if (RestartGrouping.class.isInstance(group)) {
-          functionName = ((RestartGrouping) group).getFunction();
-        }
-        if (StartGrouping.class.isInstance(group)) {
-          functionName = ((StartGrouping) group).getFunction();
-        }
-        if (StopGrouping.class.isInstance(group)) {
-          functionName = ((StopGrouping) group).getFunction();
-        }
 
         for (String component : service.components) {
           // Rolling Upgrade has exactly one task for a Component.
@@ -392,7 +396,7 @@ public class UpgradeHelper {
                   hostsType.hosts = order;
 
                   builder.add(context, hostsType, service.serviceName,
-                      svc.isClientOnlyService(), pc, null);
+                      svc.isClientOnlyService(), pc, null, false);
                 }
                 break;
               case NON_ROLLING:
@@ -417,21 +421,21 @@ public class UpgradeHelper {
 
 
                   builder.add(context, ht1, service.serviceName,
-                      svc.isClientOnlyService(), pc, h1Params);
+                      svc.isClientOnlyService(), pc, h1Params, false);
 
                   builder.add(context, ht2, service.serviceName,
-                      svc.isClientOnlyService(), pc, h2Params);
+                      svc.isClientOnlyService(), pc, h2Params, false);
                 } else {
                   // If no NameNode HA, then don't need to change hostsType.hosts since there should be exactly one.
                   builder.add(context, hostsType, service.serviceName,
-                      svc.isClientOnlyService(), pc, null);
+                      svc.isClientOnlyService(), pc, null, false);
                 }
 
                 break;
             }
           } else {
             builder.add(context, hostsType, service.serviceName,
-                svc.isClientOnlyService(), pc, null);
+                svc.isClientOnlyService(), pc, null, scheduleInParallel);
           }
         }
       }

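Which groups opt in is now decided once per group instead of once per service: in a NON_ROLLING pack, only the restart, start and stop groupings carry work that is safe to batch. Schematically, with the grouping type reduced to a string:

    PARALLEL_GROUPINGS = {"RestartGrouping", "StartGrouping", "StopGrouping"}

    def schedule_in_parallel(group_type, upgrade_type):
        # Stand-in for the Java above: only NON_ROLLING restart/start/
        # stop groupings get single-stage (parallel) scheduling.
        return upgrade_type == "NON_ROLLING" and group_type in PARALLEL_GROUPINGS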
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4468ce4/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
index 0e9d2c8..6137285 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
@@ -108,7 +108,7 @@ public class ClusterGrouping extends Grouping {
 
     @Override
     public void add(UpgradeContext ctx, HostsType hostsType, String service,
-        boolean clientOnly, ProcessingComponent pc, Map<String, String> params) {
+        boolean clientOnly, ProcessingComponent pc, Map<String, String> params, boolean scheduleInParallel) {
       // !!! no-op in this case
     }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4468ce4/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ColocatedGrouping.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ColocatedGrouping.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ColocatedGrouping.java
index 11e9267..8218162 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ColocatedGrouping.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ColocatedGrouping.java
@@ -81,7 +81,7 @@ public class ColocatedGrouping extends Grouping {
 
     @Override
     public void add(UpgradeContext ctx, HostsType hostsType, String service,
-        boolean clientOnly, ProcessingComponent pc, Map<String, String> params) {
+        boolean clientOnly, ProcessingComponent pc, Map<String, String> params, boolean scheduleInParallel) {
 
       boolean forUpgrade = ctx.getDirection().isUpgrade();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4468ce4/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java
index cd3ee68..fd54ed8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java
@@ -89,7 +89,7 @@ public class Grouping {
      */
     @Override
     public void add(UpgradeContext ctx, HostsType hostsType, String service,
-       boolean clientOnly, ProcessingComponent pc, Map<String, String> params) {
+       boolean clientOnly, ProcessingComponent pc, Map<String, String> params, boolean scheduleInParallel) {
 
       boolean forUpgrade = ctx.getDirection().isUpgrade();
 
@@ -112,14 +112,23 @@ public class Grouping {
       // Add the processing component
       if (null != pc.tasks && 1 == pc.tasks.size()) {
         Task t = pc.tasks.get(0);
-
-        for (String hostName : hostsType.hosts) {
+        if(scheduleInParallel) {
+          // Create single stage for all
           StageWrapper stage = new StageWrapper(
               t.getStageWrapperType(),
-              getStageText(t.getActionVerb(), ctx.getComponentDisplay(service, pc.name), Collections.singleton(hostName)),
+              getStageText(t.getActionVerb(), ctx.getComponentDisplay(service, pc.name), hostsType.hosts),
               params,
-              new TaskWrapper(service, pc.name, Collections.singleton(hostName), params, t));
+              new TaskWrapper(service, pc.name, hostsType.hosts, params, t));
           m_stages.add(stage);
+        } else {
+          for (String hostName : hostsType.hosts) {
+            StageWrapper stage = new StageWrapper(
+                t.getStageWrapperType(),
+                getStageText(t.getActionVerb(), ctx.getComponentDisplay(service, pc.name), Collections.singleton(hostName)),
+                params,
+                new TaskWrapper(service, pc.name, Collections.singleton(hostName), params, t));
+            m_stages.add(stage);
+          }
         }
       }
 

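The effect of the flag is easiest to see in this branch: parallel scheduling wraps every host in a single stage so the commands run concurrently, while the old path emits one stage per host and therefore restarts strictly in sequence. A rough Python rendering, with a namedtuple standing in for StageWrapper/TaskWrapper:

    from collections import namedtuple

    Stage = namedtuple("Stage", ["task", "hosts"])  # stand-in for StageWrapper

    def build_stages(task, hosts, schedule_in_parallel):
        # Mirrors the branch above: one stage covering every host when
        # the group may run in parallel, otherwise one stage per host.
        if schedule_in_parallel:
            return [Stage(task, list(hosts))]
        return [Stage(task, [h]) for h in hosts]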
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4468ce4/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ServiceCheckGrouping.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ServiceCheckGrouping.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ServiceCheckGrouping.java
index 0033185..19fabe8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ServiceCheckGrouping.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ServiceCheckGrouping.java
@@ -103,7 +103,7 @@ public class ServiceCheckGrouping extends Grouping {
      */
     @Override
     public void add(UpgradeContext ctx, HostsType hostsType, String service,
-        boolean clientOnly, ProcessingComponent pc, Map<String, String> params) {
+        boolean clientOnly, ProcessingComponent pc, Map<String, String> params, boolean scheduleInParallel) {
       // !!! nothing to do here
     }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4468ce4/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilder.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilder.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilder.java
index 6ef0980..587ce55 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilder.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilder.java
@@ -71,7 +71,7 @@ public abstract class StageWrapperBuilder {
    *          additional parameters
    */
   public abstract void add(UpgradeContext upgradeContext, HostsType hostsType, String service,
-      boolean clientOnly, ProcessingComponent pc, Map<String, String> params);
+      boolean clientOnly, ProcessingComponent pc, Map<String, String> params, boolean scheduleInParallel);
 
   /**
    * Builds the stage wrappers, including any pre- and post-procesing that needs

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4468ce4/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java
index 94a5336..6fcf7ce 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java
@@ -126,7 +126,7 @@ public class StageWrapperBuilderTest {
      */
     @Override
     public void add(UpgradeContext upgradeContext, HostsType hostsType, String service,
-        boolean clientOnly, ProcessingComponent pc, Map<String, String> params) {
+        boolean clientOnly, ProcessingComponent pc, Map<String, String> params, boolean scheduleInParallel) {
     }
 
     /**


[29/50] [abbrv] ambari git commit: AMBARI-13478 Upgrade path on ORA 1.6.1 -> 2.0.1 -> 2.1.1 (dsen)

Posted by nc...@apache.org.
AMBARI-13478 Upgrade path on ORA 1.6.1 -> 2.0.1 -> 2.1.1 (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/bb7ca76d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/bb7ca76d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/bb7ca76d

Branch: refs/heads/branch-dev-patch-upgrade
Commit: bb7ca76db357b5c9781818278e008611f9369044
Parents: c4c8338
Author: Dmytro Sen <ds...@apache.org>
Authored: Thu Oct 22 20:51:46 2015 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Thu Oct 22 20:51:46 2015 +0300

----------------------------------------------------------------------
 .../ambari/server/orm/DBAccessorImpl.java       | 26 ++++++++++++++++++--
 .../server/upgrade/UpgradeCatalog210.java       | 10 +++++++-
 .../ambari/server/orm/DBAccessorImplTest.java   | 21 ++++++++++++++++
 3 files changed, 54 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/bb7ca76d/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
index cd47252..abd05bc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
@@ -293,6 +293,27 @@ public class DBAccessorImpl implements DBAccessor {
     return false;
   }
 
+  public String getCheckedForeignKey(String tableName, String fkName) throws SQLException {
+    DatabaseMetaData metaData = getDatabaseMetaData();
+
+    ResultSet rs = metaData.getImportedKeys(null, null, convertObjectName(tableName));
+
+    if (rs != null) {
+      try {
+        while (rs.next()) {
+          if (StringUtils.equalsIgnoreCase(fkName, rs.getString("FK_NAME"))) {
+            return rs.getString("FK_NAME");
+          }
+        }
+      } finally {
+        rs.close();
+      }
+    }
+
+    LOG.warn("FK {} not found for table {}", convertObjectName(fkName), convertObjectName(tableName));
+
+    return null;
+  }
   @Override
   public boolean tableHasForeignKey(String tableName, String refTableName,
           String columnName, String refColumnName) throws SQLException {
@@ -747,8 +768,9 @@ public class DBAccessorImpl implements DBAccessor {
   @Override
   public void dropFKConstraint(String tableName, String constraintName, boolean ignoreFailure) throws SQLException {
     // ToDo: figure out if name of index and constraint differs
-    if (tableHasForeignKey(convertObjectName(tableName), constraintName)) {
-      String query = dbmsHelper.getDropFKConstraintStatement(tableName, constraintName);
+    String checkedConstraintName = getCheckedForeignKey(convertObjectName(tableName), constraintName);
+    if (checkedConstraintName != null) {
+      String query = dbmsHelper.getDropFKConstraintStatement(tableName, checkedConstraintName);
       executeQuery(query, ignoreFailure);
     } else {
       LOG.warn("Constraint {} from {} table not found, nothing to drop", constraintName, tableName);

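The helper exists because MySQL stores and reports constraint names in their original case, so a lookup under a differently-cased name silently misses and the constraint is never dropped. getCheckedForeignKey() resolves the name through DatabaseMetaData first and drops whatever casing the database reports. The same idea in Python against MySQL's information_schema, illustrative only (the real code goes through JDBC's getImportedKeys):

    def checked_foreign_key(cursor, table_name, fk_name):
        # Find the FK's real (database-reported) name by case-insensitive
        # comparison, so the subsequent DROP uses the exact stored casing.
        cursor.execute(
            "SELECT constraint_name FROM information_schema.table_constraints"
            " WHERE table_name = %s AND constraint_type = 'FOREIGN KEY'",
            (table_name,))
        for (name,) in cursor.fetchall():
            if name.lower() == fk_name.lower():
                return name
        return None  # nothing to drop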
http://git-wip-us.apache.org/repos/asf/ambari/blob/bb7ca76d/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java
index 308e7c9..41754d9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java
@@ -486,6 +486,14 @@ public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
     dbAccessor.dropFKConstraint(HOST_COMPONENT_DESIRED_STATE_TABLE, "hstcmpnntdesiredstatecmpnntnme");
     dbAccessor.dropFKConstraint(SERVICE_CONFIG_HOSTS_TABLE, "FK_scvhosts_scv");
 
+    // These FKs were not deleted previously due to MySQL case sensitivity
+    if (databaseType == Configuration.DatabaseType.MYSQL) {
+      dbAccessor.dropFKConstraint(CONFIG_GROUP_HOST_MAPPING_TABLE, "FK_configgrouphostmapping_config_group_id");
+      dbAccessor.dropFKConstraint(CLUSTER_HOST_MAPPING_TABLE, "FK_ClusterHostMapping_cluster_id");
+      dbAccessor.dropFKConstraint(KERBEROS_PRINCIPAL_HOST_TABLE, "FK_kerberos_principal_host_principal_name");
+      dbAccessor.dropFKConstraint(SERVICE_CONFIG_HOSTS_TABLE, "FK_serviceconfighosts_service_config_id");
+    }
+
     if (databaseType == Configuration.DatabaseType.DERBY) {
       for (String tableName : tablesWithHostNameInPK) {
         String constraintName = getDerbyTableConstraintName("p", tableName);
@@ -1346,7 +1354,7 @@ public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
       public void run() {
         EntityManager em = getEntityManagerProvider().get();
         Query nativeQuery = em.createNativeQuery("UPDATE alert_definition SET alert_source=?1, hash=?2 WHERE " +
-                "definition_name=?3");
+          "definition_name=?3");
         nativeQuery.setParameter(1, source);
         nativeQuery.setParameter(2, newHash);
         nativeQuery.setParameter(3, alertName);

http://git-wip-us.apache.org/repos/asf/ambari/blob/bb7ca76d/ambari-server/src/test/java/org/apache/ambari/server/orm/DBAccessorImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/DBAccessorImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/DBAccessorImplTest.java
index 05856b3..c867c9f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/DBAccessorImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/DBAccessorImplTest.java
@@ -320,6 +320,27 @@ public class DBAccessorImplTest {
   }
 
   @Test
+  public void testGetCheckedForeignKey() throws Exception {
+    String tableName = getFreeTableName();
+    createMyTable(tableName);
+
+    DBAccessorImpl dbAccessor = injector.getInstance(DBAccessorImpl.class);
+
+    List<DBColumnInfo> columns = new ArrayList<DBColumnInfo>();
+    columns.add(new DBColumnInfo("fid", Long.class, null, null, false));
+    columns.add(new DBColumnInfo("fname", String.class, null, null, false));
+
+    String foreignTableName = getFreeTableName();
+    dbAccessor.createTable(foreignTableName, columns, "fid");
+
+    Statement statement = dbAccessor.getConnection().createStatement();
+    statement.execute("ALTER TABLE " + foreignTableName + " ADD CONSTRAINT FK_test1 FOREIGN KEY (fid) REFERENCES " +
+            tableName + " (id)");
+
+    Assert.assertEquals("FK_TEST1", dbAccessor.getCheckedForeignKey(foreignTableName, "fk_test1"));
+  }
+
+  @Test
   public void testTableExists() throws Exception {
     DBAccessorImpl dbAccessor = injector.getInstance(DBAccessorImpl.class);
 


[13/50] [abbrv] ambari git commit: AMBARI-12701. Stop-and-Start Upgrade: Handle Core Services (alejandro)

Posted by nc...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index 4cbce34..5017d39 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -48,9 +48,15 @@ upgrade_direction = default("/commandParams/upgrade_direction", None)
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
 
-# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
 version = default("/commandParams/version", None)
 
+# The desired role is only available during a Non-Rolling Upgrade in HA.
+# The server calculates which of the two NameNodes will be active and which will be standby,
+# since the two are started using different commands.
+desired_namenode_role = default("/commandParams/desired_namenode_role", None)
+
+
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 hdfs_user = status_params.hdfs_user
 root_user = "root"
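
A hedged sketch of how a NameNode start routine might consume desired_namenode_role; the start_* helpers are invented stand-ins, not functions from this patch:

  def start_as_active():   print("starting as the active NameNode")
  def start_as_standby():  print("starting as the standby NameNode")
  def plain_start():       print("ordinary start, no desired role set")

  def start_namenode(desired_namenode_role=None):
      # None outside a non-rolling HA upgrade; otherwise the server has
      # already decided which NameNode is active and which is standby.
      if desired_namenode_role is None:
          plain_start()
      elif desired_namenode_role == "active":
          start_as_active()
      else:
          start_as_standby()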

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
index f5df86f..6a64b2f 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
@@ -19,7 +19,7 @@ limitations under the License.
 """
 from resource_management.core.logger import Logger
 
-def setup_ranger_hdfs(rolling_upgrade = False):
+def setup_ranger_hdfs(upgrade_type=None):
   import params
 
   if params.has_ranger_admin:
@@ -31,7 +31,7 @@ def setup_ranger_hdfs(rolling_upgrade = False):
 
     hdp_version = None
 
-    if rolling_upgrade:
+    if upgrade_type is not None:
       hdp_version = params.version
 
     setup_ranger_plugin('hadoop-client', 'hdfs',

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
index 108ef01..f67b1cb 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
@@ -38,13 +38,13 @@ class SNameNode(Script):
     hdfs("secondarynamenode")
     snamenode(action="configure")
 
-  def start(self, env, rolling_restart=False):
+  def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)
     self.configure(env)
     snamenode(action="start")
 
-  def stop(self, env, rolling_restart=False):
+  def stop(self, env, upgrade_type=None):
     import params
     env.set_params(params)
     snamenode(action="stop")
@@ -60,9 +60,7 @@ class SNameNodeDefault(SNameNode):
   def get_stack_to_component(self):
     return {"HDP": "hadoop-hdfs-secondarynamenode"}
 
-  def pre_rolling_restart(self, env):
-    # Secondary namenode is actually removed in an HA cluster, which is a pre-requisite for Rolling Upgrade,
-    # so it does not need any Rolling Restart logic.
+  def pre_upgrade_restart(self, env, upgrade_type=None):
     pass
 
   def security_status(self, env):

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
index d1e764a..97ad424 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
@@ -32,6 +32,7 @@ from resource_management.core.logger import Logger
 from resource_management.libraries.functions.curl_krb_request import curl_krb_request
 from resource_management.core.exceptions import Fail
 from resource_management.libraries.functions.namenode_ha_utils import get_namenode_states
+from resource_management.libraries.script.script import Script
 
 from zkfc_slave import ZkfcSlave
 
@@ -336,3 +337,32 @@ def is_secure_port(port):
     return port < 1024
   else:
     return False
+
+def is_previous_fs_image():
+  """
+  Return true if there's a previous folder in the HDFS namenode directories.
+  """
+  import params
+  if params.dfs_name_dir:
+    nn_name_dirs = params.dfs_name_dir.split(',')
+    for nn_dir in nn_name_dirs:
+      prev_dir = os.path.join(nn_dir, "previous")
+      if os.path.isdir(prev_dir):
+        return True
+  return False
+
+def get_hdfs_binary(distro_component_name):
+  """
+  Get the hdfs binary to use depending on the stack and version.
+  :param distro_component_name: e.g., hadoop-hdfs-namenode, hadoop-hdfs-datanode
+  :return: The hdfs binary to use
+  """
+  import params
+  hdfs_binary = "hdfs"
+  if params.stack_name == "HDP":
+    # This was used in HDP 2.1 and earlier
+    hdfs_binary = "hdfs"
+    if Script.is_hdp_stack_greater_or_equal("2.2"):
+      hdfs_binary = "/usr/hdp/current/{0}/bin/hdfs".format(distro_component_name)
+
+  return hdfs_binary
\ No newline at end of file
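
Standalone renditions of the two helpers above, with the params lookups replaced by plain arguments (an assumption made for the sketch):

  import os

  def is_previous_fs_image(dfs_name_dir):
      # A "previous" directory under any NameNode name dir means an HDFS
      # upgrade was started and never finalized.
      for nn_dir in dfs_name_dir.split(','):
          if os.path.isdir(os.path.join(nn_dir, "previous")):
              return True
      return False

  def get_hdfs_binary(distro_component_name, on_hdp_22_or_later=True):
      # Plain "hdfs" before HDP 2.2, the versioned path from 2.2 onwards.
      if on_hdp_22_or_later:
          return "/usr/hdp/current/{0}/bin/hdfs".format(distro_component_name)
      return "hdfs"

  print(get_hdfs_binary("hadoop-hdfs-namenode"))
  # -> /usr/hdp/current/hadoop-hdfs-namenode/bin/hdfs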

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
index 14de094..e9037d8 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
@@ -42,7 +42,7 @@ class ZkfcSlave(Script):
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class ZkfcSlaveDefault(ZkfcSlave):
 
-  def start(self, env, rolling_restart=False):
+  def start(self, env, upgrade_type=None):
     import params
 
     env.set_params(params)
@@ -68,7 +68,7 @@ class ZkfcSlaveDefault(ZkfcSlave):
       create_log_dir=True
     )
 
-  def stop(self, env, rolling_restart=False):
+  def stop(self, env, upgrade_type=None):
     import params
 
     env.set_params(params)

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py
index f3b3d11..7644225 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py
@@ -37,13 +37,13 @@ class ApplicationTimelineServer(Script):
   def install(self, env):
     self.install_packages(env)
 
-  def start(self, env, rolling_restart=False):
+  def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)
     self.configure(env) # FOR SECURITY
     service('timelineserver', action='start')
 
-  def stop(self, env, rolling_restart=False):
+  def stop(self, env, upgrade_type=None):
     import params
     env.set_params(params)
     service('timelineserver', action='stop')
@@ -65,8 +65,8 @@ class ApplicationTimelineServerDefault(ApplicationTimelineServer):
   def get_stack_to_component(self):
     return {"HDP": "hadoop-yarn-timelineserver"}
 
-  def pre_rolling_restart(self, env):
-    Logger.info("Executing Rolling Upgrade pre-restart")
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
     import params
     env.set_params(params)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
index 229dcd8..5d95c5c 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
@@ -44,7 +44,7 @@ class HistoryServer(Script):
   def install(self, env):
     self.install_packages(env)
 
-  def stop(self, env, rolling_restart=False):
+  def stop(self, env, upgrade_type=None):
     import params
     env.set_params(params)
     service('historyserver', action='stop', serviceName='mapreduce')
@@ -72,8 +72,8 @@ class HistoryServerDefault(HistoryServer):
   def get_stack_to_component(self):
     return {"HDP": "hadoop-mapreduce-historyserver"}
 
-  def pre_rolling_restart(self, env):
-    Logger.info("Executing Rolling Upgrade pre-restart")
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
     import params
     env.set_params(params)
 
@@ -86,7 +86,7 @@ class HistoryServerDefault(HistoryServer):
       copy_to_hdfs("slider", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
       params.HdfsResource(None, action="execute")
 
-  def start(self, env, rolling_restart=False):
+  def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)
     self.configure(env) # FOR SECURITY

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapreduce2_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapreduce2_client.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapreduce2_client.py
index 5263d9f..7ceadf0 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapreduce2_client.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapreduce2_client.py
@@ -52,7 +52,7 @@ class MapReduce2ClientDefault(MapReduce2Client):
   def get_stack_to_component(self):
     return {"HDP": "hadoop-client"}
 
-  def pre_rolling_restart(self, env):
+  def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
     env.set_params(params)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
index fd25651..d508d55 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
@@ -39,12 +39,12 @@ class Nodemanager(Script):
   def install(self, env):
     self.install_packages(env)
 
-  def stop(self, env, rolling_restart=False):
+  def stop(self, env, upgrade_type=None):
     import params
     env.set_params(params)
     service('nodemanager',action='stop')
 
-  def start(self, env, rolling_restart=False):
+  def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)
     self.configure(env) # FOR SECURITY
@@ -67,8 +67,8 @@ class NodemanagerDefault(Nodemanager):
   def get_stack_to_component(self):
     return {"HDP": "hadoop-yarn-nodemanager"}
 
-  def pre_rolling_restart(self, env):
-    Logger.info("Executing NodeManager Rolling Upgrade pre-restart")
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing NodeManager Stack Upgrade pre-restart")
     import params
     env.set_params(params)
 
@@ -76,8 +76,8 @@ class NodemanagerDefault(Nodemanager):
       conf_select.select(params.stack_name, "hadoop", params.version)
       hdp_select.select("hadoop-yarn-nodemanager", params.version)
 
-  def post_rolling_restart(self, env):
-    Logger.info("Executing NodeManager Rolling Upgrade post-restart")
+  def post_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing NodeManager Stack Upgrade post-restart")
     import params
     env.set_params(params)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
index ba65fbc..929269d 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
@@ -57,7 +57,7 @@ stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version_major = format_hdp_stack_version(stack_version_unformatted)
 hdp_stack_version = functions.get_hdp_version('hadoop-yarn-resourcemanager')
 
-# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade.
+# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade.
 # It cannot be used during the initial Cluster Install because the version is not yet known.
 version = default("/commandParams/version", None)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
index e67f1ce..ec7799e 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
@@ -47,7 +47,7 @@ class Resourcemanager(Script):
   def install(self, env):
     self.install_packages(env)
 
-  def stop(self, env, rolling_restart=False):
+  def stop(self, env, upgrade_type=None):
     import params
     env.set_params(params)
     service('resourcemanager', action='stop')
@@ -97,8 +97,8 @@ class ResourcemanagerDefault(Resourcemanager):
   def get_stack_to_component(self):
     return {"HDP": "hadoop-yarn-resourcemanager"}
 
-  def pre_rolling_restart(self, env):
-    Logger.info("Executing Rolling Upgrade post-restart")
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade post-restart")
     import params
     env.set_params(params)
 
@@ -106,7 +106,7 @@ class ResourcemanagerDefault(Resourcemanager):
       conf_select.select(params.stack_name, "hadoop", params.version)
       hdp_select.select("hadoop-yarn-resourcemanager", params.version)
 
-  def start(self, env, rolling_restart=False):
+  def start(self, env, upgrade_type=None):
     import params
 
     env.set_params(params)

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn_client.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn_client.py
index e58ea3c..0c6115f 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn_client.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn_client.py
@@ -52,7 +52,7 @@ class YarnClientDefault(YarnClient):
   def get_stack_to_component(self):
     return {"HDP": "hadoop-client"}
 
-  def pre_rolling_restart(self, env):
+  def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
     env.set_params(params)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper.py
index 973fa0f..ce5545f 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper.py
@@ -28,13 +28,13 @@ from ambari_commons import OSConst
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def zookeeper(type = None, rolling_restart = False):
+def zookeeper(type = None, upgrade_type=None):
   import params
 
   if type == 'server':
     # This path may be missing after Ambari upgrade. We need to create it. We need to do this before any configs will
     # be applied.
-    if not rolling_restart and not os.path.exists("/usr/hdp/current/zookeeper-server") and params.current_version:
+    if upgrade_type is None and not os.path.exists("/usr/hdp/current/zookeeper-server") and params.current_version:
       conf_select.select(params.stack_name, "zookeeper", params.current_version)
       hdp_select.select("zookeeper-server", params.version)
 
@@ -108,7 +108,7 @@ def zookeeper(type = None, rolling_restart = False):
   )
 
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def zookeeper(type = None, rolling_restart = False):
+def zookeeper(type = None, upgrade_type=None):
   import params
   configFile("zoo.cfg", template_name="zoo.cfg.j2", mode="f")
   configFile("configuration.xsl", template_name="configuration.xsl.j2", mode="f")

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py
index 02c1006..7a11fee 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py
@@ -37,13 +37,13 @@ class ZookeeperClient(Script):
     zookeeper(type='client')
     pass
 
-  def start(self, env, rolling_restart=False):
+  def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)
     self.configure(env)
     pass
 
-  def stop(self, env, rolling_restart=False):
+  def stop(self, env, upgrade_type=None):
     import params
     env.set_params(params)
     pass
@@ -60,8 +60,8 @@ class ZookeeperClientLinux(ZookeeperClient):
     self.install_packages(env)
     self.configure(env)
 
-  def pre_rolling_restart(self, env):
-    Logger.info("Executing Rolling Upgrade pre-restart")
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
     import params
     env.set_params(params)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py
index b7fb578..842deb0 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py
@@ -43,21 +43,21 @@ from ambari_commons.os_family_impl import OsFamilyImpl
 
 class ZookeeperServer(Script):
 
-  def configure(self, env, rolling_restart=False):
+  def configure(self, env, upgrade_type=None):
     import params
     env.set_params(params)
-    zookeeper(type='server', rolling_restart=rolling_restart)
+    zookeeper(type='server', upgrade_type=upgrade_type)
 
-  def start(self, env, rolling_restart=False):
+  def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)
-    self.configure(env, rolling_restart=rolling_restart)
-    zookeeper_service(action='start', rolling_restart=rolling_restart)
+    self.configure(env, upgrade_type=upgrade_type)
+    zookeeper_service(action='start', upgrade_type=upgrade_type)
 
-  def stop(self, env, rolling_restart=False):
+  def stop(self, env, upgrade_type=None):
     import params
     env.set_params(params)
-    zookeeper_service(action='stop', rolling_restart=rolling_restart)
+    zookeeper_service(action='stop', upgrade_type=upgrade_type)
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class ZookeeperServerLinux(ZookeeperServer):
@@ -69,8 +69,8 @@ class ZookeeperServerLinux(ZookeeperServer):
     self.install_packages(env)
     self.configure(env)
 
-  def pre_rolling_restart(self, env):
-    Logger.info("Executing Rolling Upgrade pre-restart")
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
     import params
     env.set_params(params)
 
@@ -78,8 +78,11 @@ class ZookeeperServerLinux(ZookeeperServer):
       conf_select.select(params.stack_name, "zookeeper", params.version)
       hdp_select.select("zookeeper-server", params.version)
 
-  def post_rolling_restart(self, env):
-    Logger.info("Executing Rolling Upgrade post-restart")
+  def post_upgrade_restart(self, env, upgrade_type=None):
+    if upgrade_type == "nonrolling":
+      return
+
+    Logger.info("Executing Stack Upgrade post-restart")
     import params
     env.set_params(params)
     zk_server_host = random.choice(params.zookeeper_hosts)
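
The rolling_restart boolean is replaced throughout by a tri-state upgrade_type: None for an ordinary restart, "rolling" or "nonrolling" during a stack upgrade (the DataNode test further below passes command_args=["rolling"]). A minimal sketch of the resulting dispatch; the smoke-test helper is a hypothetical stand-in for the znode check above:

  def run_zk_write_smoke_test():
      print("write and read a test znode on a random server")  # stand-in

  def post_upgrade_restart(upgrade_type=None):
      # In a non-rolling upgrade the whole cluster is stopped, so there is
      # no quorum to talk to yet; skip the live-write check.
      if upgrade_type == "nonrolling":
          return
      run_zk_write_smoke_test()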

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_service.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_service.py
index 685eb6d..14cd85c 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_service.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_service.py
@@ -27,11 +27,11 @@ from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import hdp_select
 
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def zookeeper_service(action='start', rolling_restart=False):
+def zookeeper_service(action='start', upgrade_type=None):
   import params
 
   # This path may be missing after Ambari upgrade. We need to create it.
-  if not rolling_restart and not os.path.exists("/usr/hdp/current/zookeeper-server") and params.current_version:
+  if upgrade_type is None and not os.path.exists("/usr/hdp/current/zookeeper-server") and params.current_version:
     conf_select.select(params.stack_name, "zookeeper", params.current_version)
     hdp_select.select("zookeeper-server", params.version)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
index daa1e08..c81b1ea 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
@@ -92,7 +92,7 @@
       <skippable>true</skippable>
       <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Backup Oozie Database">
         <task xsi:type="manual">
-          <message>Before continuing, please backup the Oozie Server database on {{oozie-env/oozie_hostname}}.</message>
+          <message>Before continuing, please back up the Oozie Server database on {{hosts.all}}.</message>
         </task>
       </execute-stage>
 
@@ -178,6 +178,16 @@
       </execute-stage>
     </group>
 
+    <group xsi:type="cluster" name="Upgrade service configs" title="Upgrade service configs">
+      <direction>UPGRADE</direction>   <!--  prevent config changes on downgrade -->
+      <skippable>false</skippable>
+
+      <!--YARN-->
+      <execute-stage service="MAPREDUCE2" component="HISTORYSERVER">
+        <task xsi:type="configure" id="hdp_2_2_0_0_historyserver_classpath"/>
+      </execute-stage>
+    </group>
+
     <!-- After processing this group, will change the effective Stack of the UpgradeContext object. -->
     <group xsi:type="update-stack" name="UPDATE_DESIRED_STACK_ID" title="Update Desired Stack Id">
       <execute-stage title="Update Desired Stack Id" service="" component="">
@@ -221,6 +231,19 @@
       </service>
     </group>
 
+    <group xsi:type="cluster" name="HDFS_LEAFE_SAFEMODE" title="HDFS - Wait to leave Safemode">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <direction>UPGRADE</direction>
+
+      <execute-stage service="HDFS" component="NAMENODE" title="Wait to leave Safemode">
+        <task xsi:type="execute" hosts="master" summary="Wait for NameNode to leave Safemode">
+          <script>scripts/namenode.py</script>
+          <function>wait_for_safemode_off</function>
+        </task>
+      </execute-stage>
+    </group>
+
     <group xsi:type="restart" name="MR and YARN" title="MR and YARN">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -266,10 +289,10 @@
       <skippable>true</skippable>
       <direction>UPGRADE</direction>
       <priority>
-        <service>HBASE</service>
-        <service>MAPREDUCE2</service>
-        <service>YARN</service>
         <service>HDFS</service>
+        <service>YARN</service>
+        <service>MAPREDUCE2</service>
+        <service>HBASE</service>
       </priority>
     </group>
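
The new HDFS_LEAVE_SAFEMODE group above defers to wait_for_safemode_off in scripts/namenode.py. A hedged sketch of what such a poll can look like, built on the standard "hdfs dfsadmin -safemode get" command; the timeout and interval values are assumptions:

  import subprocess
  import time

  def wait_for_safemode_off(hdfs_binary="hdfs", timeout=180, interval=10):
      # Poll until the NameNode reports "Safe mode is OFF", or give up.
      deadline = time.time() + timeout
      while time.time() < deadline:
          out = subprocess.check_output(
              [hdfs_binary, "dfsadmin", "-safemode", "get"]).decode()
          if "OFF" in out:
              return
          time.sleep(interval)
      raise RuntimeError("NameNode still in Safemode after %d seconds" % timeout)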
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/config-upgrade.xml
index 9c96dfb..7f8faf0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/config-upgrade.xml
@@ -19,6 +19,17 @@
 <upgrade-config-changes xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
 
   <services>
+    <service name="YARN">
+      <component name="HISTORY_SERVER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_2_0_0_historyserver_classpath" summary="YARN Application Classpath">
+            <type>yarn-site</type>
+            <set key="yarn.application.classpath" value="$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
     <service name="HIVE">
       <component name="HIVE_SERVER">
         <changes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml
index 8fbb963..5aacfa0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml
@@ -271,6 +271,19 @@
       </service>
     </group>
 
+    <group xsi:type="cluster" name="HDFS_LEAFE_SAFEMODE" title="HDFS - Wait to leave Safemode">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <direction>UPGRADE</direction>
+
+      <execute-stage service="HDFS" component="NAMENODE" title="Wait to leave Safemode">
+        <task xsi:type="execute" hosts="master" summary="Wait for NameNode to leave Safemode">
+          <script>scripts/namenode.py</script>
+          <function>wait_for_safemode_off</function>
+        </task>
+      </execute-stage>
+    </group>
+
     <group xsi:type="restart" name="MR and YARN" title="MR and YARN">
       <service-check>false</service-check>
       <skippable>true</skippable>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java
index bac00d4..94a5336 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java
@@ -20,6 +20,7 @@ package org.apache.ambari.server.state.stack.upgrade;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.ambari.server.serveraction.upgrades.AutoSkipFailedSummaryAction;
 import org.apache.ambari.server.stack.HostsType;
@@ -125,7 +126,7 @@ public class StageWrapperBuilderTest {
      */
     @Override
     public void add(UpgradeContext upgradeContext, HostsType hostsType, String service,
-        boolean clientOnly, ProcessingComponent pc) {
+        boolean clientOnly, ProcessingComponent pc, Map<String, String> params) {
     }
 
     /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
index fd66502..263eeb2 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
@@ -450,7 +450,7 @@ class TestDatanode(RMFTestCase):
                               )
 
 
-  def test_pre_rolling_restart(self):
+  def test_pre_upgrade_restart(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
       json_content = json.load(f)
@@ -458,7 +458,7 @@ class TestDatanode(RMFTestCase):
     json_content['commandParams']['version'] = version
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                        classname = "DataNode",
-                       command = "pre_rolling_restart",
+                       command = "pre_upgrade_restart",
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
@@ -468,7 +468,7 @@ class TestDatanode(RMFTestCase):
 
 
   @patch("resource_management.core.shell.call")
-  def test_pre_rolling_restart_23(self, call_mock):
+  def test_pre_upgrade_restart_23(self, call_mock):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
       json_content = json.load(f)
@@ -478,7 +478,7 @@ class TestDatanode(RMFTestCase):
     mocks_dict = {}
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                        classname = "DataNode",
-                       command = "pre_rolling_restart",
+                       command = "pre_upgrade_restart",
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
@@ -499,7 +499,7 @@ class TestDatanode(RMFTestCase):
 
 
   @patch('time.sleep')
-  def test_post_rolling_restart(self, time_mock):
+  def test_post_upgrade_restart(self, time_mock):
     shell_call_output = """
       Live datanodes (2):
 
@@ -523,7 +523,7 @@ class TestDatanode(RMFTestCase):
     mocks_dict = {}
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                        classname = "DataNode",
-                       command = "post_rolling_restart",
+                       command = "post_upgrade_restart",
                        config_file = "default.json",
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
@@ -537,12 +537,12 @@ class TestDatanode(RMFTestCase):
 
 
   @patch('time.sleep')
-  def test_post_rolling_restart_datanode_not_ready(self, time_mock):
+  def test_post_upgrade_restart_datanode_not_ready(self, time_mock):
     mocks_dict = {}
     try:
       self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                          classname = "DataNode",
-                         command = "post_rolling_restart",
+                         command = "post_upgrade_restart",
                          config_file = "default.json",
                          hdp_stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES,
@@ -556,12 +556,12 @@ class TestDatanode(RMFTestCase):
 
 
   @patch('time.sleep')
-  def test_post_rolling_restart_bad_returncode(self, time_mock):
+  def test_post_upgrade_restart_bad_returncode(self, time_mock):
     try:
       mocks_dict = {}
       self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                          classname = "DataNode",
-                         command = "post_rolling_restart",
+                         command = "post_upgrade_restart",
                          config_file = "default.json",
                          hdp_stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES,
@@ -594,7 +594,7 @@ class TestDatanode(RMFTestCase):
         hdp_stack_version = self.STACK_VERSION,
         target = RMFTestCase.TARGET_COMMON_SERVICES,
         call_mocks = call_mock_side_effects,
-        command_args=[True])
+        command_args=["rolling"])
 
       raise Fail("Expected a fail since datanode didn't report a shutdown")
     except Exception, err:

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
index 9d93128..055f291 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
@@ -187,7 +187,7 @@ class Test(RMFTestCase):
 
 
   @patch("resource_management.core.shell.call")
-  def test_pre_rolling_restart_23(self, call_mock):
+  def test_pre_upgrade_restart_23(self, call_mock):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
       json_content = json.load(f)
@@ -197,7 +197,7 @@ class Test(RMFTestCase):
     mocks_dict = {}
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hdfs_client.py",
                        classname = "HdfsClient",
-                       command = "pre_rolling_restart",
+                       command = "pre_upgrade_restart",
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
@@ -216,7 +216,7 @@ class Test(RMFTestCase):
       ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
 
-  def test_pre_rolling_restart(self):
+  def test_pre_upgrade_restart(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
       json_content = json.load(f)
@@ -224,7 +224,7 @@ class Test(RMFTestCase):
     json_content['commandParams']['version'] = version
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hdfs_client.py",
                        classname = "HdfsClient",
-                       command = "pre_rolling_restart",
+                       command = "pre_upgrade_restart",
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
index d333071..a6cd740 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
@@ -260,7 +260,7 @@ class TestJournalnode(RMFTestCase):
 
 
   @patch('time.sleep')
-  def test_post_rolling_restart(self, time_mock):
+  def test_post_upgrade_restart(self, time_mock):
     # load the NN and JN JMX files so that the urllib2.urlopen mock has data
     # to return
     num_journalnodes = 3
@@ -295,7 +295,7 @@ class TestJournalnode(RMFTestCase):
       with patch.object(urllib2, "urlopen", urlopen_mock):
        with patch.object(NamenodeHAState, "get_address", get_address_mock):
          self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
-           classname = "JournalNode", command = "post_rolling_restart",
+           classname = "JournalNode", command = "post_upgrade_restart",
            config_file = "journalnode-upgrade.json",
            checked_call_mocks = [(0, str(namenode_status_active)), (0, str(namenode_status_standby))],
            hdp_stack_version = self.UPGRADE_STACK_VERSION,
@@ -314,7 +314,7 @@ class TestJournalnode(RMFTestCase):
       with patch.object(urllib2, "urlopen", urlopen_mock):
         with patch.object(NamenodeHAState, "get_address", get_address_mock):
          self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
-           classname = "JournalNode", command = "post_rolling_restart",
+           classname = "JournalNode", command = "post_upgrade_restart",
            config_file = "journalnode-upgrade-hdfs-secure.json",
            checked_call_mocks = [(0, str(namenode_status_active)), (0, str(namenode_status_standby))],
            hdp_stack_version = self.UPGRADE_STACK_VERSION,
@@ -328,7 +328,7 @@ class TestJournalnode(RMFTestCase):
 
   @patch('time.sleep')
   @patch("urllib2.urlopen")
-  def test_post_rolling_restart_bad_jmx(self, urlopen_mock, time_mock):
+  def test_post_upgrade_restart_bad_jmx(self, urlopen_mock, time_mock):
     urlopen_mock_response = '{ "bad_data" : "gonna_mess_you_up" }'
 
     url_stream_mock = MagicMock()
@@ -337,7 +337,7 @@ class TestJournalnode(RMFTestCase):
 
     try:
       self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
-        classname = "JournalNode", command = "post_rolling_restart",
+        classname = "JournalNode", command = "post_upgrade_restart",
         config_file = "journalnode-upgrade.json",
         hdp_stack_version = self.UPGRADE_STACK_VERSION,
         target = RMFTestCase.TARGET_COMMON_SERVICES )
@@ -460,7 +460,7 @@ class TestJournalnode(RMFTestCase):
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
 
 
-  def test_pre_rolling_restart(self):
+  def test_pre_upgrade_restart(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
       json_content = json.load(f)
@@ -468,7 +468,7 @@ class TestJournalnode(RMFTestCase):
     json_content['commandParams']['version'] = version
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
                        classname = "JournalNode",
-                       command = "pre_rolling_restart",
+                       command = "pre_upgrade_restart",
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
@@ -476,7 +476,7 @@ class TestJournalnode(RMFTestCase):
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
-  def test_pre_rolling_restart_23(self, call_mock):
+  def test_pre_upgrade_restart_23(self, call_mock):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
       json_content = json.load(f)
@@ -486,7 +486,7 @@ class TestJournalnode(RMFTestCase):
     mocks_dict = {}
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
                        classname = "JournalNode",
-                       command = "pre_rolling_restart",
+                       command = "pre_upgrade_restart",
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index e954a84..afa404c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -1302,7 +1302,7 @@ class TestNamenode(RMFTestCase):
                      hdp_stack_version = self.STACK_VERSION,
                      target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-  def test_pre_rolling_restart(self):
+  def test_pre_upgrade_restart(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
       json_content = json.load(f)
@@ -1310,7 +1310,7 @@ class TestNamenode(RMFTestCase):
     json_content['commandParams']['version'] = version
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
                        classname = "NameNode",
-                       command = "pre_rolling_restart",
+                       command = "pre_upgrade_restart",
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
@@ -1319,7 +1319,7 @@ class TestNamenode(RMFTestCase):
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
-  def test_pre_rolling_restart_23(self, call_mock):
+  def test_pre_upgrade_restart_23(self, call_mock):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
       json_content = json.load(f)
@@ -1329,7 +1329,7 @@ class TestNamenode(RMFTestCase):
     mocks_dict = {}
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
                        classname = "NameNode",
-                       command = "pre_rolling_restart",
+                       command = "pre_upgrade_restart",
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
@@ -1347,13 +1347,13 @@ class TestNamenode(RMFTestCase):
       ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
 
-  def test_post_rolling_restart(self):
+  def test_post_upgrade_restart(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
       json_content = json.load(f)
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
                        classname = "NameNode",
-                       command = "post_rolling_restart",
+                       command = "post_upgrade_restart",
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
@@ -1439,7 +1439,7 @@ class TestNamenode(RMFTestCase):
     self.assertNoMoreResources()
 
   @patch.object(shell, "call")
-  def test_pre_rolling_restart_21_and_lower_params(self, call_mock):
+  def test_pre_upgrade_restart_21_and_lower_params(self, call_mock):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/nn_ru_lzo.json"
     with open(config_file, "r") as f:
       json_content = json.load(f)
@@ -1449,7 +1449,7 @@ class TestNamenode(RMFTestCase):
     mocks_dict = {}
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
                        classname = "NameNode",
-                       command = "pre_rolling_restart",
+                       command = "pre_upgrade_restart",
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
@@ -1462,7 +1462,7 @@ class TestNamenode(RMFTestCase):
     self.assertEquals("/usr/lib/hadoop/sbin", sys.modules["params"].hadoop_bin)
 
   @patch.object(shell, "call")
-  def test_pre_rolling_restart_22_params(self, call_mock):
+  def test_pre_upgrade_restart_22_params(self, call_mock):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/nn_ru_lzo.json"
     with open(config_file, "r") as f:
       json_content = json.load(f)
@@ -1474,7 +1474,7 @@ class TestNamenode(RMFTestCase):
     mocks_dict = {}
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
                        classname = "NameNode",
-                       command = "pre_rolling_restart",
+                       command = "pre_upgrade_restart",
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
@@ -1487,7 +1487,7 @@ class TestNamenode(RMFTestCase):
     self.assertEquals("/usr/hdp/current/hadoop-client/sbin", sys.modules["params"].hadoop_bin)
 
   @patch.object(shell, "call")
-  def test_pre_rolling_restart_23_params(self, call_mock):
+  def test_pre_upgrade_restart_23_params(self, call_mock):
     import itertools
 
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/nn_ru_lzo.json"
@@ -1502,7 +1502,7 @@ class TestNamenode(RMFTestCase):
     mocks_dict = {}
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
                        classname = "NameNode",
-                       command = "pre_rolling_restart",
+                       command = "pre_upgrade_restart",
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py
index ee85e4a..5852eaf 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py
@@ -384,7 +384,7 @@ class TestNFSGateway(RMFTestCase):
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
-  def test_pre_rolling_restart(self, call_mock):
+  def test_pre_upgrade_restart(self, call_mock):
     call_mock.side_effects = [(0, None), (0, None)]
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
@@ -395,7 +395,7 @@ class TestNFSGateway(RMFTestCase):
     json_content['hostLevelParams']['stack_version'] = stack_version
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nfsgateway.py",
                        classname = "NFSGateway",
-                       command = "pre_rolling_restart",
+                       command = "pre_upgrade_restart",
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
index 1321aaa..40a085f 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
@@ -767,7 +767,7 @@ class TestHistoryServer(RMFTestCase):
   @patch.object(Script, "is_hdp_stack_greater_or_equal", new = MagicMock(return_value="2.3.0"))
   @patch.object(functions, "get_hdp_version", new = MagicMock(return_value="2.3.0.0-1234"))
   @patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
-  def test_pre_rolling_restart_23(self, copy_to_hdfs_mock):
+  def test_pre_upgrade_restart_23(self, copy_to_hdfs_mock):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
       json_content = json.load(f)
@@ -778,7 +778,7 @@ class TestHistoryServer(RMFTestCase):
     mocks_dict = {}
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historyserver.py",
                        classname = "HistoryServer",
-                       command = "pre_rolling_restart",
+                       command = "pre_upgrade_restart",
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py
index 532ce36..dd20b79 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py
@@ -387,7 +387,7 @@ class TestMapReduce2Client(RMFTestCase):
     self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
 
 
-  def test_pre_rolling_restart_23(self):
+  def test_pre_upgrade_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/client-upgrade.json"
     with open(config_file, "r") as f:
       json_content = json.load(f)
@@ -397,7 +397,7 @@ class TestMapReduce2Client(RMFTestCase):
     mocks_dict = {}
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/mapreduce2_client.py",
                        classname = "MapReduce2Client",
-                       command = "pre_rolling_restart",
+                       command = "pre_upgrade_restart",
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
index e7da747..3ccde3b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
@@ -520,7 +520,7 @@ class TestNodeManager(RMFTestCase):
 
   @patch('time.sleep')
   @patch.object(resource_management.libraries.functions, "get_hdp_version", new = MagicMock(return_value='2.3.0.0-1234'))
-  def test_post_rolling_restart(self, time_mock):
+  def test_post_upgrade_restart(self, time_mock):
     process_output = """
       c6401.ambari.apache.org:45454  RUNNING  c6401.ambari.apache.org:8042  0
     """
@@ -528,7 +528,7 @@ class TestNodeManager(RMFTestCase):
 
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
       classname = "Nodemanager",
-      command = "post_rolling_restart",
+      command = "post_upgrade_restart",
       config_file = "default.json",
       hdp_stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES,
@@ -547,7 +547,7 @@ class TestNodeManager(RMFTestCase):
 
 
   @patch('time.sleep')
-  def test_post_rolling_restart_nodemanager_not_ready(self, time_mock):
+  def test_post_upgrade_restart_nodemanager_not_ready(self, time_mock):
     process_output = """
       c9999.ambari.apache.org:45454  RUNNING  c9999.ambari.apache.org:8042  0
     """
@@ -556,7 +556,7 @@ class TestNodeManager(RMFTestCase):
     try:
       self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
                          classname="Nodemanager",
-                         command = "post_rolling_restart",
+                         command = "post_upgrade_restart",
                          config_file="default.json",
                          hdp_stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES,
@@ -570,7 +570,7 @@ class TestNodeManager(RMFTestCase):
 
 
   @patch('time.sleep')
-  def test_post_rolling_restart_nodemanager_not_ready(self, time_mock):
+  def test_post_upgrade_restart_nodemanager_not_ready(self, time_mock):
     process_output = """
       c6401.ambari.apache.org:45454  RUNNING  c6401.ambari.apache.org:8042  0
     """
@@ -579,7 +579,7 @@ class TestNodeManager(RMFTestCase):
     try:
       self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
                          classname="Nodemanager",
-                         command = "post_rolling_restart",
+                         command = "post_upgrade_restart",
                          config_file="default.json",
                          hdp_stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES,
@@ -701,7 +701,7 @@ class TestNodeManager(RMFTestCase):
 
   
   @patch.object(resource_management.libraries.functions, "get_hdp_version", new = MagicMock(return_value='2.3.0.0-1234'))
-  def test_pre_rolling_restart_23(self):
+  def test_pre_upgrade_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
       json_content = json.load(f)
@@ -711,7 +711,7 @@ class TestNodeManager(RMFTestCase):
     mocks_dict = {}
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
                        classname = "Nodemanager",
-                       command = "pre_rolling_restart",
+                       command = "pre_upgrade_restart",
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
index a965c90..4639bd4 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
@@ -623,7 +623,7 @@ class TestResourceManager(RMFTestCase):
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
 
-  def test_pre_rolling_restart_23(self):
+  def test_pre_upgrade_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
       json_content = json.load(f)
@@ -633,7 +633,7 @@ class TestResourceManager(RMFTestCase):
     mocks_dict = {}
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
                        classname = "Resourcemanager",
-                       command = "pre_rolling_restart",
+                       command = "pre_upgrade_restart",
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py
index 413b2ad..78043f6 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py
@@ -556,7 +556,7 @@ class TestYarnClient(RMFTestCase):
     # for now, it's enough that hdp-select is confirmed
 
   @patch.object(functions, "get_hdp_version", new = MagicMock(return_value='2.3.0.0-1234'))
-  def test_pre_rolling_restart_23(self):
+  def test_pre_upgrade_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
       json_content = json.load(f)
@@ -566,7 +566,7 @@ class TestYarnClient(RMFTestCase):
     mocks_dict = {}
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/yarn_client.py",
                        classname = "YarnClient",
-                       command = "pre_rolling_restart",
+                       command = "pre_upgrade_restart",
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py
index 7a624bd..7a6d225 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py
@@ -154,7 +154,7 @@ class TestZookeeperClient(RMFTestCase):
     self.assertNoMoreResources()
 
 
-  def test_pre_rolling_restart(self):
+  def test_pre_upgrade_restart(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
       json_content = json.load(f)
@@ -162,7 +162,7 @@ class TestZookeeperClient(RMFTestCase):
     json_content['commandParams']['version'] = version
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_client.py",
                        classname = "ZookeeperClient",
-                       command = "pre_rolling_restart",
+                       command = "pre_upgrade_restart",
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
@@ -171,7 +171,7 @@ class TestZookeeperClient(RMFTestCase):
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
-  def test_pre_rolling_restart_23(self, call_mock):
+  def test_pre_upgrade_restart_23(self, call_mock):
     call_mock.side_effects = [(0, None), (0, None)]
 
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
@@ -183,7 +183,7 @@ class TestZookeeperClient(RMFTestCase):
     mocks_dict = {}
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_client.py",
                        classname = "ZookeeperClient",
-                       command = "pre_rolling_restart",
+                       command = "pre_upgrade_restart",
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
index a6d610f..8949eaa 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
@@ -340,7 +340,7 @@ class TestZookeeperServer(RMFTestCase):
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
 
 
-  def test_pre_rolling_restart(self):
+  def test_pre_upgrade_restart(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
       json_content = json.load(f)
@@ -348,7 +348,7 @@ class TestZookeeperServer(RMFTestCase):
     json_content['commandParams']['version'] = version
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_server.py",
                        classname = "ZookeeperServer",
-                       command = "pre_rolling_restart",
+                       command = "pre_upgrade_restart",
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
@@ -357,7 +357,7 @@ class TestZookeeperServer(RMFTestCase):
     self.assertNoMoreResources()
 
   @patch("resource_management.core.shell.call")
-  def test_pre_rolling_restart_23(self, call_mock):
+  def test_pre_upgrade_restart_23(self, call_mock):
     call_mock.side_effects = [(0, None), (0, None)]
 
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
@@ -369,7 +369,7 @@ class TestZookeeperServer(RMFTestCase):
     mocks_dict = {}
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_server.py",
                        classname = "ZookeeperServer",
-                       command = "pre_rolling_restart",
+                       command = "pre_upgrade_restart",
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
@@ -391,7 +391,7 @@ class TestZookeeperServer(RMFTestCase):
     self.assertNoMoreResources()
 
   @patch.object(resource_management.libraries.functions, "get_unique_id_and_date")
-  def test_post_rolling_restart(self, get_unique_id_and_date_mock):
+  def test_post_upgrade_restart(self, get_unique_id_and_date_mock):
     unique_value = "unique1"
     get_unique_id_and_date_mock.return_value = unique_value
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
@@ -403,7 +403,7 @@ class TestZookeeperServer(RMFTestCase):
     mocks_dict = {}
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_server.py",
                        classname = "ZookeeperServer",
-                       command = "post_rolling_restart",
+                       command = "post_upgrade_restart",
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,

http://git-wip-us.apache.org/repos/asf/ambari/blob/7afe5a4e/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py b/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
index 6f3ea6d..0e467d8 100644
--- a/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
+++ b/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
@@ -375,7 +375,7 @@ class TestAppTimelineServer(RMFTestCase):
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
 
   @patch.object(resource_management.libraries.functions, "get_hdp_version", new = MagicMock(return_value='2.3.0.0-1234'))
-  def test_pre_rolling_restart_23(self):
+  def test_pre_upgrade_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
       json_content = json.load(f)
@@ -385,7 +385,7 @@ class TestAppTimelineServer(RMFTestCase):
     mocks_dict = {}
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/application_timeline_server.py",
                        classname = "ApplicationTimelineServer",
-                       command = "pre_rolling_restart",
+                       command = "pre_upgrade_restart",
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,


[15/50] [abbrv] ambari git commit: AMBARI-13476 Ranger usersync LDAP properties should be set to match Ambari's when Ambari is configured with LDAP (dsen)

Posted by nc...@apache.org.
AMBARI-13476 Ranger usersync LDAP properties should be set to match Ambari's when Ambari is configured with LDAP (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5eff7979
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5eff7979
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5eff7979

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 5eff7979a37af5e7339b6b65fa99dee612db6c38
Parents: 7afe5a4
Author: Dmytro Sen <ds...@apache.org>
Authored: Thu Oct 22 11:10:26 2015 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Thu Oct 22 11:10:26 2015 +0300

----------------------------------------------------------------------
 .../stacks/HDP/2.0.6/services/stack_advisor.py  | 18 ++++
 .../stacks/HDP/2.3/services/stack_advisor.py    | 24 ++++++
 .../stacks/2.0.6/common/test_stack_advisor.py   | 39 +++++++++
 .../stacks/2.3/common/test_stack_advisor.py     | 86 ++++++++++++++++++++
 4 files changed, 167 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/5eff7979/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index 7fb9884..3db5bfd 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -305,6 +305,24 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     policymgr_external_url = "%s://%s:%s" % (protocol, ranger_admin_host, port)
     putRangerAdminProperty('policymgr_external_url', policymgr_external_url)
 
+    # Recommend ldap settings based on ambari.properties configuration
+    # If 'ambari.ldap.isConfigured' == true
+    # For stack_version 2.2
+    stackVersion = services["Versions"]["stack_version"]
+    if stackVersion == '2.2' and 'ambari-server-properties' in services and \
+      'ambari.ldap.isConfigured' in services['ambari-server-properties'] and \
+      services['ambari-server-properties']['ambari.ldap.isConfigured'].lower() == "true":
+      putUserSyncProperty = self.putProperty(configurations, "usersync-properties", services)
+      serverProperties = services['ambari-server-properties']
+      if 'authentication.ldap.managerDn' in serverProperties:
+        putUserSyncProperty('SYNC_LDAP_BIND_DN', serverProperties['authentication.ldap.managerDn'])
+      if 'authentication.ldap.primaryUrl' in serverProperties:
+        putUserSyncProperty('SYNC_LDAP_URL', serverProperties['authentication.ldap.primaryUrl'])
+      if 'authentication.ldap.userObjectClass' in serverProperties:
+        putUserSyncProperty('SYNC_LDAP_USER_OBJECT_CLASS', serverProperties['authentication.ldap.userObjectClass'])
+      if 'authentication.ldap.usernameAttribute' in serverProperties:
+        putUserSyncProperty('SYNC_LDAP_USER_NAME_ATTRIBUTE', serverProperties['authentication.ldap.usernameAttribute'])
+
 
   def getAmsMemoryRecommendation(self, services, hosts):
     # MB per sink in hbase heapsize
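
The added branch maps four ambari.properties keys onto usersync-properties one if-statement at a time; the same mapping reads naturally as data. A sketch of that alternative (not the shipped implementation; recommend_usersync and its parameter names are illustrative), assuming the services dict shape used above:

# Mapping from ambari.properties keys to Ranger usersync-properties keys
LDAP_TO_USERSYNC = {
    'authentication.ldap.managerDn':         'SYNC_LDAP_BIND_DN',
    'authentication.ldap.primaryUrl':        'SYNC_LDAP_URL',
    'authentication.ldap.userObjectClass':   'SYNC_LDAP_USER_OBJECT_CLASS',
    'authentication.ldap.usernameAttribute': 'SYNC_LDAP_USER_NAME_ATTRIBUTE',
}

def recommend_usersync(server_properties, put_usersync_property):
    """put_usersync_property is the setter returned by self.putProperty(...)."""
    for ambari_key, usersync_key in LDAP_TO_USERSYNC.items():
        if ambari_key in server_properties:
            put_usersync_property(usersync_key, server_properties[ambari_key])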

http://git-wip-us.apache.org/repos/asf/ambari/blob/5eff7979/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
index 501517f..7a6662c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
@@ -264,6 +264,7 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
     putRangerAdminProperty = self.putProperty(configurations, "ranger-admin-site", services)
     putRangerEnvProperty = self.putProperty(configurations, "ranger-env", services)
+    putRangerUgsyncSite = self.putProperty(configurations, "ranger-ugsync-site", services)
 
     if 'admin-properties' in services['configurations'] and ('DB_FLAVOR' in services['configurations']['admin-properties']['properties'])\
       and ('db_host' in services['configurations']['admin-properties']['properties']) and ('db_name' in services['configurations']['admin-properties']['properties']):
@@ -298,6 +299,29 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
         for key in rangerPrivelegeDbProperties:
           putRangerEnvProperty(key, rangerPrivelegeDbProperties.get(key))
 
+    # Recommend ldap settings based on ambari.properties configuration
+    if 'ambari-server-properties' in services and \
+        'ambari.ldap.isConfigured' in services['ambari-server-properties'] and \
+        services['ambari-server-properties']['ambari.ldap.isConfigured'].lower() == "true":
+      serverProperties = services['ambari-server-properties']
+      if 'authentication.ldap.baseDn' in serverProperties:
+        putRangerUgsyncSite('ranger.usersync.ldap.searchBase', serverProperties['authentication.ldap.baseDn'])
+      if 'authentication.ldap.groupMembershipAttr' in serverProperties:
+        putRangerUgsyncSite('ranger.usersync.group.memberattributename', serverProperties['authentication.ldap.groupMembershipAttr'])
+      if 'authentication.ldap.groupNamingAttr' in serverProperties:
+        putRangerUgsyncSite('ranger.usersync.group.nameattribute', serverProperties['authentication.ldap.groupNamingAttr'])
+      if 'authentication.ldap.groupObjectClass' in serverProperties:
+        putRangerUgsyncSite('ranger.usersync.group.objectclass', serverProperties['authentication.ldap.groupObjectClass'])
+      if 'authentication.ldap.managerDn' in serverProperties:
+        putRangerUgsyncSite('ranger.usersync.ldap.binddn', serverProperties['authentication.ldap.managerDn'])
+      if 'authentication.ldap.primaryUrl' in serverProperties:
+        putRangerUgsyncSite('ranger.usersync.ldap.url', serverProperties['authentication.ldap.primaryUrl'])
+      if 'authentication.ldap.userObjectClass' in serverProperties:
+        putRangerUgsyncSite('ranger.usersync.ldap.user.objectclass', serverProperties['authentication.ldap.userObjectClass'])
+      if 'authentication.ldap.usernameAttribute' in serverProperties:
+        putRangerUgsyncSite('ranger.usersync.ldap.user.nameattribute', serverProperties['authentication.ldap.usernameAttribute'])
+
+
     # Recommend ranger.audit.solr.zookeepers and xasecure.audit.destination.hdfs.dir
     include_hdfs = "HDFS" in servicesList
     zookeeper_host_port = self.getZKHostPortString(services)
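
Both advisors lean on the putProperty factory: it returns a setter bound to one config type inside the shared configurations dict, which is why the recommendations come back shaped as {'config-type': {'properties': {...}}} in the tests below. A simplified sketch of that pattern (the shipped helper also takes services and does bookkeeping not shown here):

def put_property_factory(configurations, config_type):
    """Return a setter bound to configurations[config_type]['properties']."""
    props = configurations.setdefault(config_type, {}).setdefault('properties', {})
    def put_property(key, value):
        props[key] = value
    return put_property

configurations = {}
put_ugsync = put_property_factory(configurations, 'ranger-ugsync-site')
put_ugsync('ranger.usersync.ldap.searchBase', 'dc=apache,dc=org')
# configurations == {'ranger-ugsync-site':
#                    {'properties': {'ranger.usersync.ldap.searchBase': 'dc=apache,dc=org'}}}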

http://git-wip-us.apache.org/repos/asf/ambari/blob/5eff7979/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
index abddc71..85d6436 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
@@ -681,6 +681,9 @@ class TestHDP206StackAdvisor(TestCase):
     clusterData = {}
     # Recommend for not existing DB_FLAVOR and http enabled, HDP-2.3
     services = {
+      "Versions" : {
+        "stack_version" : "2.2",
+      },
       "services":  [
         {
           "StackServices": {
@@ -778,6 +781,42 @@ class TestHDP206StackAdvisor(TestCase):
     self.stackAdvisor.recommendRangerConfigurations(recommendedConfigurations, clusterData, services, None)
     self.assertEquals(recommendedConfigurations, expected)
 
+    # Test Recommend LDAP values
+    services["ambari-server-properties"] = {
+      "ambari.ldap.isConfigured" : "true",
+      "authentication.ldap.bindAnonymously" : "false",
+      "authentication.ldap.baseDn" : "dc=apache,dc=org",
+      "authentication.ldap.groupNamingAttr" : "cn",
+      "authentication.ldap.primaryUrl" : "c6403.ambari.apache.org:389",
+      "authentication.ldap.userObjectClass" : "posixAccount",
+      "authentication.ldap.secondaryUrl" : "c6403.ambari.apache.org:389",
+      "authentication.ldap.usernameAttribute" : "uid",
+      "authentication.ldap.dnAttribute" : "dn",
+      "authentication.ldap.useSSL" : "false",
+      "authentication.ldap.managerPassword" : "/etc/ambari-server/conf/ldap-password.dat",
+      "authentication.ldap.groupMembershipAttr" : "memberUid",
+      "authentication.ldap.groupObjectClass" : "posixGroup",
+      "authentication.ldap.managerDn" : "uid=hdfs,ou=people,ou=dev,dc=apache,dc=org"
+    }
+    services["configurations"] = {}
+    expected = {
+      'admin-properties': {
+        'properties': {
+          'policymgr_external_url': 'http://host1:6080',
+        }
+      },
+      'usersync-properties': {
+        'properties': {
+          'SYNC_LDAP_URL': 'c6403.ambari.apache.org:389',
+          'SYNC_LDAP_BIND_DN': 'uid=hdfs,ou=people,ou=dev,dc=apache,dc=org',
+          'SYNC_LDAP_USER_OBJECT_CLASS': 'posixAccount',
+          'SYNC_LDAP_USER_NAME_ATTRIBUTE': 'uid'
+        }
+      }
+    }
+    recommendedConfigurations = {}
+    self.stackAdvisor.recommendRangerConfigurations(recommendedConfigurations, clusterData, services, None)
+    self.assertEquals(recommendedConfigurations, expected)
 
 
   def test_recommendHDFSConfigurations(self):

http://git-wip-us.apache.org/repos/asf/ambari/blob/5eff7979/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
index e0c6d28..ff6c93e 100644
--- a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
@@ -813,3 +813,89 @@ class TestHDP23StackAdvisor(TestCase):
     self.assertTrue(exceptionThrown)
 
     pass
+
+  def test_recommendRangerConfigurations(self):
+    clusterData = {}
+    # Recommend for a non-existing DB_FLAVOR with http enabled, HDP-2.3
+    services = {
+      "Versions" : {
+        "stack_version" : "2.3",
+        },
+      "services":  [
+        {
+          "StackServices": {
+            "service_name": "RANGER"
+          },
+          "components": [
+            {
+              "StackServiceComponents": {
+                "component_name": "RANGER_ADMIN",
+                "hostnames": ["host1"]
+              }
+            }
+          ]
+        },
+        ],
+      "configurations": {
+        "admin-properties": {
+          "properties": {
+            "DB_FLAVOR": "NOT_EXISTING",
+            }
+        },
+        "ranger-admin-site": {
+          "properties": {
+            "ranger.service.http.port": "7777",
+            "ranger.service.http.enabled": "true",
+            }
+        }
+      },
+      "ambari-server-properties": {
+        "ambari.ldap.isConfigured" : "true",
+        "authentication.ldap.bindAnonymously" : "false",
+        "authentication.ldap.baseDn" : "dc=apache,dc=org",
+        "authentication.ldap.groupNamingAttr" : "cn",
+        "authentication.ldap.primaryUrl" : "c6403.ambari.apache.org:389",
+        "authentication.ldap.userObjectClass" : "posixAccount",
+        "authentication.ldap.secondaryUrl" : "c6403.ambari.apache.org:389",
+        "authentication.ldap.usernameAttribute" : "uid",
+        "authentication.ldap.dnAttribute" : "dn",
+        "authentication.ldap.useSSL" : "false",
+        "authentication.ldap.managerPassword" : "/etc/ambari-server/conf/ldap-password.dat",
+        "authentication.ldap.groupMembershipAttr" : "memberUid",
+        "authentication.ldap.groupObjectClass" : "posixGroup",
+        "authentication.ldap.managerDn" : "uid=hdfs,ou=people,ou=dev,dc=apache,dc=org"
+      }
+    }
+
+    expected = {
+      'admin-properties': {
+        'properties': {
+          'policymgr_external_url': 'http://host1:7777',
+          'SQL_CONNECTOR_JAR': '/usr/share/java/mysql-connector-java.jar'
+        }
+      },
+      'ranger-ugsync-site': {
+        'properties': {
+          'ranger.usersync.group.objectclass': 'posixGroup',
+          'ranger.usersync.group.nameattribute': 'cn',
+          'ranger.usersync.group.memberattributename': 'memberUid',
+          'ranger.usersync.ldap.binddn': 'uid=hdfs,ou=people,ou=dev,dc=apache,dc=org',
+          'ranger.usersync.ldap.user.nameattribute': 'uid',
+          'ranger.usersync.ldap.user.objectclass': 'posixAccount',
+          'ranger.usersync.ldap.url': 'c6403.ambari.apache.org:389',
+          'ranger.usersync.ldap.searchBase': 'dc=apache,dc=org'
+        }
+      },
+      'ranger-admin-site': {
+        'properties': {
+        }
+      },
+      'ranger-env': {
+        'properties': {}
+      }
+    }
+
+    recommendedConfigurations = {}
+    self.stackAdvisor.recommendRangerConfigurations(recommendedConfigurations, clusterData, services, None)
+    self.assertEquals(recommendedConfigurations, expected)
+


[11/50] [abbrv] ambari git commit: AMBARI-13512. Show a warning popup when a user is about to be logged out (rzang)

Posted by nc...@apache.org.
AMBARI-13512. Show a warning popup when a user is about to be logged out (rzang)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ac51d8b6
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ac51d8b6
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ac51d8b6

Branch: refs/heads/branch-dev-patch-upgrade
Commit: ac51d8b6cee62aee30b940d38da10f0f266ba7a5
Parents: 8ef5beb
Author: Richard Zang <rz...@apache.org>
Authored: Wed Oct 21 15:30:30 2015 -0700
Committer: Richard Zang <rz...@apache.org>
Committed: Wed Oct 21 15:31:21 2015 -0700

----------------------------------------------------------------------
 .../app/scripts/controllers/mainCtrl.js         | 49 +++++++++++++-----
 .../ui/admin-web/app/scripts/services/Auth.js   |  5 ++
 .../app/views/modals/TimeoutWarning.html        | 28 +++++++++++
 ambari-web/app/config.js                        |  1 +
 ambari-web/app/controllers/main.js              | 52 ++++++++++++++++----
 ambari-web/app/messages.js                      |  5 ++
 6 files changed, 119 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ac51d8b6/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/mainCtrl.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/mainCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/mainCtrl.js
index 2c9e1c9..1213d0d 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/mainCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/mainCtrl.js
@@ -20,12 +20,6 @@
 angular.module('ambariAdminConsole')
 .controller('MainCtrl',['$scope','$rootScope','$window','Auth', 'Alert', '$modal', 'Cluster', 'View', function($scope, $rootScope, $window, Auth, Alert, $modal, Cluster, View) {
   $scope.signOut = function() {
-    var data = JSON.parse(localStorage.ambari);
-    delete data.app.authenticated;
-    delete data.app.loginName;
-    delete data.app.user;
-    localStorage.ambari = JSON.stringify(data);
-    $scope.hello = "hello";
     Auth.signout().finally(function() {
       $window.location.pathname = '';
     });
@@ -93,7 +87,6 @@ angular.module('ambariAdminConsole')
     var lastActiveTime = Date.now();
 
     var keepActive = function() {
-      //console.error('keepActive');
       if (active) {
         lastActiveTime = Date.now();
       }
@@ -104,15 +97,46 @@ angular.module('ambariAdminConsole')
     $(window).bind('click', keepActive);
 
     var checkActiveness = function() {
-      //console.error("checkActiveness " + lastActiveTime + " : " + Date.now());
-      if (Date.now() - lastActiveTime > TIME_OUT) {
-        //console.error("LOGOUT!");
+      var remainTime = TIME_OUT - (Date.now() - lastActiveTime);
+      if (remainTime < 0) {
         active = false;
         $(window).unbind('mousemove', keepActive);
         $(window).unbind('keypress', keepActive);
         $(window).unbind('click', keepActive);
         clearInterval($rootScope.userActivityTimeoutInterval);
         $scope.signOut();
+      } else if (remainTime < 60000 && !$rootScope.timeoutModal) {
+        $rootScope.timeoutModal = $modal.open({
+          templateUrl: 'views/modals/TimeoutWarning.html',
+          backdrop: false,
+          controller: ['$scope', 'Auth', function($scope, Auth) {
+            $scope.remainTime = 60;
+            $scope.title = 'Automatic Logout';
+            $scope.primaryText = 'Remain Logged In';
+            $scope.secondaryText = 'Log Out Now';
+            $scope.remain = function() {
+              $rootScope.timeoutModal.close();
+              delete $rootScope.timeoutModal;
+            };
+            $scope.logout = function() {
+              $rootScope.timeoutModal.close();
+              delete $rootScope.timeoutModal;
+              Auth.signout().finally(function() {
+                $window.location.pathname = '';
+              });
+            };
+            $scope.countDown = function() {
+              $scope.remainTime--;
+              $scope.$apply();
+              if ($scope.remainTime == 0) {
+                Auth.signout().finally(function() {
+                  $window.location.pathname = '';
+                });
+              }
+            };
+            setInterval($scope.countDown, 1000);
+          }]
+        });
       }
     };
     $rootScope.userActivityTimeoutInterval = window.setInterval(checkActiveness, 1000);
@@ -125,8 +149,9 @@ angular.module('ambariAdminConsole')
 
   if (!$rootScope.userActivityTimeoutInterval) {
     Cluster.getAmbariTimeout().then(function(timeout) {
-      if (Number(timeout) > 0)
-        $scope.startInactiveTimeoutMonitoring(timeout * 1000);
+      $rootScope.userTimeout = Number(timeout) * 1000;
+      if ($rootScope.userTimeout > 0)
+        $scope.startInactiveTimeoutMonitoring($rootScope.userTimeout);
     });
   }
   if (!$rootScope.noopPollingInterval) {
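
The monitoring logic above reduces to a small state machine: record the last activity timestamp on mousemove/keypress/click, then on each one-second tick either sign out (remaining time below zero), open the warning modal (remaining time under 60 seconds), or do nothing. A language-agnostic sketch of one tick, written in Python with hypothetical callbacks standing in for Auth.signout() and the modal:

import time

WARN_WINDOW = 60  # seconds; mirrors the 60000 ms threshold above

def check_activeness(last_active, timeout, modal_open, warn, sign_out):
    """One tick of the monitor; warn/sign_out are illustrative callbacks."""
    remaining = timeout - (time.time() - last_active)
    if remaining < 0:
        sign_out()                    # mirrors Auth.signout() + redirect
    elif remaining < WARN_WINDOW and not modal_open:
        warn(int(remaining))          # mirrors opening views/modals/TimeoutWarning.html

# Example: 50 s of a 100 s timeout already used -> warning fires with 50 s left
check_activeness(time.time() - 50, 100, False, warn=print, sign_out=lambda: print("logout"))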

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac51d8b6/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Auth.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Auth.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Auth.js
index a73c540..14c04c1 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Auth.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Auth.js
@@ -29,6 +29,11 @@ angular.module('ambariAdminConsole')
   }
   return {
     signout: function() {
+      var data = JSON.parse(localStorage.ambari);
+      delete data.app.authenticated;
+      delete data.app.loginName;
+      delete data.app.user;
+      localStorage.ambari = JSON.stringify(data);
       // Workaround for sign off within Basic Authorization
       var origin = $window.location.protocol + '//' + Date.now() + ':' + Date.now() + '@' +
             $window.location.hostname + ($window.location.port ? ':' + $window.location.port : '');

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac51d8b6/ambari-admin/src/main/resources/ui/admin-web/app/views/modals/TimeoutWarning.html
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/views/modals/TimeoutWarning.html b/ambari-admin/src/main/resources/ui/admin-web/app/views/modals/TimeoutWarning.html
new file mode 100644
index 0000000..ad5c3a0
--- /dev/null
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/views/modals/TimeoutWarning.html
@@ -0,0 +1,28 @@
+<!--
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+-->
+
+<div class="modal-header">
+  <h4 class="modal-title">{{title}}</h4>
+</div>
+<div class="modal-body">
+  You will be automatically logged out in <b>{{remainTime}}</b> seconds due to inactivity.
+</div>
+<div class="modal-footer">
+  <button class="btn btn-default" ng-click="logout()">{{secondaryText}}</button>
+  <button class="btn btn-primary" ng-click="remain()">{{primaryText}}</button>
+</div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac51d8b6/ambari-web/app/config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/config.js b/ambari-web/app/config.js
index 26f0553..2ab8544 100644
--- a/ambari-web/app/config.js
+++ b/ambari-web/app/config.js
@@ -56,6 +56,7 @@ App.isStormMetricsSupported = true;
 App.healthStatusRed = '#ff0000';
 App.healthStatusGreen = '#5AB400';
 App.healthStatusOrange = '#FF8E00';
+App.inactivityRemainTime = 60; // in seconds
 
 App.stackVersionsAvailable = true;
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac51d8b6/ambari-web/app/controllers/main.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main.js b/ambari-web/app/controllers/main.js
index fb73770..0f6e33a 100644
--- a/ambari-web/app/controllers/main.js
+++ b/ambari-web/app/controllers/main.js
@@ -25,6 +25,7 @@ App.MainController = Em.Controller.extend({
   checkActivenessInterval: null,
   lastUserActiveTime: null,
   userTimeOut: 0,
+  userTimeOutModal: null,
 
   updateTitle: function(){
     var name = App.router.get('clusterController.clusterName');
@@ -191,7 +192,6 @@ App.MainController = Em.Controller.extend({
   },
 
   monitorInactivity: function() {
-    //console.error('======MONITOR==START========');
     var timeout = Number(App.router.get('clusterController.ambariProperties')['user.inactivity.timeout.default']);
     var readonly_timeout = Number(App.router.get('clusterController.ambariProperties')['user.inactivity.timeout.role.readonly.default']);
     var isAdmin = App.get('isAdmin');
@@ -218,7 +218,6 @@ App.MainController = Em.Controller.extend({
   /* this will be triggerred by user driven events: 'mousemove', 'keypress' and 'click' */
   keepActive: function() {
     var scope = App.router.get('mainController');
-    //console.error('keepActive');
     if (scope.get('isUserActive')) {
       scope.set('lastUserActiveTime', Date.now());
     }
@@ -226,13 +225,48 @@ App.MainController = Em.Controller.extend({
 
   checkActiveness: function() {
     var scope = App.router.get('mainController');
-    //console.error("checkActiveness " + scope.get('lastUserActiveTime') + " : " + Date.now());
-    if (Date.now() - scope.get('lastUserActiveTime') > scope.get('userTimeOut') && !scope.isOnWizard()) {
-      scope.set('isUserActive', false);
-      //console.error("LOGOUT!");
-      scope.unbindActivityEventMonitors();
-      clearInterval(scope.get('checkActivenessInterval'));
-      App.router.logOff({});
+    if (!scope.isOnWizard()) {
+      var remainTime = scope.get('userTimeOut') - (Date.now() - scope.get('lastUserActiveTime'));
+      if (remainTime < 0) {
+        scope.set('isUserActive', false);
+        scope.unbindActivityEventMonitors();
+        clearInterval(scope.get('checkActivenessInterval'));
+        App.router.logOff({});
+      } else if (remainTime < App.inactivityRemainTime * 1000 && !scope.userTimeOutModal) {
+        // show alert 60 seconds before logging user out
+        scope.userTimeOutModal = App.ModalPopup.show({
+          primary: Em.I18n.t('common.timeout.warning.popup.primary'),
+          secondary: Em.I18n.t('common.timeout.warning.popup.secondary'),
+          third: false,
+          header: Em.I18n.t('common.timeout.warning.popup.header'),
+          showCloseButton: false,
+          bodyClass: Ember.View.extend({
+            template: Ember.Handlebars.compile('<p>{{view.beforeMsg}}<b>{{view.remainTime}}</b>{{view.afterMsg}}</p>'),
+            beforeMsg: Em.I18n.t('common.timeout.warning.popup.body.before'),
+            afterMsg: Em.I18n.t('common.timeout.warning.popup.body.after'),
+            remainTime: App.inactivityRemainTime,
+            didInsertElement: function() {
+              var self = this;
+              setInterval(function(){self.countDown();}, 1000)
+            },
+            countDown: function() {
+              this.set('remainTime', this.get('remainTime') - 1);
+              if (this.get('remainTime') == 0) {
+                App.router.logOff({});
+              }
+            }
+          }),
+          onPrimary: function() {
+            scope.userTimeOutModal.hide();
+            delete scope.userTimeOutModal;
+          },
+          onSecondary: function() {
+            scope.userTimeOutModal.hide();
+            delete scope.userTimeOutModal;
+            App.router.logOff({});
+          }
+        });
+      }
     }
   },
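
One design note on the countdown: both modals (the admin-console one above and this Ember one) start a one-second setInterval that is never cleared, so the tick keeps firing after the popup is dismissed. A cancellable Python analogue of the countdown, with the explicit cleanup step the JavaScript omits (Countdown is an illustrative name, not an Ambari API):

import threading

class Countdown:
    """Tick once per second from `seconds` down to 0, then fire on_expire."""
    def __init__(self, seconds, on_tick, on_expire):
        self.remaining = seconds
        self.on_tick = on_tick
        self.on_expire = on_expire
        self._timer = None

    def start(self):
        self._timer = threading.Timer(1.0, self._tick)
        self._timer.start()

    def _tick(self):
        self.remaining -= 1
        self.on_tick(self.remaining)
        if self.remaining <= 0:
            self.on_expire()
        else:
            self.start()

    def cancel(self):
        # The cleanup step both modals above skip for their setInterval handle.
        if self._timer is not None:
            self._timer.cancel()

cd = Countdown(60, on_tick=print, on_expire=lambda: print("logout"))
cd.start()
cd.cancel()  # e.g. the user clicked "Remain Logged In"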
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ac51d8b6/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 1c19b7d..8ee266c 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -284,6 +284,11 @@ Em.I18n.translations = {
   'common.loading.eclipses': 'Loading...',
   'common.running': 'Running',
   'common.stopped': 'Stopped',
+  'common.timeout.warning.popup.header': 'Automatic Logout',
+  'common.timeout.warning.popup.body.before': 'You will be automatically logged out in ',
+  'common.timeout.warning.popup.body.after': ' seconds due to inactivity',
+  'common.timeout.warning.popup.primary': 'Remain Logged In',
+  'common.timeout.warning.popup.secondary': 'Log Out Now',
 
   'models.alert_instance.tiggered.verbose': "Occurred on {0} <br> Checked on {1}",
   'models.alert_definition.triggered.verbose': "Occurred on {0}",


[27/50] [abbrv] ambari git commit: AMBARI-13498. Passwords for components should not be readable by end-users (echekanskiy via dlysnichenko)

Posted by nc...@apache.org.
AMBARI-13498. Passwords for components should not be readable by end-users (echekanskiy via dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1ff22dff
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1ff22dff
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1ff22dff

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 1ff22dffec728f5df40f41aaf1fa7a5da3f98f35
Parents: 2ff92a9
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Thu Oct 22 19:46:13 2015 +0300
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Thu Oct 22 19:47:32 2015 +0300

----------------------------------------------------------------------
 .../AmbariManagementControllerImpl.java         |  48 +++++-
 .../controller/ConfigurationResponse.java       |  43 +++++-
 .../org/apache/ambari/server/state/Cluster.java |  19 ++-
 .../org/apache/ambari/server/state/Config.java  |   4 +
 .../apache/ambari/server/state/ConfigImpl.java  |  24 +++
 .../server/state/cluster/ClusterImpl.java       |  60 +++++++-
 .../ambari/server/utils/SecretReference.java    |  77 ++++++++++
 .../AmbariManagementControllerTest.java         | 145 +++++++++++++++++++
 .../services/HDFS/configuration/hdfs-site.xml   |   6 +
 9 files changed, 419 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/1ff22dff/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 87e05c6..152016a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -161,6 +161,7 @@ import org.apache.ambari.server.state.svccomphost.ServiceComponentHostInstallEve
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartEvent;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStopEvent;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostUpgradeEvent;
+import org.apache.ambari.server.utils.SecretReference;
 import org.apache.ambari.server.utils.StageUtils;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.io.IOUtils;
@@ -719,6 +720,27 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
     Cluster cluster = clusters.getCluster(request.getClusterName());
 
+    Map<String, String> requestProperties = request.getProperties();
+
+    Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes = cluster.getConfigPropertiesTypes(request.getType());
+    if(propertiesTypes.containsKey(PropertyType.PASSWORD)) {
+      for(String passwordProperty : propertiesTypes.get(PropertyType.PASSWORD)) {
+        if(requestProperties.containsKey(passwordProperty)) {
+          String passwordPropertyValue = requestProperties.get(passwordProperty);
+          if (!SecretReference.isSecret(passwordPropertyValue))
+            continue;
+          SecretReference ref = new SecretReference(passwordPropertyValue, passwordProperty, cluster);
+          if (!ref.getClusterName().equals(request.getClusterName()))
+            throw new AmbariException("Can not reference to different cluster in SECRET");
+          String refValue = ref.getValue();
+          requestProperties.put(passwordProperty, refValue);
+        }
+      }
+    }
+
+
+
+
     Map<String, Config> configs = cluster.getConfigsByType(
         request.getType());
     if (null == configs) {
@@ -739,7 +761,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
     handleGlobalsBackwardsCompability(request, propertiesAttributes);
 
-    Config config = createConfig(cluster, request.getType(), request.getProperties(),
+    Config config = createConfig(cluster, request.getType(), requestProperties,
       request.getVersionTag(), propertiesAttributes);
 
     return new ConfigurationResponse(cluster.getClusterName(), config);
@@ -1215,7 +1237,8 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
                 request.getType(),
                 config.getTag(), entry.getValue().getVersion(),
                 includeProps ? config.getProperties() : new HashMap<String, String>(),
-                includeProps ? config.getPropertiesAttributes() : new HashMap<String, Map<String,String>>());
+                includeProps ? config.getPropertiesAttributes() : new HashMap<String, Map<String,String>>(),
+                config.getPropertiesTypes());
             responses.add(response);
           }
         }
@@ -1228,7 +1251,8 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
               cluster.getClusterName(), config.getStackId(), config.getType(),
               config.getTag(), config.getVersion(),
               includeProps ? config.getProperties() : new HashMap<String, String>(),
-              includeProps ? config.getPropertiesAttributes() : new HashMap<String, Map<String,String>>());
+              includeProps ? config.getPropertiesAttributes() : new HashMap<String, Map<String,String>>(),
+              config.getPropertiesTypes());
 
           responses.add(response);
         }
@@ -1365,6 +1389,24 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     if (request.getDesiredConfig() != null) {
       for (ConfigurationRequest desiredConfig : request.getDesiredConfig()) {
         Map<String, String> requestConfigProperties = desiredConfig.getProperties();
+
+        // processing password properties
+        if(requestConfigProperties != null && !requestConfigProperties.isEmpty()) {
+          Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes = cluster.getConfigPropertiesTypes(
+              desiredConfig.getType()
+          );
+          for (Entry<String, String> property : requestConfigProperties.entrySet()) {
+            String propertyName = property.getKey();
+            String propertyValue = property.getValue();
+            if (propertiesTypes.containsKey(PropertyType.PASSWORD) &&
+                propertiesTypes.get(PropertyType.PASSWORD).contains(propertyName)) {
+              if (SecretReference.isSecret(propertyValue)) {
+                SecretReference ref = new SecretReference(propertyValue, propertyName, cluster);
+                requestConfigProperties.put(propertyName, ref.getValue());
+              }
+            }
+          }
+        }
         Map<String,Map<String,String>> requestConfigAttributes = desiredConfig.getPropertiesAttributes();
         Config clusterConfig = cluster.getDesiredConfigByType(desiredConfig.getType());
         Map<String, String> clusterConfigProperties = null;
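
The write path above boils down to: for each property the stack marks as PASSWORD, detect a secret stub in the incoming value, resolve it against the referenced stored config, and substitute the real password before persisting. A Python sketch of that flow; the stub prefix is a hypothetical placeholder, since the actual parsing lives in SecretReference.java, whose diff is not part of this excerpt:

SECRET_PREFIX = "SECRET:"  # illustrative; see SecretReference.isSecret for the real test

def resolve_password_stubs(request_properties, password_property_names, lookup_value):
    """lookup_value(stub) -> stored password; stands in for SecretReference.getValue()."""
    for name in password_property_names:
        value = request_properties.get(name)
        if value is not None and value.startswith(SECRET_PREFIX):
            request_properties[name] = lookup_value(value)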

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ff22dff/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigurationResponse.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigurationResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigurationResponse.java
index d6b95c8..3ed9306 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigurationResponse.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigurationResponse.java
@@ -19,9 +19,12 @@ package org.apache.ambari.server.controller;
 
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.utils.SecretReference;
 
 /**
  * This class encapsulates a configuration update request.
@@ -46,6 +49,8 @@ public class ConfigurationResponse {
 
   private Map<String, Map<String, String>> configAttributes;
 
+  private Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes;
+
   public ConfigurationResponse(String clusterName, StackId stackId,
       String type, String versionTag, Long version,
       Map<String, String> configs,
@@ -60,6 +65,23 @@ public class ConfigurationResponse {
     this.configAttributes = configAttributes;
   }
 
+  public ConfigurationResponse(String clusterName, StackId stackId,
+                               String type, String versionTag, Long version,
+                               Map<String, String> configs,
+                               Map<String, Map<String, String>> configAttributes,
+                               Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes) {
+    this.clusterName = clusterName;
+    this.stackId = stackId;
+    this.configs = configs;
+    this.type = type;
+    this.versionTag = versionTag;
+    this.version = version;
+    this.configs = configs;
+    this.configAttributes = configAttributes;
+    this.propertiesTypes = propertiesTypes;
+    stubPasswords();
+  }
+
   /**
    * Constructor.
    *
@@ -69,7 +91,7 @@ public class ConfigurationResponse {
   public ConfigurationResponse(String clusterName, Config config) {
     this(clusterName, config.getStackId(), config.getType(), config.getTag(),
         config.getVersion(), config.getProperties(),
-        config.getPropertiesAttributes());
+        config.getPropertiesAttributes(), config.getPropertiesTypes());
   }
 
   /**
@@ -185,4 +207,23 @@ public class ConfigurationResponse {
   public void setServiceConfigVersions(List<Long> serviceConfigVersions) {
     this.serviceConfigVersions = serviceConfigVersions;
   }
+
+  public Map<PropertyInfo.PropertyType, Set<String>> getPropertiesTypes() {
+    return propertiesTypes;
+  }
+
+  public void setPropertiesTypes(Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes) {
+    this.propertiesTypes = propertiesTypes;
+  }
+
+  private void stubPasswords(){
+    if(propertiesTypes != null && propertiesTypes.containsKey(PropertyInfo.PropertyType.PASSWORD)) {
+      for(String pwdPropertyName: propertiesTypes.get(PropertyInfo.PropertyType.PASSWORD)) {
+        if(configs.containsKey(pwdPropertyName)){
+          String stub = SecretReference.generateStub(clusterName, type, version);
+          configs.put(pwdPropertyName, stub);
+        }
+      }
+    }
+  }
 }
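
stubPasswords() is the matching read path: any PASSWORD-typed key present in the outgoing configs map is overwritten with a reference stub, so REST consumers never see the plaintext while the server can still resolve the reference on a later write. A sketch under the same assumptions (the exact stub layout is hypothetical; generateStub defines the real one):

def stub_passwords(configs, password_property_names, cluster_name, config_type, version):
    for name in password_property_names:
        if name in configs:
            # Illustrative format; mirrors SecretReference.generateStub(clusterName, type, version)
            configs[name] = "SECRET:%s:%s:%s" % (cluster_name, config_type, version)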

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ff22dff/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
index 0f259d5..f32e552 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
@@ -301,16 +301,33 @@ public interface Cluster {
   Map<String, Config> getConfigsByType(String configType);
 
   /**
+   * Gets all property types that match the specified config type.
+   * @param configType the config type to return
+   * @return property types for the given config type
+   */
+  Map<PropertyInfo.PropertyType, Set<String>> getConfigPropertiesTypes(String configType);
+
+  /**
    * Gets the specific config that matches the specified type and tag.  This is not
    * necessarily a DESIRED configuration that applies to a cluster.
    * @param configType  the config type to find
-   * @param versionTag  the config version to find
+   * @param versionTag  the config version tag to find
    * @return  a {@link Config} object, or <code>null</code> if the specific type
    *          and version have not been set.
    */
   Config getConfig(String configType, String versionTag);
 
   /**
+   * Gets the specific config that matches the specified type and version.  This is not
+   * necessarily a DESIRED configuration that applies to a cluster.
+   * @param configType  the config type to find
+   * @param configVersion  the config version to find
+   * @return  a {@link Config} object, or <code>null</code> if the specific type
+   *          and version have not been set.
+   */
+  Config getConfigByVersion(String configType, Long configVersion);
+
+  /**
    * Sets a specific config.  NOTE:  This is not a DESIRED configuration that
    * applies to a cluster.
    * @param config  the config instance to add

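The new getConfigByVersion() lookup complements the existing tag-based getConfig(); a minimal usage fragment, assuming a populated Cluster instance named cluster (the test near the end of this commit exercises the full flow):

  // Fetch a historical config by numeric version rather than by tag;
  // returns null when that type/version pair was never set on the cluster.
  Config v1 = cluster.getConfigByVersion("hdfs-site", 1L);
  if (v1 != null) {
    String previousPassword = v1.getProperties().get("test.password");
  }
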
http://git-wip-us.apache.org/repos/asf/ambari/blob/1ff22dff/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java
index e18505a..b35aad9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java
@@ -20,11 +20,15 @@ package org.apache.ambari.server.state;
 
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 /**
  * Represents a single instance of a 'Config Type'
  */
 public interface Config {
+  Map<PropertyInfo.PropertyType, Set<String>> getPropertiesTypes();
+
+  void setPropertiesTypes(Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes);
 
   void setStackId(StackId stackId);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ff22dff/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
index ea6aecd..2cc3d00 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
@@ -23,6 +23,7 @@ import java.util.Date;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
@@ -58,6 +59,7 @@ public class ConfigImpl implements Config {
   private volatile Map<String, String> properties;
   private volatile Map<String, Map<String, String>> propertiesAttributes;
   private ClusterConfigEntity entity;
+  private volatile Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes;
 
   @Inject
   private ClusterDAO clusterDAO;
@@ -81,6 +83,7 @@ public class ConfigImpl implements Config {
     stackId = cluster.getDesiredStackVersion();
 
     injector.injectMembers(this);
+    propertiesTypes = cluster.getConfigPropertiesTypes(type);
   }
 
 
@@ -96,6 +99,7 @@ public class ConfigImpl implements Config {
 
     this.entity = entity;
     injector.injectMembers(this);
+    propertiesTypes = cluster.getConfigPropertiesTypes(type);
   }
 
   /**
@@ -120,6 +124,26 @@ public class ConfigImpl implements Config {
   }
 
   @Override
+  public Map<PropertyInfo.PropertyType, Set<String>> getPropertiesTypes() {
+    readWriteLock.readLock().lock();
+    try {
+      return propertiesTypes;
+    } finally {
+      readWriteLock.readLock().unlock();
+    }
+  }
+
+  @Override
+  public void setPropertiesTypes(Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes) {
+    readWriteLock.writeLock().lock();
+    try {
+      this.propertiesTypes = propertiesTypes;
+    } finally {
+      readWriteLock.writeLock().unlock();
+    }
+  }
+
+  @Override
   public void setStackId(StackId stackId) {
     readWriteLock.writeLock().lock();
     try {

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ff22dff/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index e3bb320..279b31f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -109,6 +109,8 @@ import org.apache.ambari.server.state.ServiceFactory;
 import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.State;
+import org.apache.ambari.server.state.PropertyInfo;
+import org.apache.ambari.server.state.StackInfo;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
 import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
@@ -253,6 +255,8 @@ public class ClusterImpl implements Cluster {
 
   private volatile Multimap<String, String> serviceConfigTypes;
 
+  private Map<String, Map<PropertyInfo.PropertyType, Set<String>>> configPropertiesTypesCache;
+
   @Inject
   public ClusterImpl(@Assisted ClusterEntity clusterEntity,
                      Injector injector) throws AmbariException {
@@ -267,6 +271,8 @@ public class ClusterImpl implements Cluster {
 
     desiredStackVersion = new StackId(clusterEntity.getDesiredStack());
 
+    configPropertiesTypesCache = new HashMap<>();
+
     cacheConfigurations();
 
     if (desiredStackVersion != null && !StringUtils.isEmpty(desiredStackVersion.getStackName()) && !
@@ -1251,7 +1257,7 @@ public class ClusterImpl implements Cluster {
 
     // Also returns when have a mix of CURRENT and INSTALLING|INSTALLED|UPGRADING|UPGRADED
     LOG.warn("have a mix of CURRENT and INSTALLING|INSTALLED|UPGRADING|UPGRADED host versions, " +
-            "returning OUT_OF_SYNC as cluster version. Host version states: " + stateToHosts.toString());
+        "returning OUT_OF_SYNC as cluster version. Host version states: " + stateToHosts.toString());
     return RepositoryVersionState.OUT_OF_SYNC;
   }
 
@@ -1779,6 +1785,23 @@ public class ClusterImpl implements Cluster {
   }
 
   @Override
+  public Config getConfigByVersion(String configType, Long configVersion) {
+    clusterGlobalLock.readLock().lock();
+    try {
+      if (!allConfigs.containsKey(configType)) {
+        return null;
+      }
+      for (Map.Entry<String, Config> entry : allConfigs.get(configType).entrySet()) {
+        if (entry.getValue().getVersion().equals(configVersion))
+          return entry.getValue();
+      }
+      return null;
+    } finally {
+      clusterGlobalLock.readLock().unlock();
+    }
+  }
+
+  @Override
   public void addConfig(Config config) {
     clusterGlobalLock.writeLock().lock();
     try {
@@ -2205,7 +2228,7 @@ public class ClusterImpl implements Cluster {
           serviceConfigVersionResponse.getConfigurations().add(
               new ConfigurationResponse(getClusterName(), config.getStackId(),
                   config.getType(), config.getTag(), config.getVersion(),
-                  config.getProperties(), config.getPropertiesAttributes()));
+                  config.getProperties(), config.getPropertiesAttributes(), config.getPropertiesTypes()));
         }
 
         serviceConfigVersionResponses.add(serviceConfigVersionResponse);
@@ -2911,6 +2934,39 @@ public class ClusterImpl implements Cluster {
    * {@inheritDoc}
    */
   @Override
+  public synchronized Map<PropertyInfo.PropertyType, Set<String>> getConfigPropertiesTypes(String configType) {
+    if (configPropertiesTypesCache.containsKey(configType)) {
+      return configPropertiesTypesCache.get(configType);
+    } else {
+      Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes = new HashMap<>();
+      try {
+        StackId stackId = this.getCurrentStackVersion();
+        StackInfo stackInfo = ambariMetaInfo.getStack(stackId.getStackName(), stackId.getStackVersion());
+        Collection<ServiceInfo> services = stackInfo.getServices();
+        for (ServiceInfo serviceInfo : services) {
+          for (PropertyInfo propertyInfo : serviceInfo.getProperties()) {
+            if (propertyInfo.getFilename().contains(configType) && !propertyInfo.getPropertyTypes().isEmpty()) {
+              Set<PropertyInfo.PropertyType> types = propertyInfo.getPropertyTypes();
+              for (PropertyInfo.PropertyType propertyType : types) {
+                if (!propertiesTypes.containsKey(propertyType))
+                  propertiesTypes.put(propertyType, new HashSet<String>());
+                propertiesTypes.get(propertyType).add(propertyInfo.getName());
+              }
+            }
+          }
+        }
+      } catch (Exception e) {
+        // Stack metadata lookup failed; fall through and cache an empty map.
+      }
+      configPropertiesTypesCache.put(configType, propertiesTypes);
+      return propertiesTypes;
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
   @Transactional
   public void removeConfigurations(StackId stackId) {
     clusterGlobalLock.writeLock().lock();

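getConfigPropertiesTypes() above inverts the stack's per-property type declarations into a type-to-names index, cached once per config type. A standalone sketch of that inversion, with the PropertyInfo/StackInfo plumbing stubbed out (enum values and property names are illustrative):

  import java.util.EnumMap;
  import java.util.EnumSet;
  import java.util.HashSet;
  import java.util.LinkedHashMap;
  import java.util.Map;
  import java.util.Set;

  public class PropertyTypeIndexSketch {
    enum PropertyType { PASSWORD, USER, GROUP }

    public static void main(String[] args) {
      // Stand-ins for PropertyInfo entries declared in the stack definition.
      Map<String, Set<PropertyType>> declared = new LinkedHashMap<>();
      declared.put("test.password", EnumSet.of(PropertyType.PASSWORD));
      declared.put("hdfs.user", EnumSet.of(PropertyType.USER));

      // Invert to type -> property names, as the cache stores per config type.
      Map<PropertyType, Set<String>> byType = new EnumMap<>(PropertyType.class);
      for (Map.Entry<String, Set<PropertyType>> e : declared.entrySet()) {
        for (PropertyType t : e.getValue()) {
          if (!byType.containsKey(t)) {
            byType.put(t, new HashSet<String>());
          }
          byType.get(t).add(e.getKey());
        }
      }
      System.out.println(byType); // {PASSWORD=[test.password], USER=[hdfs.user]}
    }
  }
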
http://git-wip-us.apache.org/repos/asf/ambari/blob/1ff22dff/ambari-server/src/main/java/org/apache/ambari/server/utils/SecretReference.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/utils/SecretReference.java b/ambari-server/src/main/java/org/apache/ambari/server/utils/SecretReference.java
new file mode 100644
index 0000000..2b1aeae
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/SecretReference.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.utils;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Config;
+
+import java.util.Map;
+
+public class SecretReference {
+  private String clusterName;
+  private String configType;
+  private Long version;
+  private String value;
+  private String reference;
+
+  public SecretReference(String reference, String propertyName, Cluster cluster) throws AmbariException {
+    String[] values = reference.split(":"); // expected format: SECRET:<clusterName>:<configType>:<version>
+    clusterName = values[1];
+    configType = values[2];
+    version = Long.valueOf(values[3]);
+    Config refConfig = cluster.getConfigByVersion(configType, version);
+
+    if (refConfig == null)
+      throw new AmbariException(String.format("Cluster: %s does not contain ConfigType: %s ConfigVersion: %s",
+          cluster.getClusterName(), configType, version));
+    Map<String, String> refProperties = refConfig.getProperties();
+    if (!refProperties.containsKey(propertyName))
+      throw new AmbariException(String.format("Cluster: %s ConfigType: %s ConfigVersion: %s does not contain property '%s'",
+          cluster.getClusterName(), configType, version, propertyName));
+    this.value = refProperties.get(propertyName);
+
+    this.reference = reference;
+  }
+
+  public String getClusterName() {
+    return clusterName;
+  }
+
+  public void setConfigType(String configType) {
+    this.configType = configType;
+  }
+
+  public Long getVersion() {
+    return version;
+  }
+
+  public String getValue() {
+    return value;
+  }
+
+  public static boolean isSecret(String value) {
+    String[] values = value.split(":");
+    return values.length == 4 && values[0].equals("SECRET");
+  }
+
+  public static String generateStub(String clusterName, String configType, Long configVersion) {
+    return "SECRET:" + clusterName + ":" + configType + ":" + configVersion.toString();
+  }
+}

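The SECRET reference format itself is simple enough to demonstrate standalone; a hedged round-trip sketch mirroring the static helpers above (class name is illustrative):

  public class SecretReferenceFormatSketch {
    // Mirror of SecretReference.isSecret() from this commit.
    static boolean isSecret(String value) {
      String[] parts = value.split(":");
      return parts.length == 4 && parts[0].equals("SECRET");
    }

    // Mirror of SecretReference.generateStub() from this commit.
    static String generateStub(String clusterName, String configType, Long configVersion) {
      return "SECRET:" + clusterName + ":" + configType + ":" + configVersion.toString();
    }

    public static void main(String[] args) {
      String stub = generateStub("c1", "hdfs-site", 1L);
      System.out.println(stub);                      // SECRET:c1:hdfs-site:1
      System.out.println(isSecret(stub));            // true
      System.out.println(isSecret("plainPassword")); // false
    }
  }
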
http://git-wip-us.apache.org/repos/asf/ambari/blob/1ff22dff/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 7e2090a..af8c5e2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -10539,6 +10539,151 @@ public class AmbariManagementControllerTest {
   }
 
   @Test
+  public void testSecretReferences() throws AmbariException {
+
+    final String host1 = "h1";
+    final String host2 = "h2";
+    Long clusterId = 1L;
+    String clusterName = "foo1";
+    Cluster cl = setupClusterWithHosts(clusterName, "HDP-2.0.5", new ArrayList<String>() {
+      {
+        add(host1);
+        add(host2);
+      }
+    }, "centos5");
+    String serviceName = "HDFS";
+    createService(clusterName, serviceName, null);
+    String componentName1 = "NAMENODE";
+    String componentName2 = "DATANODE";
+    String componentName3 = "HDFS_CLIENT";
+
+    createServiceComponent(clusterName, serviceName, componentName1, State.INIT);
+    createServiceComponent(clusterName, serviceName, componentName2, State.INIT);
+    createServiceComponent(clusterName, serviceName, componentName3, State.INIT);
+
+    createServiceComponentHost(clusterName, serviceName, componentName1, host1, null);
+    createServiceComponentHost(clusterName, serviceName, componentName2, host1, null);
+    createServiceComponentHost(clusterName, serviceName, componentName3, host1, null);
+    createServiceComponentHost(clusterName, serviceName, componentName2, host2, null);
+    createServiceComponentHost(clusterName, serviceName, componentName3, host2, null);
+
+    // Install
+    installService(clusterName, serviceName, false, false);
+
+    ClusterRequest crReq;
+    ConfigurationRequest cr;
+
+    cr = new ConfigurationRequest(clusterName,
+        "hdfs-site",
+        "version1",
+        new HashMap<String, String>(){{
+          put("test.password", "first");
+        }},
+        new HashMap<String, Map<String, String>>()
+    );
+    crReq = new ClusterRequest(clusterId, clusterName, null, null);
+    crReq.setDesiredConfig(Collections.singletonList(cr));
+    controller.updateClusters(Collections.singleton(crReq), null);
+    // update config with secret reference
+    cr = new ConfigurationRequest(clusterName,
+        "hdfs-site",
+        "version2",
+        new HashMap<String, String>(){{
+          put("test.password", "SECRET:c1:hdfs-site:1");
+          put("new", "new"); // need this to mark the config as "changed"
+        }},
+        new HashMap<String, Map<String, String>>()
+    );
+    crReq = new ClusterRequest(clusterId, clusterName, null, null);
+    crReq.setDesiredConfig(Collections.singletonList(cr));
+    controller.updateClusters(Collections.singleton(crReq), null);
+    // change password to new value
+    cr = new ConfigurationRequest(clusterName,
+        "hdfs-site",
+        "version3",
+        new HashMap<String, String>(){{
+          put("test.password", "brandNewPassword");
+        }},
+        new HashMap<String, Map<String, String>>()
+    );
+    crReq = new ClusterRequest(clusterId, clusterName, null, null);
+    crReq.setDesiredConfig(Collections.singletonList(cr));
+    controller.updateClusters(Collections.singleton(crReq), null);
+    // wrong secret reference
+    cr = new ConfigurationRequest(clusterName,
+        "hdfs-site",
+        "version3",
+        new HashMap<String, String>(){{
+          put("test.password", "SECRET:c1:hdfs-site:666");
+        }},
+        new HashMap<String, Map<String, String>>()
+    );
+    crReq = new ClusterRequest(clusterId, clusterName, null, null);
+    crReq.setDesiredConfig(Collections.singletonList(cr));
+    try {
+      controller.updateClusters(Collections.singleton(crReq), null);
+      fail("Request should have failed due to the wrong secret reference");
+    } catch (AmbariException e) {
+      // expected: the referenced config version does not exist
+    }
+    // reference to a config which does not contain the requested property
+    cr = new ConfigurationRequest(clusterName,
+        "hdfs-site",
+        "version4",
+        new HashMap<String, String>(){{
+          put("foo", "bar");
+        }},
+        new HashMap<String, Map<String, String>>()
+    );
+    crReq = new ClusterRequest(clusterId, clusterName, null, null);
+    crReq.setDesiredConfig(Collections.singletonList(cr));
+    controller.updateClusters(Collections.singleton(crReq), null);
+    cr = new ConfigurationRequest(clusterName,
+        "hdfs-site",
+        "version5",
+        new HashMap<String, String>(){{
+          put("test.password", "SECRET:c1:hdfs-site:4");
+          put("new", "new");
+        }},
+        new HashMap<String, Map<String, String>>()
+    );
+    crReq = new ClusterRequest(clusterId, clusterName, null, null);
+    crReq.setDesiredConfig(Collections.singletonList(cr));
+    try {
+      controller.updateClusters(Collections.singleton(crReq), null);
+      fail("Request should have failed due to the wrong secret reference");
+    } catch (AmbariException e) {
+      assertEquals("Cluster: foo1 ConfigType: hdfs-site ConfigVersion: 4 does not contain property 'test.password'",
+          e.getMessage());
+    }
+    // All four hdfs-site versions should be retained.
+    assertEquals(4, cl.getAllConfigs().size());
+
+    Config v1 = cl.getConfigByVersion("hdfs-site", 1L);
+    Config v2 = cl.getConfigByVersion("hdfs-site", 2L);
+    Config v3 = cl.getConfigByVersion("hdfs-site", 3L);
+    Config v4 = cl.getConfigByVersion("hdfs-site", 4L);
+
+    assertEquals("first", v1.getProperties().get("test.password"));
+    assertEquals("first", v2.getProperties().get("test.password"));
+    assertEquals("brandNewPassword", v3.getProperties().get("test.password"));
+    assertFalse(v4.getProperties().containsKey("test.password"));
+
+    // Check that the secret is masked in the response.
+    final ConfigurationRequest configRequest = new ConfigurationRequest(clusterName, "hdfs-site", null, null, null);
+    configRequest.setIncludeProperties(true);
+    Set<ConfigurationResponse> requestedConfigs = controller.getConfigurations(new HashSet<ConfigurationRequest>() {{
+      add(configRequest);
+    }});
+    for (ConfigurationResponse resp : requestedConfigs) {
+      String secretName = "SECRET:foo1:hdfs-site:" + resp.getVersion().toString();
+      if (resp.getConfigs().containsKey("test.password")) {
+        assertEquals(secretName, resp.getConfigs().get("test.password"));
+      }
+    }
+  }
+
+  @Test
   public void testTargetedProcessCommand() throws Exception {
     final String host1 = "h1";
     String clusterName = "c1";

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ff22dff/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
index 246b2f9..f53c667 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
@@ -23,6 +23,12 @@
 <configuration>
 
 <!-- file system properties -->
+  <property>
+    <name>test.password</name>
+    <property-type>PASSWORD</property-type>
+    <value>test</value>
+    <description>1</description>
+  </property>
 
   <property>
     <name>dfs.namenode.name.dir</name>