You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by jo...@apache.org on 2016/02/16 19:30:06 UTC

[01/50] [abbrv] ambari git commit: Revert "AMBARI-14936. Tweaks to reduce build time (aonishuk)"

Repository: ambari
Updated Branches:
  refs/heads/branch-dev-patch-upgrade 1e89d1d57 -> 718f2ea18


Revert "AMBARI-14936. Tweaks to reduce build time (aonishuk)"

This reverts commit dc8e5c3c68d25e74ab875a87f66959fc86a9631c.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/58fe67c1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/58fe67c1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/58fe67c1

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 58fe67c199b9dde7cd0963c9567e728db983def0
Parents: fdb101b
Author: Alex Antonenko <hi...@gmail.com>
Authored: Thu Feb 11 18:14:51 2016 +0200
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Thu Feb 11 18:17:50 2016 +0200

----------------------------------------------------------------------
 ambari-agent/pom.xml                         | 40 +++++++++++------------
 ambari-server/pom.xml                        |  6 ++--
 ambari-server/src/main/assemblies/server.xml |  2 +-
 ambari-web/pom.xml                           |  3 +-
 pom.xml                                      | 18 +++++++++-
 5 files changed, 42 insertions(+), 27 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/58fe67c1/ambari-agent/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-agent/pom.xml b/ambari-agent/pom.xml
index c2c993f..23d2969 100644
--- a/ambari-agent/pom.xml
+++ b/ambari-agent/pom.xml
@@ -86,6 +86,24 @@
         <version>3.0</version>
       </plugin>
       <plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <tarLongFileMode>gnu</tarLongFileMode>
+          <descriptors>
+            <descriptor>src/packages/tarball/all.xml</descriptor>
+          </descriptors>
+        </configuration>
+        <executions>
+          <execution>
+            <id>build-tarball</id>
+            <phase>prepare-package</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-surefire-plugin</artifactId>
         <configuration>
@@ -257,24 +275,6 @@
         </configuration>
       </plugin>
       <plugin>
-        <artifactId>maven-assembly-plugin</artifactId>
-        <configuration>
-          <tarLongFileMode>gnu</tarLongFileMode>
-          <descriptors>
-            <descriptor>src/packages/tarball/all.xml</descriptor>
-          </descriptors>
-        </configuration>
-        <executions>
-          <execution>
-            <id>build-tarball</id>
-            <phase>${assemblyPhase}</phase>
-            <goals>
-              <goal>single</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
         <artifactId>maven-resources-plugin</artifactId>
         <version>2.6</version>
         <executions>
@@ -347,7 +347,7 @@
           </execution>
           <execution>
             <id>copy-repo-resources</id>
-            <phase>${assemblyPhase}</phase>
+            <phase>package</phase>
             <goals>
               <goal>copy-resources</goal>
             </goals>
@@ -391,7 +391,7 @@
         <executions>
           <execution>
             <id>rename-file</id>
-            <phase>${assemblyPhase}</phase>
+            <phase>package</phase>
             <goals>
               <goal>rename</goal>
             </goals>

http://git-wip-us.apache.org/repos/asf/ambari/blob/58fe67c1/ambari-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index 51fd88b..5a95ec4 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -134,7 +134,7 @@
         <executions>
           <execution>
             <id>build-tarball</id>
-            <phase>${assemblyPhase}</phase>
+            <phase>package</phase>
             <goals>
               <goal>single</goal>
             </goals>
@@ -173,7 +173,7 @@
           </execution>
           <execution>
             <id>copy-repo-resources</id>
-            <phase>${assemblyPhase}</phase>
+            <phase>package</phase>
             <goals>
               <goal>copy-resources</goal>
             </goals>
@@ -217,7 +217,7 @@
         <executions>
           <execution>
             <id>rename-file</id>
-            <phase>${assemblyPhase}</phase>
+            <phase>package</phase>
             <goals>
               <goal>rename</goal>
             </goals>

http://git-wip-us.apache.org/repos/asf/ambari/blob/58fe67c1/ambari-server/src/main/assemblies/server.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/assemblies/server.xml b/ambari-server/src/main/assemblies/server.xml
index ca74185..a75de79 100644
--- a/ambari-server/src/main/assemblies/server.xml
+++ b/ambari-server/src/main/assemblies/server.xml
@@ -301,7 +301,7 @@
     </file>
     <file>
       <fileMode>755</fileMode>
-      <source>src/main/resources/stacks/stack_advisor.py</source>
+      <source>target/classes/stacks/stack_advisor.py</source>
       <outputDirectory>/var/lib/ambari-server/resources/stacks</outputDirectory>
     </file>
     <file>

http://git-wip-us.apache.org/repos/asf/ambari/blob/58fe67c1/ambari-web/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-web/pom.xml b/ambari-web/pom.xml
index 6304b3d..761a3f0 100644
--- a/ambari-web/pom.xml
+++ b/ambari-web/pom.xml
@@ -32,7 +32,6 @@
   <properties>
     <ambari.dir>${project.parent.parent.basedir}</ambari.dir>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-    <nodemodules.dir>node_modules</nodemodules.dir> <!-- specify -Dnodemodules.dir option to reduce ambari-web build time by not re-downloading npm modules -->
   </properties>
   <build>
     <plugins>
@@ -101,7 +100,7 @@
             <configuration>
               <executable>${executable.rmdir}</executable>
               <workingDirectory>${basedir}</workingDirectory>
-              <commandlineArgs>${args.rm.clean} public ${nodemodules.dir}</commandlineArgs>
+              <commandlineArgs>${args.rm.clean} public node_modules</commandlineArgs>
               <successCodes>
                 <successCode>0</successCode>
                 <successCode>1</successCode>

http://git-wip-us.apache.org/repos/asf/ambari/blob/58fe67c1/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 8f321fd..57c6de7 100644
--- a/pom.xml
+++ b/pom.xml
@@ -60,7 +60,6 @@
     <distMgmtStagingId>apache.staging.https</distMgmtStagingId>
     <distMgmtStagingName>Apache Release Distribution Repository</distMgmtStagingName>
     <distMgmtStagingUrl>https://repository.apache.org/service/local/staging/deploy/maven2</distMgmtStagingUrl>
-    <assemblyPhase>package</assemblyPhase> <!-- use -DassemblyPhase=none to skip building tarball, useful when you want purely compile jar -->
   </properties>
   <pluginRepositories>
     <pluginRepository>
@@ -204,6 +203,23 @@
         </configuration>
       </plugin>
       <plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <descriptors>
+            <descriptor>${ambari.dir}/ambari-project/src/main/assemblies/empty.xml</descriptor>
+          </descriptors>
+        </configuration>
+        <executions>
+          <execution>
+            <id>make-assembly</id>
+            <phase>package</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
         <groupId>org.codehaus.mojo</groupId>
         <artifactId>rpm-maven-plugin</artifactId>
         <version>2.0.1</version>


[27/50] [abbrv] ambari git commit: AMBARI-15027 Sometimes Yarn graphs, Heatmaps take longer to load. (atkach)

Posted by jo...@apache.org.
AMBARI-15027 Sometimes Yarn graphs,Heatmaps take longer to load. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ed55354a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ed55354a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ed55354a

Branch: refs/heads/branch-dev-patch-upgrade
Commit: ed55354abaa8bf53ad0c1fda0d935976baf4c53d
Parents: aac5389
Author: Andrii Tkach <at...@hortonworks.com>
Authored: Fri Feb 12 16:11:18 2016 +0200
Committer: Andrii Tkach <at...@hortonworks.com>
Committed: Fri Feb 12 22:38:32 2016 +0200

----------------------------------------------------------------------
 .../app/mixins/common/widgets/widget_mixin.js   | 29 ++++++++++++--------
 .../views/common/widget/graph_widget_view.js    | 18 +++++++-----
 .../test/mixins/common/widget_mixin_test.js     |  2 +-
 3 files changed, 30 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ed55354a/ambari-web/app/mixins/common/widgets/widget_mixin.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/common/widgets/widget_mixin.js b/ambari-web/app/mixins/common/widgets/widget_mixin.js
index b6fe353..6d65c33 100644
--- a/ambari-web/app/mixins/common/widgets/widget_mixin.js
+++ b/ambari-web/app/mixins/common/widgets/widget_mixin.js
@@ -204,7 +204,9 @@ App.WidgetMixin = Ember.Mixin.create({
   },
 
   /**
-   *  aggregate all metric names in the query. Add time range and step to temporal queries
+   * aggregate all metric names in the query. Add time range and step to temporal queries
+   * @param {Array} metricPaths
+   * @returns {string}
    */
   prepareMetricPaths: function(metricPaths) {
     var temporalMetrics = metricPaths.filterProperty('metric_type', 'TEMPORAL');
@@ -225,15 +227,20 @@ App.WidgetMixin = Ember.Mixin.create({
    * @returns {$.ajax}
    */
   getHostComponentMetrics: function (request) {
-    return App.ajax.send({
-      name: 'widgets.hostComponent.metrics.get',
-      sender: this,
-      data: {
-        componentName: request.component_name,
-        metricPaths: this.prepareMetricPaths(request.metric_paths),
-        hostComponentCriteria: this.computeHostComponentCriteria(request)
-      }
-    });
+    var metricPaths = this.prepareMetricPaths(request.metric_paths);
+
+    if (metricPaths.length) {
+      return App.ajax.send({
+        name: 'widgets.hostComponent.metrics.get',
+        sender: this,
+        data: {
+          componentName: request.component_name,
+          metricPaths: this.prepareMetricPaths(request.metric_paths),
+          hostComponentCriteria: this.computeHostComponentCriteria(request)
+        }
+      });
+    }
+    return jQuery.Deferred().reject().promise();
   },
 
   getHostComponentMetricsSuccessCallback: function (data) {
@@ -771,7 +778,7 @@ App.WidgetLoadAggregator = Em.Object.create({
                 subRequest.errorCallback.call(subRequest.context, xhr, textStatus, errorThrown);
               }
             }, this);
-          }).complete(function () {
+          }).always(function () {
               _request.subRequests.forEach(function (subRequest) {
                 subRequest.completeCallback.call(subRequest.context);
               }, this);

http://git-wip-us.apache.org/repos/asf/ambari/blob/ed55354a/ambari-web/app/views/common/widget/graph_widget_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/widget/graph_widget_view.js b/ambari-web/app/views/common/widget/graph_widget_view.js
index 6feaa28..30eb658 100644
--- a/ambari-web/app/views/common/widget/graph_widget_view.js
+++ b/ambari-web/app/views/common/widget/graph_widget_view.js
@@ -211,13 +211,17 @@ App.GraphWidgetView = Em.View.extend(App.WidgetMixin, App.ExportMetricsMixin, {
       step = this.get('timeStep'),
       timeRange = this.get('timeRange'),
       result = [],
-      targetView = this.get('exportTargetView.isPopup') ? this.get('exportTargetView') : this.get('parentView'),
-      customStartTime = targetView.get('customStartTime'),
-      customEndTime = targetView.get('customEndTime');
-    if (timeRange === 0 && !Em.isNone(customStartTime) && !Em.isNone(customEndTime)) {
-      // Custom start and end time is specified by user
-      toSeconds = customEndTime / 1000;
-      fromSeconds = customStartTime / 1000;
+      targetView = this.get('exportTargetView.isPopup') ? this.get('exportTargetView') : this.get('parentView');
+
+    //if view destroyed then no metrics should be asked
+    if (Em.isNone(targetView)) return result;
+
+    if (timeRange === 0 &&
+      !Em.isNone(targetView.get('customStartTime')) &&
+      !Em.isNone(targetView.get('customEndTime'))) {
+      // Custom start/end time is specified by user
+      toSeconds = targetView.get('customEndTime') / 1000;
+      fromSeconds = targetView.get('customStartTime') / 1000;
     } else {
       // Preset time range is specified by user
       toSeconds = Math.round(App.dateTime() / 1000);

http://git-wip-us.apache.org/repos/asf/ambari/blob/ed55354a/ambari-web/test/mixins/common/widget_mixin_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/mixins/common/widget_mixin_test.js b/ambari-web/test/mixins/common/widget_mixin_test.js
index 91f628f..e32026a 100644
--- a/ambari-web/test/mixins/common/widget_mixin_test.js
+++ b/ambari-web/test/mixins/common/widget_mixin_test.js
@@ -626,7 +626,7 @@ describe('App.WidgetLoadAggregator', function () {
         return {
           done: Em.K,
           fail: Em.K,
-          complete: Em.K
+          always: Em.K
         }
       },
       state: 'inDOM'


[17/50] [abbrv] ambari git commit: AMBARI-14715. TimelineServer configuration is missing in yarn-env.xml. (Akira Ajisaka via yusaku)

Posted by jo...@apache.org.
AMBARI-14715. TimelineServer configuration is missing in yarn-env.xml. (Akira Ajisaka via yusaku)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6eed3332
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6eed3332
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6eed3332

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 6eed33322291c90c03386b72806fc3be461df5ec
Parents: 6ffe514
Author: Yusaku Sako <yu...@hortonworks.com>
Authored: Thu Feb 11 19:10:13 2016 -0800
Committer: Yusaku Sako <yu...@hortonworks.com>
Committed: Thu Feb 11 19:10:13 2016 -0800

----------------------------------------------------------------------
 .../stacks/HDP/2.3/services/YARN/configuration/yarn-env.xml    | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6eed3332/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/yarn-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/yarn-env.xml
index e802b24..6bc283d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/yarn-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/yarn-env.xml
@@ -102,14 +102,14 @@
       # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
       export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
 
-      # Specify the max Heapsize for the HistoryManager using a numerical value
+      # Specify the max Heapsize for the timeline server using a numerical value
       # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
       # the value to 1024.
       # This value will be overridden by an Xmx setting specified in either YARN_OPTS
-      # and/or YARN_HISTORYSERVER_OPTS.
+      # and/or YARN_TIMELINESERVER_OPTS.
       # If not specified, the default value will be picked from either YARN_HEAPMAX
       # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-      export YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}
+      export YARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}}
 
       # Specify the JVM options to be used when starting the NodeManager.
       # These options will be appended to the options specified as YARN_OPTS


[30/50] [abbrv] ambari git commit: Revert "AMBARI-15014 Incorrect #of required properties at Ranger Customize Services page (Next button not enabled). (ababiichuk)"

Posted by jo...@apache.org.
Revert "AMBARI-15014 Incorrect #of required properties at Ranger Customize Services page (Next button not enabled). (ababiichuk)"

This reverts commit 2ca172d4ce7fa6e025ce512212b07eab25f38bcc.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1319763e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1319763e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1319763e

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 1319763ec7aa4e436ce2ff30d138180d2a2194b1
Parents: 9e5dd9f
Author: Jaimin Jetly <ja...@hortonworks.com>
Authored: Sat Feb 13 13:04:29 2016 +0530
Committer: Jaimin Jetly <ja...@hortonworks.com>
Committed: Sat Feb 13 13:04:29 2016 +0530

----------------------------------------------------------------------
 .../app/controllers/wizard/step7_controller.js  |  5 ++-
 ambari-web/app/mixins/common/serverValidator.js |  2 +-
 .../models/configs/objects/service_config.js    |  6 ++-
 .../configs/objects/service_config_property.js  |  6 +++
 .../app/models/configs/theme/sub_section.js     | 12 +++--
 .../app/models/configs/theme/sub_section_tab.js |  6 ++-
 .../config_recommendation_popup.hbs             | 46 ++++++++++----------
 .../configs/widgets/config_widget_view.js       |  5 ++-
 .../configs/objects/service_config_test.js      |  7 ++-
 9 files changed, 64 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/1319763e/ambari-web/app/controllers/wizard/step7_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step7_controller.js b/ambari-web/app/controllers/wizard/step7_controller.js
index ee37427..634c86b 100644
--- a/ambari-web/app/controllers/wizard/step7_controller.js
+++ b/ambari-web/app/controllers/wizard/step7_controller.js
@@ -680,7 +680,10 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
               themeResource.get('configProperties').forEach(function (_configId) {
                 configs.forEach(function (item) {
                   if (App.config.configId(item.name, item.filename) === _configId) {
-                    item.isVisible = valueAttributes['visible'];
+                    // if config has already been hidden by condition with "subsection" or "subsectionTab" type
+                    // then ignore condition of "config" type
+                    if (configCondition.get('type') === 'config' && item.hiddenBySection) return false;
+                    item.hiddenBySection = !valueAttributes['visible'];
                   }
                 });
               }, this);

http://git-wip-us.apache.org/repos/asf/ambari/blob/1319763e/ambari-web/app/mixins/common/serverValidator.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/common/serverValidator.js b/ambari-web/app/mixins/common/serverValidator.js
index 231c376..99a5921 100644
--- a/ambari-web/app/mixins/common/serverValidator.js
+++ b/ambari-web/app/mixins/common/serverValidator.js
@@ -301,7 +301,7 @@ App.ServerValidatorMixin = Em.Mixin.create({
         : self.get('stepConfigs');
       var configsWithErrors = stepConfigs.some(function (step) {
         return step.get('configs').some(function(c) {
-          return c.get('isVisible') && (c.get('warn') || c.get('error'));
+          return c.get('isVisible') && !c.get('hiddenBySection') && (c.get('warn') || c.get('error'));
         })
       });
       if (configsWithErrors) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/1319763e/ambari-web/app/models/configs/objects/service_config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/configs/objects/service_config.js b/ambari-web/app/models/configs/objects/service_config.js
index c75486c..56b8aa4 100644
--- a/ambari-web/app/models/configs/objects/service_config.js
+++ b/ambari-web/app/models/configs/objects/service_config.js
@@ -38,7 +38,11 @@ App.ServiceConfig = Ember.Object.extend({
 
   errorCount: Em.computed.alias('configsWithErrors.length'),
 
-  visibleProperties: Em.computed.filterBy('configs', 'isVisible', true),
+  visibleProperties: function() {
+    return this.get('configs').filter(function(c) {
+      return c.get('isVisible') && !c.get('hiddenBySection');
+    });
+  }.property('configs.@each.isVisible', 'configs.@each.hiddenBySection'),
 
   configsWithErrors: function() {
     return this.get('visibleProperties').filter(function(c) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/1319763e/ambari-web/app/models/configs/objects/service_config_property.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/configs/objects/service_config_property.js b/ambari-web/app/models/configs/objects/service_config_property.js
index 422dc22..21e3dd3 100644
--- a/ambari-web/app/models/configs/objects/service_config_property.js
+++ b/ambari-web/app/models/configs/objects/service_config_property.js
@@ -180,6 +180,12 @@ App.ServiceConfigProperty = Em.Object.extend({
   showAsTextBox: false,
 
   /**
+   * config is invisible since wrapper section is hidden
+   * @type {boolean}
+   */
+  hiddenBySection: false,
+
+  /**
    * @type {boolean}
    */
   recommendedValueExists: function () {

http://git-wip-us.apache.org/repos/asf/ambari/blob/1319763e/ambari-web/app/models/configs/theme/sub_section.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/configs/theme/sub_section.js b/ambari-web/app/models/configs/theme/sub_section.js
index 62729ff..7274569 100644
--- a/ambari-web/app/models/configs/theme/sub_section.js
+++ b/ambari-web/app/models/configs/theme/sub_section.js
@@ -94,7 +94,11 @@ App.SubSection = DS.Model.extend({
 
   showTabs: Em.computed.and('hasTabs', 'someSubSectionTabIsVisible'),
 
-  visibleProperties: Em.computed.filterBy('configs', 'isVisible', true),
+  visibleProperties: function() {
+    return this.get('configs').filter(function(c) {
+      return c.get('isVisible') && !c.get('hiddenBySection');
+    });
+  }.property('configs.@each.isVisible', 'configs.@each.hiddenBySection'),
 
   visibleTabs: Em.computed.filterBy('subSectionTabs', 'isVisible', true),
 
@@ -174,9 +178,11 @@ App.SubSection = DS.Model.extend({
    * @type {boolean}
    */
   isHiddenByFilter: function () {
-    var configs = this.get('visibleProperties');
+    var configs = this.get('configs').filter(function(c) {
+      return !c.get('hiddenBySection') && c.get('isVisible');
+    });
     return configs.length ? configs.everyProperty('isHiddenByFilter', true) : false;
-  }.property('visibleProperties.@each.isHiddenByFilter'),
+  }.property('configs.@each.isHiddenByFilter'),
 
   /**
    * @type {boolean}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1319763e/ambari-web/app/models/configs/theme/sub_section_tab.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/configs/theme/sub_section_tab.js b/ambari-web/app/models/configs/theme/sub_section_tab.js
index 236c5c7..2262882 100644
--- a/ambari-web/app/models/configs/theme/sub_section_tab.js
+++ b/ambari-web/app/models/configs/theme/sub_section_tab.js
@@ -55,7 +55,11 @@ App.SubSectionTab = DS.Model.extend({
    */
   isActive: DS.attr('boolean', {defaultValue: false}),
 
-  visibleProperties: Em.computed.filterBy('configs', 'isVisible', true),
+  visibleProperties: function() {
+    return this.get('configs').filter(function(c) {
+      return c.get('isVisible') && !c.get('hiddenBySection');
+    });
+  }.property('configs.@each.isVisible', 'configs.@each.hiddenBySection'),
 
   /**
    * Number of the errors in all configs

http://git-wip-us.apache.org/repos/asf/ambari/blob/1319763e/ambari-web/app/templates/common/modal_popups/config_recommendation_popup.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/common/modal_popups/config_recommendation_popup.hbs b/ambari-web/app/templates/common/modal_popups/config_recommendation_popup.hbs
index fab5742..f58e086 100644
--- a/ambari-web/app/templates/common/modal_popups/config_recommendation_popup.hbs
+++ b/ambari-web/app/templates/common/modal_popups/config_recommendation_popup.hbs
@@ -31,28 +31,30 @@
       {{#each service in view.serviceConfigs}}
         {{#each property in service.configs}}
           {{#if property.isVisible}}
-            {{#if property.warn}}
-              <tr>
-                <td>{{property.serviceName}}</td>
-                <td>{{property.name}}</td>
-                <td>{{property.value}}</td>
-                <td>
-                  <div class="property-message">{{property.warnMessage}}</div>
-                  <div class="property-description">{{property.description}}</div>
-                </td>
-              </tr>
-            {{/if}}
-            {{#if property.error}}
-              <tr>
-                <td>{{property.serviceName}}</td>
-                <td>{{property.name}}</td>
-                <td>{{property.value}}</td>
-                <td>
-                  <div class="property-message">{{property.errorMessage}}</div>
-                  <div class="property-description">{{property.description}}</div>
-                </td>
-              </tr>
-            {{/if}}
+            {{#unless property.hiddenBySection}}
+              {{#if property.warn}}
+                <tr>
+                  <td>{{property.serviceName}}</td>
+                  <td>{{property.name}}</td>
+                  <td>{{property.value}}</td>
+                  <td>
+                    <div class="property-message">{{property.warnMessage}}</div>
+                    <div class="property-description">{{property.description}}</div>
+                  </td>
+                </tr>
+              {{/if}}
+              {{#if property.error}}
+                <tr>
+                  <td>{{property.serviceName}}</td>
+                  <td>{{property.name}}</td>
+                  <td>{{property.value}}</td>
+                  <td>
+                    <div class="property-message">{{property.errorMessage}}</div>
+                    <div class="property-description">{{property.description}}</div>
+                  </td>
+                </tr>
+              {{/if}}
+            {{/unless}}
           {{/if}}
         {{/each}}
       {{/each}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1319763e/ambari-web/app/views/common/configs/widgets/config_widget_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/configs/widgets/config_widget_view.js b/ambari-web/app/views/common/configs/widgets/config_widget_view.js
index 9052b2a..e9eaed2 100644
--- a/ambari-web/app/views/common/configs/widgets/config_widget_view.js
+++ b/ambari-web/app/views/common/configs/widgets/config_widget_view.js
@@ -434,6 +434,9 @@ App.ConfigWidgetView = Em.View.extend(App.SupportsDependentConfigs, App.WidgetPo
         var conditionalConfig = serviceConfigs.filterProperty('filename',conditionalConfigFileName).findProperty('name', conditionalConfigName);
         if (conditionalConfig) {
           conditionalConfig.set(valueAttribute, valueAttributes[key]);
+          if (valueAttribute === 'isVisible') {
+            conditionalConfig.set('hiddenBySection', !valueAttributes[key]);
+          }
         }
       }
     }
@@ -457,7 +460,7 @@ App.ConfigWidgetView = Em.View.extend(App.SupportsDependentConfigs, App.WidgetPo
           themeResource = App.SubSectionTab.find().findProperty('name', subsectionConditionName);
         }
         themeResource.set('isHiddenByConfig', !valueAttributes['visible']);
-        themeResource.get('configs').setEach('isVisible', valueAttributes['visible']);
+        themeResource.get('configs').setEach('hiddenBySection', !valueAttributes['visible']);
       }
     }
   },

http://git-wip-us.apache.org/repos/asf/ambari/blob/1319763e/ambari-web/test/models/configs/objects/service_config_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/models/configs/objects/service_config_test.js b/ambari-web/test/models/configs/objects/service_config_test.js
index e12f23d..8a757ea 100644
--- a/ambari-web/test/models/configs/objects/service_config_test.js
+++ b/ambari-web/test/models/configs/objects/service_config_test.js
@@ -25,30 +25,35 @@ var serviceConfig,
       Em.Object.create({
         'name': 'p1',
         'isVisible': true,
+        'hiddenBySection': false,
         'isValid': true,
         'isValidOverride': true
       }),
       Em.Object.create({
         'name': 'p2',
         'isVisible': false,
+        'hiddenBySection': false,
         'isValid': true,
         'isValidOverride': true
       }),
       Em.Object.create({
         'name': 'p3',
         'isVisible': true,
+        'hiddenBySection': true,
         'isValid': true,
         'isValidOverride': true
       }),
       Em.Object.create({
         'name': 'p4',
         'isVisible': true,
+        'hiddenBySection': false,
         'isValid': false,
         'isValidOverride': true
       }),
       Em.Object.create({
         'name': 'p5',
         'isVisible': true,
+        'hiddenBySection': false,
         'isValid': true,
         'isValidOverride': false
       })
@@ -64,7 +69,7 @@ describe('App.ServiceConfig', function () {
 
   describe('#visibleProperties', function() {
     it('returns collection of properties that should be shown', function() {
-      expect(serviceConfig.get('visibleProperties').mapProperty('name')).to.be.eql(['p1','p3', 'p4','p5']);
+      expect(serviceConfig.get('visibleProperties').mapProperty('name')).to.be.eql(['p1','p4','p5']);
     });
   });
 


[16/50] [abbrv] ambari git commit: AMBARI-14945. Some options have not been applied on Ambari Metrics Collector. (Masahiro TANAKA via swagle)

Posted by jo...@apache.org.
AMBARI-14945. Some options have not been applied on Ambari Metrics Collector. (Masahiro TANAKA via swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6ffe5145
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6ffe5145
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6ffe5145

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 6ffe51450d359a9ad076083a0db8976d6d95c74f
Parents: 1ead250
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Thu Feb 11 17:17:36 2016 -0800
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Thu Feb 11 17:17:36 2016 -0800

----------------------------------------------------------------------
 .../conf/unix/ambari-metrics-collector                             | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6ffe5145/ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector b/ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector
index bf2fc47..f83af50 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector
+++ b/ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector
@@ -261,7 +261,7 @@ function start()
     rm -f "${PIDFILE}" >/dev/null 2>&1
   fi
 
-  nohup "${JAVA}" "-Xms$AMS_COLLECTOR_HEAPSIZE" "-Xmx$AMS_COLLECTOR_HEAPSIZE" "${AMS_COLLECTOR_OPTS}" "-cp" "/usr/lib/ambari-metrics-collector/*:${COLLECTOR_CONF_DIR}" "-Djava.net.preferIPv4Stack=true" "-Dams.log.dir=${AMS_COLLECTOR_LOG_DIR}" "-Dproc_${DAEMON_NAME}" "${CLASS}" "$@" > $OUTFILE 2>&1 &
+  nohup "${JAVA}" "-Xms$AMS_COLLECTOR_HEAPSIZE" "-Xmx$AMS_COLLECTOR_HEAPSIZE" ${AMS_COLLECTOR_OPTS} "-cp" "/usr/lib/ambari-metrics-collector/*:${COLLECTOR_CONF_DIR}" "-Djava.net.preferIPv4Stack=true" "-Dams.log.dir=${AMS_COLLECTOR_LOG_DIR}" "-Dproc_${DAEMON_NAME}" "${CLASS}" "$@" > $OUTFILE 2>&1 &
   PID=$!
   write_pidfile "${PIDFILE}"
   sleep 2


[22/50] [abbrv] ambari git commit: AMBARI-14936. Tweaks to reduce build time (aonishuk)

Posted by jo...@apache.org.
AMBARI-14936. Tweaks to reduce build time (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/77daca7a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/77daca7a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/77daca7a

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 77daca7a62abb3f6caa8f4be349aee704a1d7267
Parents: ddba3c5
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Fri Feb 12 16:17:36 2016 +0200
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Fri Feb 12 16:17:36 2016 +0200

----------------------------------------------------------------------
 ambari-agent/pom.xml  | 6 +++---
 ambari-server/pom.xml | 6 +++---
 ambari-web/pom.xml    | 3 ++-
 pom.xml               | 1 +
 4 files changed, 9 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/77daca7a/ambari-agent/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-agent/pom.xml b/ambari-agent/pom.xml
index bb7cc34..c2c993f 100644
--- a/ambari-agent/pom.xml
+++ b/ambari-agent/pom.xml
@@ -267,7 +267,7 @@
         <executions>
           <execution>
             <id>build-tarball</id>
-            <phase>package</phase>
+            <phase>${assemblyPhase}</phase>
             <goals>
               <goal>single</goal>
             </goals>
@@ -347,7 +347,7 @@
           </execution>
           <execution>
             <id>copy-repo-resources</id>
-            <phase>package</phase>
+            <phase>${assemblyPhase}</phase>
             <goals>
               <goal>copy-resources</goal>
             </goals>
@@ -391,7 +391,7 @@
         <executions>
           <execution>
             <id>rename-file</id>
-            <phase>package</phase>
+            <phase>${assemblyPhase}</phase>
             <goals>
               <goal>rename</goal>
             </goals>

http://git-wip-us.apache.org/repos/asf/ambari/blob/77daca7a/ambari-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index 33b5501..b0794c9 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -134,7 +134,7 @@
         <executions>
           <execution>
             <id>build-tarball</id>
-            <phase>package</phase>
+            <phase>${assemblyPhase}</phase>
             <goals>
               <goal>single</goal>
             </goals>
@@ -173,7 +173,7 @@
           </execution>
           <execution>
             <id>copy-repo-resources</id>
-            <phase>package</phase>
+            <phase>${assemblyPhase}</phase>
             <goals>
               <goal>copy-resources</goal>
             </goals>
@@ -217,7 +217,7 @@
         <executions>
           <execution>
             <id>rename-file</id>
-            <phase>package</phase>
+            <phase>${assemblyPhase}</phase>
             <goals>
               <goal>rename</goal>
             </goals>

http://git-wip-us.apache.org/repos/asf/ambari/blob/77daca7a/ambari-web/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-web/pom.xml b/ambari-web/pom.xml
index 761a3f0..6304b3d 100644
--- a/ambari-web/pom.xml
+++ b/ambari-web/pom.xml
@@ -32,6 +32,7 @@
   <properties>
     <ambari.dir>${project.parent.parent.basedir}</ambari.dir>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    <nodemodules.dir>node_modules</nodemodules.dir> <!-- specify -Dnodemodules.dir option to reduce ambari-web build time by not re-downloading npm modules -->
   </properties>
   <build>
     <plugins>
@@ -100,7 +101,7 @@
             <configuration>
               <executable>${executable.rmdir}</executable>
               <workingDirectory>${basedir}</workingDirectory>
-              <commandlineArgs>${args.rm.clean} public node_modules</commandlineArgs>
+              <commandlineArgs>${args.rm.clean} public ${nodemodules.dir}</commandlineArgs>
               <successCodes>
                 <successCode>0</successCode>
                 <successCode>1</successCode>

http://git-wip-us.apache.org/repos/asf/ambari/blob/77daca7a/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 57c6de7..cf9993b 100644
--- a/pom.xml
+++ b/pom.xml
@@ -60,6 +60,7 @@
     <distMgmtStagingId>apache.staging.https</distMgmtStagingId>
     <distMgmtStagingName>Apache Release Distribution Repository</distMgmtStagingName>
     <distMgmtStagingUrl>https://repository.apache.org/service/local/staging/deploy/maven2</distMgmtStagingUrl>
+    <assemblyPhase>package</assemblyPhase> <!-- use -DassemblyPhase=none to skip building tarball, useful when you want purely compile jar -->
   </properties>
   <pluginRepositories>
     <pluginRepository>


[07/50] [abbrv] ambari git commit: AMBARI-14885: After exporting blueprint from existing cluster knox_master_secret is exported. (arborkar via dili)

Posted by jo...@apache.org.
AMBARI-14885: After exporting blueprint from existing cluster knox_master_secret is exported. (arborkar via dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7d1ab29c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7d1ab29c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7d1ab29c

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 7d1ab29c5860a8cab6f19eb98c0a7bd25529ec5e
Parents: 966f303
Author: Di Li <di...@apache.org>
Authored: Thu Feb 11 13:54:01 2016 -0500
Committer: Di Li <di...@apache.org>
Committed: Thu Feb 11 13:54:01 2016 -0500

----------------------------------------------------------------------
 .../BlueprintConfigurationProcessor.java         |  2 +-
 .../BlueprintConfigurationProcessorTest.java     | 19 +++++++++++++++++--
 2 files changed, 18 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7d1ab29c/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index de31a0d..7fb2592 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -2605,7 +2605,7 @@ public class BlueprintConfigurationProcessor {
    */
   private static class PasswordPropertyFilter implements PropertyFilter {
 
-    private static final Pattern PASSWORD_NAME_REGEX = Pattern.compile("\\S+PASSWORD", Pattern.CASE_INSENSITIVE);
+    private static final Pattern PASSWORD_NAME_REGEX = Pattern.compile("\\S+(PASSWORD|SECRET)", Pattern.CASE_INSENSITIVE);
 
     /**
      * Query to determine if a given property should be included in a collection of

http://git-wip-us.apache.org/repos/asf/ambari/blob/7d1ab29c/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index 7a77a25..9c76e8a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -689,14 +689,21 @@ public class BlueprintConfigurationProcessorTest {
     typeProps.put("test.ssl.password", "test-password-five");
     typeProps.put("test.password.should.be.included", "test-another-pwd");
 
+    //Checking functionality for fields marked as SECRET
+    Map<String, String> secretProps = new HashMap<String, String>();
+    secretProps.put("knox_master_secret", "test-secret-one");
+    secretProps.put("test.secret.should.be.included", "test-another-secret");
     // create a custom config type, to verify that the filters can
     // be applied across all config types
     Map<String, String> customProps = new HashMap<String, String>();
     customProps.put("my_test_PASSWORD", "should be excluded");
     customProps.put("PASSWORD_mytest", "should be included");
 
+    customProps.put("my_test_SECRET", "should be excluded");
+    customProps.put("SECRET_mytest", "should be included");
     properties.put("ranger-yarn-plugin-properties", typeProps);
     properties.put("custom-test-properties", customProps);
+    properties.put("secret-test-properties", secretProps);
 
     Configuration clusterConfig = new Configuration(properties,
       Collections.<String, Map<String, Map<String, String>>>emptyMap());
@@ -721,10 +728,12 @@ public class BlueprintConfigurationProcessorTest {
     configProcessor.doUpdateForBlueprintExport();
 
 
-    assertEquals("Exported properties map was not of the expected size", 1,
+    assertEquals("Exported properties map was not of the expected size", 2,
       properties.get("custom-test-properties").size());
     assertEquals("ranger-yarn-plugin-properties config type was not properly exported", 1,
       properties.get("ranger-yarn-plugin-properties").size());
+    assertEquals("Exported secret properties map was not of the expected size", 1,
+      properties.get("secret-test-properties").size());
 
     // verify that the following password properties matching the "*_PASSWORD" rule have been excluded
     assertFalse("Password property should have been excluded",
@@ -743,9 +752,15 @@ public class BlueprintConfigurationProcessorTest {
     assertTrue("Expected password property not found",
       properties.get("ranger-yarn-plugin-properties").containsKey("test.password.should.be.included"));
 
+    // verify that the following password properties matching the "*_SECRET" rule have been excluded
+    assertFalse("Secret property should have been excluded",
+	      properties.get("secret-test-properties").containsKey("knox_master_secret"));
+    // verify that the property that does not match the "*_SECRET" rule is still included
+    assertTrue("Expected secret property not found",
+	      properties.get("secret-test-properties").containsKey("test.secret.should.be.included"));
     // verify the custom properties map has been modified by the filters
     assertEquals("custom-test-properties type was not properly exported",
-      1, properties.get("custom-test-properties").size());
+      2, properties.get("custom-test-properties").size());
 
     // verify that the following password properties matching the "*_PASSWORD" rule have been excluded
     assertFalse("Password property should have been excluded",


[40/50] [abbrv] ambari git commit: AMBARI-15056. Long hostnames are not truncated in Alerts dialog (alexantonenko)

Posted by jo...@apache.org.
AMBARI-15056. Long hostnames are not truncated in Alerts dialog (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ea699bb9
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ea699bb9
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ea699bb9

Branch: refs/heads/branch-dev-patch-upgrade
Commit: ea699bb9b1be059949fa6f1ddf6f563e52ed4f5f
Parents: 5fcb716
Author: Alex Antonenko <hi...@gmail.com>
Authored: Tue Feb 16 14:56:21 2016 +0200
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Tue Feb 16 15:32:16 2016 +0200

----------------------------------------------------------------------
 ambari-web/app/styles/alerts.less               | 25 +++++++++-------
 .../main/alerts/instance_service_host.hbs       | 30 +++++++++++---------
 .../main/alerts/definition_details_view.js      |  8 ++++++
 3 files changed, 39 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ea699bb9/ambari-web/app/styles/alerts.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/alerts.less b/ambari-web/app/styles/alerts.less
index c4f163d..1b8d5f4 100644
--- a/ambari-web/app/styles/alerts.less
+++ b/ambari-web/app/styles/alerts.less
@@ -70,7 +70,7 @@
 
   .filter-row {
     th {
-      padding: 0px;
+      padding: 0;
       padding-left: 4px;
     }
   }
@@ -253,16 +253,16 @@
   .definition-name {
     .name-text-field{
       margin-left: 2px;
-      margin-bottom: 0px;
+      margin-bottom: 0;
       input {
         width: 99%;
-        margin-bottom: 0px;
+        margin-bottom: 0;
         margin-top: -4px;
         margin-left: -5px;
       }
     }
     .edit-buttons {
-      margin-bottom: 0px;
+      margin-bottom: 0;
       margin-top: -4px;
       float: left;
       margin-left: 7px;
@@ -407,7 +407,7 @@
 
 #manage-alert-notification-content {
   .notification-info .global-info .global-checkbox {
-    margin: 0px;
+    margin: 0;
   }
   .input-label {
     font-weight: bold;
@@ -465,7 +465,7 @@
 
 .create-edit-alert-notification-popup {
   .modal {
-    margin-top: 0px;
+    margin-top: 0;
     top: 5%;
     width: 600px;
   }
@@ -512,10 +512,10 @@
     height: 250px;
   }
   .btn-toolbar {
-    margin-top: 0px;
+    margin-top: 0;
   }
   .manage-configuration-group-content {
-    margin-bottom: 0px;
+    margin-bottom: 0;
   }
   .notification-editable-list {
     .title {
@@ -576,12 +576,17 @@
 }
 
 .alerts-popup-wrap {
+  .trim_hostname{
+    display: block;
+    overflow: hidden;
+    text-overflow: ellipsis;
+  }
   .top-wrap {
     width: 100%;
     border-bottom: 1px solid #CCC;
     text-align: center;
     font-size: 15px;
-    padding: 0px 0px 20px 0px;
+    padding: 0 0 20px 0;
     height: 20px;
     .name-top {
       width: 32%;
@@ -639,7 +644,7 @@
       cursor: pointer;
       border-top: 1px solid #CCC;
       text-align: center;
-      padding: 10px 10px 10px 0px;
+      padding: 10px 10px 10px 0;
       font-size: 16px;
     }
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/ea699bb9/ambari-web/app/templates/main/alerts/instance_service_host.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/alerts/instance_service_host.hbs b/ambari-web/app/templates/main/alerts/instance_service_host.hbs
index cff614c..9aca8df 100644
--- a/ambari-web/app/templates/main/alerts/instance_service_host.hbs
+++ b/ambari-web/app/templates/main/alerts/instance_service_host.hbs
@@ -15,18 +15,20 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 }}
-{{#if view.instance.serviceDisplayName}}
-  {{#if view.serviceIsLink}}
-    <a {{action goToService view.instance.service target="view.parentView"}} href="#">{{view.instance.serviceDisplayName}}</a>
-  {{else}}
-    {{view.instance.serviceDisplayName}}
+<span class="trim_hostname">
+  {{#if view.instance.serviceDisplayName}}
+    {{#if view.serviceIsLink}}
+      <a {{action goToService view.instance.service target="view.parentView"}} href="#">{{view.instance.serviceDisplayName}}</a>
+    {{else}}
+      {{view.instance.serviceDisplayName}}
+    {{/if}}
   {{/if}}
-{{/if}}
-{{#if view.showSeparator}}
-  &nbsp;/&nbsp;
-{{/if}}
-{{#if view.instance.hostName}}
-  <a {{action goToHostAlerts view.instance.host target="view.parentView"}} href="#">
-    {{view.instance.hostName}}
-  </a>
-{{/if}}
\ No newline at end of file
+  {{#if view.showSeparator}}
+    &nbsp;/&nbsp;
+  {{/if}}
+  {{#if view.instance.hostName}}
+    <a rel="UsageTooltip" {{action goToHostAlerts view.instance.host target="view.parentView"}} data-original-title="{{unbound view.instance.hostName}}" href="#">
+      {{view.instance.hostName}}
+    </a>
+  {{/if}}
+</span>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ea699bb9/ambari-web/app/views/main/alerts/definition_details_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/alerts/definition_details_view.js b/ambari-web/app/views/main/alerts/definition_details_view.js
index 7467b0f..2fc75c1 100644
--- a/ambari-web/app/views/main/alerts/definition_details_view.js
+++ b/ambari-web/app/views/main/alerts/definition_details_view.js
@@ -298,6 +298,14 @@ App.AlertInstanceServiceHostView = Em.View.extend({
 
   templateName: require('templates/main/alerts/instance_service_host'),
 
+  didInsertElement: function () {
+    App.tooltip(this.$("[rel='UsageTooltip']"));
+  },
+
+  willDestroyElement: function() {
+    this.$("[rel='UsageTooltip']").remove();
+  },
+
   /**
    * Define whether show link for transition to service page
    */


[44/50] [abbrv] ambari git commit: AMBARI-15050 Https Support for Metrics System (dsen)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index cb34098..3d3987b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -1,30 +1,30 @@
 {
-    "roleCommand": "SERVICE_CHECK", 
-    "clusterName": "c1", 
-    "hostname": "c6401.ambari.apache.org", 
+    "roleCommand": "SERVICE_CHECK",
+    "clusterName": "c1",
+    "hostname": "c6401.ambari.apache.org",
     "hostLevelParams": {
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
-        "ambari_db_rca_password": "mapred", 
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+        "ambari_db_rca_password": "mapred",
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
         "repo_info": "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]",
-        "jce_name": "UnlimitedJCEPolicyJDK7.zip", 
+        "jce_name": "UnlimitedJCEPolicyJDK7.zip",
         "stack_version": "2.0",
-        "stack_name": "HDP", 
-        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "stack_name": "HDP",
+        "ambari_db_rca_driver": "org.postgresql.Driver",
         "jdk_name": "jdk-7u67-linux-x64.tar.gz",
-        "ambari_db_rca_username": "mapred", 
+        "ambari_db_rca_username": "mapred",
         "java_home": "/usr/jdk64/jdk1.7.0_45",
         "java_version": "8",
         "db_name": "ambari",
         "group_list": "[\"hadoop\",\"nobody\",\"users\"]",
         "user_list": "[\"hive\",\"oozie\",\"nobody\",\"ambari-qa\",\"flume\",\"hdfs\",\"storm\",\"mapred\",\"hbase\",\"tez\",\"zookeeper\",\"falcon\",\"sqoop\",\"yarn\",\"hcat\"]"
-    }, 
-    "commandType": "EXECUTION_COMMAND", 
-    "roleParams": {}, 
+    },
+    "commandType": "EXECUTION_COMMAND",
+    "roleParams": {},
     "serviceName": "HIVE",
     "role": "HIVE_SERVER",
     "commandParams": {
-        "command_timeout": "300", 
+        "command_timeout": "300",
         "service_package_folder": "OOZIE",
         "script_type": "PYTHON",
         "script": "scripts/service_check.py",
@@ -37,442 +37,443 @@
         "output_file":"HDFS_CLIENT-configs.tar.gz",
         "refresh_topology": "True"
     },
-    "taskId": 152, 
-    "public_hostname": "c6401.ambari.apache.org", 
+    "taskId": 152,
+    "public_hostname": "c6401.ambari.apache.org",
     "configurations": {
         "mapred-site": {
-            "mapreduce.jobhistory.address": "c6402.ambari.apache.org:10020", 
-            "mapreduce.cluster.administrators": " hadoop", 
-            "mapreduce.reduce.input.buffer.percent": "0.0", 
-            "mapreduce.output.fileoutputformat.compress": "false", 
-            "mapreduce.framework.name": "yarn", 
-            "mapreduce.map.speculative": "false", 
-            "mapreduce.reduce.shuffle.merge.percent": "0.66", 
-            "yarn.app.mapreduce.am.resource.mb": "683", 
-            "mapreduce.map.java.opts": "-Xmx273m", 
-            "mapreduce.application.classpath": "$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*", 
-            "mapreduce.job.reduce.slowstart.completedmaps": "0.05", 
-            "mapreduce.output.fileoutputformat.compress.type": "BLOCK", 
-            "mapreduce.reduce.speculative": "false", 
-            "mapreduce.reduce.java.opts": "-Xmx546m", 
-            "mapreduce.am.max-attempts": "2", 
-            "yarn.app.mapreduce.am.admin-command-opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN", 
-            "mapreduce.reduce.log.level": "INFO", 
-            "mapreduce.map.sort.spill.percent": "0.7", 
-            "mapreduce.task.timeout": "300000", 
-            "mapreduce.map.memory.mb": "341", 
-            "mapreduce.task.io.sort.factor": "100", 
-            "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp", 
-            "mapreduce.reduce.memory.mb": "683", 
-            "yarn.app.mapreduce.am.log.level": "INFO", 
-            "mapreduce.map.log.level": "INFO", 
-            "mapreduce.shuffle.port": "13562", 
-            "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`", 
-            "mapreduce.map.output.compress": "false", 
-            "yarn.app.mapreduce.am.staging-dir": "/user", 
-            "mapreduce.reduce.shuffle.parallelcopies": "30", 
-            "mapreduce.reduce.shuffle.input.buffer.percent": "0.7", 
-            "mapreduce.jobhistory.webapp.address": "c6402.ambari.apache.org:19888", 
-            "mapreduce.jobhistory.done-dir": "/mr-history/done", 
-            "mapreduce.admin.reduce.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN", 
-            "mapreduce.task.io.sort.mb": "136", 
-            "yarn.app.mapreduce.am.command-opts": "-Xmx546m", 
+            "mapreduce.jobhistory.address": "c6402.ambari.apache.org:10020",
+            "mapreduce.cluster.administrators": " hadoop",
+            "mapreduce.reduce.input.buffer.percent": "0.0",
+            "mapreduce.output.fileoutputformat.compress": "false",
+            "mapreduce.framework.name": "yarn",
+            "mapreduce.map.speculative": "false",
+            "mapreduce.reduce.shuffle.merge.percent": "0.66",
+            "yarn.app.mapreduce.am.resource.mb": "683",
+            "mapreduce.map.java.opts": "-Xmx273m",
+            "mapreduce.application.classpath": "$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*",
+            "mapreduce.job.reduce.slowstart.completedmaps": "0.05",
+            "mapreduce.output.fileoutputformat.compress.type": "BLOCK",
+            "mapreduce.reduce.speculative": "false",
+            "mapreduce.reduce.java.opts": "-Xmx546m",
+            "mapreduce.am.max-attempts": "2",
+            "yarn.app.mapreduce.am.admin-command-opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN",
+            "mapreduce.reduce.log.level": "INFO",
+            "mapreduce.map.sort.spill.percent": "0.7",
+            "mapreduce.task.timeout": "300000",
+            "mapreduce.map.memory.mb": "341",
+            "mapreduce.task.io.sort.factor": "100",
+            "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp",
+            "mapreduce.reduce.memory.mb": "683",
+            "yarn.app.mapreduce.am.log.level": "INFO",
+            "mapreduce.map.log.level": "INFO",
+            "mapreduce.shuffle.port": "13562",
+            "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`",
+            "mapreduce.map.output.compress": "false",
+            "yarn.app.mapreduce.am.staging-dir": "/user",
+            "mapreduce.reduce.shuffle.parallelcopies": "30",
+            "mapreduce.reduce.shuffle.input.buffer.percent": "0.7",
+            "mapreduce.jobhistory.webapp.address": "c6402.ambari.apache.org:19888",
+            "mapreduce.jobhistory.done-dir": "/mr-history/done",
+            "mapreduce.admin.reduce.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN",
+            "mapreduce.task.io.sort.mb": "136",
+            "yarn.app.mapreduce.am.command-opts": "-Xmx546m",
             "mapreduce.admin.map.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN"
-        }, 
+        },
         "oozie-site": {
-            "oozie.service.PurgeService.purge.interval": "3600", 
-            "oozie.service.CallableQueueService.queue.size": "1000", 
+            "oozie.service.PurgeService.purge.interval": "3600",
+            "oozie.service.CallableQueueService.queue.size": "1000",
             "oozie.service.SchemaService.wf.ext.schemas": "shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd,hive-action-0.3.xsd",
-            "oozie.service.JPAService.jdbc.url": "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true", 
-            "oozie.service.HadoopAccessorService.nameNode.whitelist": " ", 
-            "use.system.libpath.for.mapreduce.and.pig.jobs": "false", 
-            "oozie.db.schema.name": "oozie", 
-            "oozie.credentials.credentialclasses": "hcat=org.apache.oozie.action.hadoop.HCatCredentials", 
-            "oozie.service.JPAService.create.db.schema": "false", 
-            "oozie.authentication.kerberos.name.rules": "\n        RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/\n        RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/\n        RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n        RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n        DEFAULT", 
-            "oozie.service.ActionService.executor.ext.classes": "\n            org.apache.oozie.action.email.EmailActionExecutor,\n            org.apache.oozie.action.hadoop.HiveActionExecutor,\n            org.apache.oozie.action.hadoop.ShellActionExecutor,\n            org.apache.oozie.action.hadoop.SqoopActionExecutor,\n            org.apache.oozie.action.hadoop.DistcpActionExecutor", 
-            "oozie.base.url": "http://c6402.ambari.apache.org:11000/oozie", 
-            "oozie.service.JPAService.jdbc.password": "asd", 
-            "oozie.service.coord.normal.default.timeout": "120", 
-            "oozie.service.AuthorizationService.security.enabled": "true", 
-            "oozie.service.JPAService.pool.max.active.conn": "10", 
-            "oozie.service.PurgeService.older.than": "30", 
-            "oozie.service.coord.push.check.requeue.interval": "30000", 
-            "oozie.service.HadoopAccessorService.hadoop.configurations": "*=/etc/hadoop/conf", 
-            "oozie.service.HadoopAccessorService.jobTracker.whitelist": " ", 
-            "oozie.service.CallableQueueService.callable.concurrency": "3", 
-            "oozie.service.JPAService.jdbc.username": "oozie", 
-            "oozie.service.CallableQueueService.threads": "10", 
-            "oozie.services.ext": "org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService", 
-            "oozie.systemmode": "NORMAL", 
-            "oozie.service.WorkflowAppService.system.libpath": "/user/${user.name}/share/lib", 
-            "oozie.services": "\n        org.apache.oozie.service.SchedulerService,\n        org.apache.oozie.service.InstrumentationService,\n        org.apache.oozie.service.CallableQueueService,\n        org.apache.oozie.service.UUIDService,\n        org.apache.oozie.service.ELService,\n        org.apache.oozie.service.AuthorizationService,\n        org.apache.oozie.service.UserGroupInformationService,\n        org.apache.oozie.service.HadoopAccessorService,\n        org.apache.oozie.service.URIHandlerService,\n        org.apache.oozie.service.MemoryLocksService,\n        org.apache.oozie.service.DagXLogInfoService,\n        org.apache.oozie.service.SchemaService,\n        org.apache.oozie.service.LiteWorkflowAppService,\n        org.apache.oozie.service.JPAService,\n        org.apache.oozie.service.StoreService,\n        org.apache.oozie.service.CoordinatorStoreService,\n        org.apache.oozie.service.SLAStoreService,\n        org.apache.oozie.service.DBLiteWorkflowStoreServic
 e,\n        org.apache.oozie.service.CallbackService,\n        org.apache.oozie.service.ActionService,\n        org.apache.oozie.service.ActionCheckerService,\n        org.apache.oozie.service.RecoveryService,\n        org.apache.oozie.service.PurgeService,\n        org.apache.oozie.service.CoordinatorEngineService,\n        org.apache.oozie.service.BundleEngineService,\n        org.apache.oozie.service.DagEngineService,\n        org.apache.oozie.service.CoordMaterializeTriggerService,\n        org.apache.oozie.service.StatusTransitService,\n        org.apache.oozie.service.PauseTransitService,\n        org.apache.oozie.service.GroupsService,\n        org.apache.oozie.service.ProxyUserService", 
-            "oozie.service.URIHandlerService.uri.handlers": "org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler", 
-            "oozie.authentication.type": "simple", 
-            "oozie.service.JPAService.jdbc.driver": "org.apache.derby.jdbc.EmbeddedDriver", 
+            "oozie.service.JPAService.jdbc.url": "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true",
+            "oozie.service.HadoopAccessorService.nameNode.whitelist": " ",
+            "use.system.libpath.for.mapreduce.and.pig.jobs": "false",
+            "oozie.db.schema.name": "oozie",
+            "oozie.credentials.credentialclasses": "hcat=org.apache.oozie.action.hadoop.HCatCredentials",
+            "oozie.service.JPAService.create.db.schema": "false",
+            "oozie.authentication.kerberos.name.rules": "\n        RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/\n        RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/\n        RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n        RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n        DEFAULT",
+            "oozie.service.ActionService.executor.ext.classes": "\n            org.apache.oozie.action.email.EmailActionExecutor,\n            org.apache.oozie.action.hadoop.HiveActionExecutor,\n            org.apache.oozie.action.hadoop.ShellActionExecutor,\n            org.apache.oozie.action.hadoop.SqoopActionExecutor,\n            org.apache.oozie.action.hadoop.DistcpActionExecutor",
+            "oozie.base.url": "http://c6402.ambari.apache.org:11000/oozie",
+            "oozie.service.JPAService.jdbc.password": "asd",
+            "oozie.service.coord.normal.default.timeout": "120",
+            "oozie.service.AuthorizationService.security.enabled": "true",
+            "oozie.service.JPAService.pool.max.active.conn": "10",
+            "oozie.service.PurgeService.older.than": "30",
+            "oozie.service.coord.push.check.requeue.interval": "30000",
+            "oozie.service.HadoopAccessorService.hadoop.configurations": "*=/etc/hadoop/conf",
+            "oozie.service.HadoopAccessorService.jobTracker.whitelist": " ",
+            "oozie.service.CallableQueueService.callable.concurrency": "3",
+            "oozie.service.JPAService.jdbc.username": "oozie",
+            "oozie.service.CallableQueueService.threads": "10",
+            "oozie.services.ext": "org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService",
+            "oozie.systemmode": "NORMAL",
+            "oozie.service.WorkflowAppService.system.libpath": "/user/${user.name}/share/lib",
+            "oozie.services": "\n        org.apache.oozie.service.SchedulerService,\n        org.apache.oozie.service.InstrumentationService,\n        org.apache.oozie.service.CallableQueueService,\n        org.apache.oozie.service.UUIDService,\n        org.apache.oozie.service.ELService,\n        org.apache.oozie.service.AuthorizationService,\n        org.apache.oozie.service.UserGroupInformationService,\n        org.apache.oozie.service.HadoopAccessorService,\n        org.apache.oozie.service.URIHandlerService,\n        org.apache.oozie.service.MemoryLocksService,\n        org.apache.oozie.service.DagXLogInfoService,\n        org.apache.oozie.service.SchemaService,\n        org.apache.oozie.service.LiteWorkflowAppService,\n        org.apache.oozie.service.JPAService,\n        org.apache.oozie.service.StoreService,\n        org.apache.oozie.service.CoordinatorStoreService,\n        org.apache.oozie.service.SLAStoreService,\n        org.apache.oozie.service.DBLiteWorkflowStoreServic
 e,\n        org.apache.oozie.service.CallbackService,\n        org.apache.oozie.service.ActionService,\n        org.apache.oozie.service.ActionCheckerService,\n        org.apache.oozie.service.RecoveryService,\n        org.apache.oozie.service.PurgeService,\n        org.apache.oozie.service.CoordinatorEngineService,\n        org.apache.oozie.service.BundleEngineService,\n        org.apache.oozie.service.DagEngineService,\n        org.apache.oozie.service.CoordMaterializeTriggerService,\n        org.apache.oozie.service.StatusTransitService,\n        org.apache.oozie.service.PauseTransitService,\n        org.apache.oozie.service.GroupsService,\n        org.apache.oozie.service.ProxyUserService",
+            "oozie.service.URIHandlerService.uri.handlers": "org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler",
+            "oozie.authentication.type": "simple",
+            "oozie.service.JPAService.jdbc.driver": "org.apache.derby.jdbc.EmbeddedDriver",
             "oozie.system.id": "oozie-${user.name}"
-        }, 
+        },
         "storm-site": {
-            "topology.tuple.serializer": "backtype.storm.serialization.types.ListDelegateSerializer", 
-            "topology.workers": "1", 
-            "drpc.worker.threads": "64", 
-            "storm.zookeeper.servers": "['c6401.ambari.apache.org','c6402.ambari.apache.org']", 
-            "supervisor.heartbeat.frequency.secs": "5", 
-            "topology.executor.send.buffer.size": "1024", 
-            "drpc.childopts": "-Xmx768m", 
-            "nimbus.thrift.port": "6627", 
-            "storm.zookeeper.retry.intervalceiling.millis": "30000", 
-            "storm.local.dir": "/hadoop/storm", 
-            "topology.receiver.buffer.size": "8", 
-            "storm.messaging.netty.client_worker_threads": "1", 
-            "transactional.zookeeper.root": "/transactional", 
-            "drpc.request.timeout.secs": "600", 
-            "topology.skip.missing.kryo.registrations": "false", 
-            "worker.heartbeat.frequency.secs": "1", 
-            "zmq.hwm": "0", 
-            "storm.zookeeper.connection.timeout": "15000", 
-            "topology.max.error.report.per.interval": "5", 
-            "storm.messaging.netty.server_worker_threads": "1", 
-            "supervisor.worker.start.timeout.secs": "120", 
-            "zmq.threads": "1", 
-            "topology.acker.executors": "null", 
-            "storm.local.mode.zmq": "false", 
-            "topology.max.task.parallelism": "null", 
-            "storm.zookeeper.port": "2181", 
-            "nimbus.childopts": "-Xmx1024m", 
-            "worker.childopts": "-Xmx768m", 
-            "drpc.queue.size": "128", 
-            "storm.zookeeper.retry.times": "5", 
-            "nimbus.monitor.freq.secs": "10", 
-            "storm.cluster.mode": "distributed", 
-            "dev.zookeeper.path": "/tmp/dev-storm-zookeeper", 
-            "drpc.invocations.port": "3773", 
-            "storm.zookeeper.root": "/storm", 
-            "logviewer.childopts": "-Xmx128m", 
-            "transactional.zookeeper.port": "null", 
-            "topology.worker.childopts": "null", 
-            "topology.max.spout.pending": "null", 
-            "nimbus.cleanup.inbox.freq.secs": "600", 
-            "storm.messaging.netty.min_wait_ms": "100", 
-            "nimbus.task.timeout.secs": "30", 
-            "nimbus.thrift.max_buffer_size": "1048576", 
-            "topology.sleep.spout.wait.strategy.time.ms": "1", 
-            "topology.optimize": "true", 
-            "nimbus.reassign": "true", 
-            "storm.messaging.transport": "backtype.storm.messaging.netty.Context", 
-            "logviewer.appender.name": "A1", 
-            "nimbus.host": "c6401.ambari.apache.org", 
-            "ui.port": "8744", 
-            "supervisor.slots.ports": "[6700, 6701]", 
-            "nimbus.file.copy.expiration.secs": "600", 
-            "supervisor.monitor.frequency.secs": "3", 
-            "ui.childopts": "-Xmx768m", 
-            "transactional.zookeeper.servers": "null", 
-            "zmq.linger.millis": "5000", 
-            "topology.error.throttle.interval.secs": "10", 
-            "topology.worker.shared.thread.pool.size": "4", 
-            "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib", 
-            "topology.spout.wait.strategy": "backtype.storm.spout.SleepSpoutWaitStrategy", 
-            "task.heartbeat.frequency.secs": "3", 
-            "topology.transfer.buffer.size": "1024", 
-            "storm.zookeeper.session.timeout": "20000", 
-            "topology.executor.receive.buffer.size": "1024", 
-            "topology.stats.sample.rate": "0.05", 
-            "topology.fall.back.on.java.serialization": "true", 
-            "supervisor.childopts": "-Xmx256m", 
-            "topology.enable.message.timeouts": "true", 
-            "storm.messaging.netty.max_wait_ms": "1000", 
-            "nimbus.topology.validator": "backtype.storm.nimbus.DefaultTopologyValidator", 
-            "nimbus.supervisor.timeout.secs": "60", 
-            "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy", 
-            "nimbus.inbox.jar.expiration.secs": "3600", 
-            "drpc.port": "3772", 
-            "topology.kryo.factory": "backtype.storm.serialization.DefaultKryoFactory", 
-            "storm.zookeeper.retry.interval": "1000", 
-            "storm.messaging.netty.max_retries": "30", 
-            "topology.tick.tuple.freq.secs": "null", 
-            "supervisor.enable": "true", 
-            "nimbus.task.launch.secs": "120", 
-            "task.refresh.poll.secs": "10", 
-            "topology.message.timeout.secs": "30", 
-            "storm.messaging.netty.buffer_size": "5242880", 
-            "topology.state.synchronization.timeout.secs": "60", 
-            "supervisor.worker.timeout.secs": "30", 
-            "topology.trident.batch.emit.interval.millis": "500", 
-            "topology.builtin.metrics.bucket.size.secs": "60", 
-            "storm.thrift.transport": "backtype.storm.security.auth.SimpleTransportPlugin", 
-            "logviewer.port": "8000", 
+            "topology.tuple.serializer": "backtype.storm.serialization.types.ListDelegateSerializer",
+            "topology.workers": "1",
+            "drpc.worker.threads": "64",
+            "storm.zookeeper.servers": "['c6401.ambari.apache.org','c6402.ambari.apache.org']",
+            "supervisor.heartbeat.frequency.secs": "5",
+            "topology.executor.send.buffer.size": "1024",
+            "drpc.childopts": "-Xmx768m",
+            "nimbus.thrift.port": "6627",
+            "storm.zookeeper.retry.intervalceiling.millis": "30000",
+            "storm.local.dir": "/hadoop/storm",
+            "topology.receiver.buffer.size": "8",
+            "storm.messaging.netty.client_worker_threads": "1",
+            "transactional.zookeeper.root": "/transactional",
+            "drpc.request.timeout.secs": "600",
+            "topology.skip.missing.kryo.registrations": "false",
+            "worker.heartbeat.frequency.secs": "1",
+            "zmq.hwm": "0",
+            "storm.zookeeper.connection.timeout": "15000",
+            "topology.max.error.report.per.interval": "5",
+            "storm.messaging.netty.server_worker_threads": "1",
+            "supervisor.worker.start.timeout.secs": "120",
+            "zmq.threads": "1",
+            "topology.acker.executors": "null",
+            "storm.local.mode.zmq": "false",
+            "topology.max.task.parallelism": "null",
+            "storm.zookeeper.port": "2181",
+            "nimbus.childopts": "-Xmx1024m",
+            "worker.childopts": "-Xmx768m",
+            "drpc.queue.size": "128",
+            "storm.zookeeper.retry.times": "5",
+            "nimbus.monitor.freq.secs": "10",
+            "storm.cluster.mode": "distributed",
+            "dev.zookeeper.path": "/tmp/dev-storm-zookeeper",
+            "drpc.invocations.port": "3773",
+            "storm.zookeeper.root": "/storm",
+            "logviewer.childopts": "-Xmx128m",
+            "transactional.zookeeper.port": "null",
+            "topology.worker.childopts": "null",
+            "topology.max.spout.pending": "null",
+            "nimbus.cleanup.inbox.freq.secs": "600",
+            "storm.messaging.netty.min_wait_ms": "100",
+            "nimbus.task.timeout.secs": "30",
+            "nimbus.thrift.max_buffer_size": "1048576",
+            "topology.sleep.spout.wait.strategy.time.ms": "1",
+            "topology.optimize": "true",
+            "nimbus.reassign": "true",
+            "storm.messaging.transport": "backtype.storm.messaging.netty.Context",
+            "logviewer.appender.name": "A1",
+            "nimbus.host": "c6401.ambari.apache.org",
+            "ui.port": "8744",
+            "supervisor.slots.ports": "[6700, 6701]",
+            "nimbus.file.copy.expiration.secs": "600",
+            "supervisor.monitor.frequency.secs": "3",
+            "ui.childopts": "-Xmx768m",
+            "transactional.zookeeper.servers": "null",
+            "zmq.linger.millis": "5000",
+            "topology.error.throttle.interval.secs": "10",
+            "topology.worker.shared.thread.pool.size": "4",
+            "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib",
+            "topology.spout.wait.strategy": "backtype.storm.spout.SleepSpoutWaitStrategy",
+            "task.heartbeat.frequency.secs": "3",
+            "topology.transfer.buffer.size": "1024",
+            "storm.zookeeper.session.timeout": "20000",
+            "topology.executor.receive.buffer.size": "1024",
+            "topology.stats.sample.rate": "0.05",
+            "topology.fall.back.on.java.serialization": "true",
+            "supervisor.childopts": "-Xmx256m",
+            "topology.enable.message.timeouts": "true",
+            "storm.messaging.netty.max_wait_ms": "1000",
+            "nimbus.topology.validator": "backtype.storm.nimbus.DefaultTopologyValidator",
+            "nimbus.supervisor.timeout.secs": "60",
+            "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy",
+            "nimbus.inbox.jar.expiration.secs": "3600",
+            "drpc.port": "3772",
+            "topology.kryo.factory": "backtype.storm.serialization.DefaultKryoFactory",
+            "storm.zookeeper.retry.interval": "1000",
+            "storm.messaging.netty.max_retries": "30",
+            "topology.tick.tuple.freq.secs": "null",
+            "supervisor.enable": "true",
+            "nimbus.task.launch.secs": "120",
+            "task.refresh.poll.secs": "10",
+            "topology.message.timeout.secs": "30",
+            "storm.messaging.netty.buffer_size": "5242880",
+            "topology.state.synchronization.timeout.secs": "60",
+            "supervisor.worker.timeout.secs": "30",
+            "topology.trident.batch.emit.interval.millis": "500",
+            "topology.builtin.metrics.bucket.size.secs": "60",
+            "storm.thrift.transport": "backtype.storm.security.auth.SimpleTransportPlugin",
+            "logviewer.port": "8000",
             "topology.debug": "false"
-        }, 
+        },
         "webhcat-site": {
-            "templeton.pig.path": "pig.tar.gz/pig/bin/pig", 
-            "templeton.exec.timeout": "60000", 
-            "templeton.override.enabled": "false", 
-            "templeton.jar": "/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar", 
-            "templeton.zookeeper.hosts": "c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181", 
-            "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=thrift://c6402.ambari.apache.org:9083,hive.metastore.sasl.enabled=yes,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse", 
-            "templeton.storage.class": "org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage", 
-            "templeton.hive.archive": "hdfs:///apps/webhcat/hive.tar.gz", 
-            "templeton.streaming.jar": "hdfs:///apps/webhcat/hadoop-streaming.jar", 
-            "templeton.port": "50111", 
-            "templeton.libjars": "/usr/lib/zookeeper/zookeeper.jar", 
-            "templeton.hadoop": "/usr/bin/hadoop", 
-            "templeton.hive.path": "hive.tar.gz/hive/bin/hive", 
-            "templeton.hadoop.conf.dir": "/etc/hadoop/conf", 
-            "templeton.hcat": "/usr/bin/hcat", 
+            "templeton.pig.path": "pig.tar.gz/pig/bin/pig",
+            "templeton.exec.timeout": "60000",
+            "templeton.override.enabled": "false",
+            "templeton.jar": "/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar",
+            "templeton.zookeeper.hosts": "c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181",
+            "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=thrift://c6402.ambari.apache.org:9083,hive.metastore.sasl.enabled=yes,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse",
+            "templeton.storage.class": "org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage",
+            "templeton.hive.archive": "hdfs:///apps/webhcat/hive.tar.gz",
+            "templeton.streaming.jar": "hdfs:///apps/webhcat/hadoop-streaming.jar",
+            "templeton.port": "50111",
+            "templeton.libjars": "/usr/lib/zookeeper/zookeeper.jar",
+            "templeton.hadoop": "/usr/bin/hadoop",
+            "templeton.hive.path": "hive.tar.gz/hive/bin/hive",
+            "templeton.hadoop.conf.dir": "/etc/hadoop/conf",
+            "templeton.hcat": "/usr/bin/hcat",
             "templeton.pig.archive": "hdfs:///apps/webhcat/pig.tar.gz",
             "templeton.sqoop.archive": "hdfs:///apps/webhcat/sqoop.tar.gz"
-        }, 
+        },
         "capacity-scheduler": {
-            "yarn.scheduler.capacity.node-locality-delay": "40", 
-            "yarn.scheduler.capacity.root.capacity": "100", 
+            "yarn.scheduler.capacity.node-locality-delay": "40",
+            "yarn.scheduler.capacity.root.capacity": "100",
             "yarn.scheduler.capacity.root.acl_administer_queue": "*",
-            "yarn.scheduler.capacity.root.queues": "default", 
-            "yarn.scheduler.capacity.maximum-applications": "10000", 
+            "yarn.scheduler.capacity.root.queues": "default",
+            "yarn.scheduler.capacity.maximum-applications": "10000",
             "yarn.scheduler.capacity.root.default.user-limit-factor": "1",
-            "yarn.scheduler.capacity.root.default.maximum-capacity": "100", 
-            "yarn.scheduler.capacity.root.default.state": "RUNNING", 
-            "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2", 
-            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*", 
-            "yarn.scheduler.capacity.root.default.capacity": "100", 
+            "yarn.scheduler.capacity.root.default.maximum-capacity": "100",
+            "yarn.scheduler.capacity.root.default.state": "RUNNING",
+            "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2",
+            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*",
+            "yarn.scheduler.capacity.root.default.capacity": "100",
             "yarn.scheduler.capacity.root.default.acl_submit_applications": "*"
-        }, 
+        },
         "hdfs-site": {
-            "dfs.namenode.checkpoint.period": "21600", 
-            "dfs.namenode.avoid.write.stale.datanode": "true", 
-            "dfs.block.access.token.enable": "true", 
-            "dfs.support.append": "true", 
+            "dfs.namenode.checkpoint.period": "21600",
+            "dfs.namenode.avoid.write.stale.datanode": "true",
+            "dfs.block.access.token.enable": "true",
+            "dfs.support.append": "true",
+            "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}",
             "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
             "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}", 
             "dfs.cluster.administrators": "test_user1,test_user2 hdfs,test_group",
-            "dfs.replication": "3", 
-            "ambari.dfs.datanode.http.port": "50075", 
-            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
-            "dfs.namenode.safemode.threshold-pct": "1.0f", 
-            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
-            "dfs.permissions.enabled": "true", 
-            "fs.checkpoint.size": "67108864", 
-            "dfs.client.read.shortcircuit": "true", 
-            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
-            "dfs.journalnode.edits.dir": "/grid/0/hdfs/journal", 
-            "dfs.blocksize": "134217728", 
-            "dfs.datanode.max.transfer.threads": "1024", 
-            "dfs.datanode.du.reserved": "1073741824", 
-            "dfs.webhdfs.enabled": "true", 
-            "dfs.namenode.handler.count": "100", 
+            "dfs.replication": "3",
+            "ambari.dfs.datanode.http.port": "50075",
+            "dfs.datanode.balance.bandwidthPerSec": "6250000",
+            "dfs.namenode.safemode.threshold-pct": "1.0f",
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+            "dfs.permissions.enabled": "true",
+            "fs.checkpoint.size": "67108864",
+            "dfs.client.read.shortcircuit": "true",
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+            "dfs.journalnode.edits.dir": "/grid/0/hdfs/journal",
+            "dfs.blocksize": "134217728",
+            "dfs.datanode.max.transfer.threads": "1024",
+            "dfs.datanode.du.reserved": "1073741824",
+            "dfs.webhdfs.enabled": "true",
+            "dfs.namenode.handler.count": "100",
             "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary,/hadoop/hdfs/namesecondary2",
-            "fs.permissions.umask-mode": "022", 
+            "fs.permissions.umask-mode": "022",
             "dfs.datanode.http.address": "0.0.0.0:50075",
-            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
-            "dfs.datanode.data.dir": "/hadoop/hdfs/data", 
-            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
-            "dfs.blockreport.initialDelay": "120", 
-            "dfs.datanode.failed.volumes.tolerated": "0", 
-            "dfs.namenode.accesstime.precision": "0", 
-            "ambari.dfs.datanode.port": "50010", 
-            "dfs.namenode.avoid.read.stale.datanode": "true", 
-            "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090", 
-            "dfs.namenode.stale.datanode.interval": "30000", 
-            "dfs.heartbeat.interval": "3", 
-            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
-            "dfs.permissions.superusergroup": "hdfs", 
-            "dfs.https.port": "50470", 
-            "dfs.journalnode.http-address": "0.0.0.0:8480", 
-            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
-            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
-            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
-            "dfs.datanode.data.dir.perm": "750", 
-            "dfs.namenode.name.dir.restore": "true", 
-            "dfs.replication.max": "50", 
+            "dfs.datanode.ipc.address": "0.0.0.0:8010",
+            "dfs.datanode.data.dir": "/hadoop/hdfs/data",
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+            "dfs.blockreport.initialDelay": "120",
+            "dfs.datanode.failed.volumes.tolerated": "0",
+            "dfs.namenode.accesstime.precision": "0",
+            "ambari.dfs.datanode.port": "50010",
+            "dfs.namenode.avoid.read.stale.datanode": "true",
+            "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090",
+            "dfs.namenode.stale.datanode.interval": "30000",
+            "dfs.heartbeat.interval": "3",
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+            "dfs.permissions.superusergroup": "hdfs",
+            "dfs.https.port": "50470",
+            "dfs.journalnode.http-address": "0.0.0.0:8480",
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+            "dfs.datanode.data.dir.perm": "750",
+            "dfs.namenode.name.dir.restore": "true",
+            "dfs.replication.max": "50",
             "dfs.namenode.name.dir": "/hadoop/hdfs/namenode"
-        }, 
+        },
         "hbase-site": {
-            "hbase.hstore.flush.retries.number": "120", 
-            "hbase.client.keyvalue.maxsize": "10485760", 
-            "hbase.hstore.compactionThreshold": "3", 
-            "hbase.rootdir": "hdfs://c6401.ambari.apache.org:8020/apps/hbase/data", 
-            "hbase.regionserver.handler.count": "60", 
-            "hbase.regionserver.global.memstore.lowerLimit": "0.38", 
-            "hbase.hregion.memstore.block.multiplier": "2", 
-            "hbase.hregion.memstore.flush.size": "134217728", 
-            "hbase.superuser": "hbase", 
-            "hbase.zookeeper.property.clientPort": "2181", 
-            "hbase.regionserver.global.memstore.upperLimit": "0.4", 
-            "zookeeper.session.timeout": "30000", 
-            "hbase.tmp.dir": "/hadoop/hbase", 
-            "hbase.local.dir": "${hbase.tmp.dir}/local", 
-            "hbase.hregion.max.filesize": "10737418240", 
-            "hfile.block.cache.size": "0.40", 
-            "hbase.security.authentication": "simple", 
-            "hbase.defaults.for.version.skip": "true", 
-            "hbase.zookeeper.quorum": "c6401.ambari.apache.org,c6402.ambari.apache.org", 
-            "zookeeper.znode.parent": "/hbase-unsecure", 
+            "hbase.hstore.flush.retries.number": "120",
+            "hbase.client.keyvalue.maxsize": "10485760",
+            "hbase.hstore.compactionThreshold": "3",
+            "hbase.rootdir": "hdfs://c6401.ambari.apache.org:8020/apps/hbase/data",
+            "hbase.regionserver.handler.count": "60",
+            "hbase.regionserver.global.memstore.lowerLimit": "0.38",
+            "hbase.hregion.memstore.block.multiplier": "2",
+            "hbase.hregion.memstore.flush.size": "134217728",
+            "hbase.superuser": "hbase",
+            "hbase.zookeeper.property.clientPort": "2181",
+            "hbase.regionserver.global.memstore.upperLimit": "0.4",
+            "zookeeper.session.timeout": "30000",
+            "hbase.tmp.dir": "/hadoop/hbase",
+            "hbase.local.dir": "${hbase.tmp.dir}/local",
+            "hbase.hregion.max.filesize": "10737418240",
+            "hfile.block.cache.size": "0.40",
+            "hbase.security.authentication": "simple",
+            "hbase.defaults.for.version.skip": "true",
+            "hbase.zookeeper.quorum": "c6401.ambari.apache.org,c6402.ambari.apache.org",
+            "zookeeper.znode.parent": "/hbase-unsecure",
             "hbase.hstore.blockingStoreFiles": "10",
             "hbase.master.port": "60000",
-            "hbase.hregion.majorcompaction": "86400000", 
-            "hbase.security.authorization": "false", 
-            "hbase.cluster.distributed": "true", 
-            "hbase.hregion.memstore.mslab.enabled": "true", 
-            "hbase.client.scanner.caching": "100", 
+            "hbase.hregion.majorcompaction": "86400000",
+            "hbase.security.authorization": "false",
+            "hbase.cluster.distributed": "true",
+            "hbase.hregion.memstore.mslab.enabled": "true",
+            "hbase.client.scanner.caching": "100",
             "hbase.zookeeper.useMulti": "true"
-        }, 
+        },
         "core-site": {
-            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
-            "gluster.daemon.user": "null", 
-            "hadoop.proxyuser.oozie.groups": "users", 
-            "hadoop.proxyuser.hcat.hosts": "c6402.ambari.apache.org", 
-            "hadoop.proxyuser.hive.groups": "users", 
-            "hadoop.security.authentication": "simple", 
-            "hadoop.proxyuser.oozie.hosts": "c6402.ambari.apache.org", 
-            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec", 
-            "mapreduce.jobtracker.webinterface.trusted": "false", 
-            "fs.AbstractFileSystem.glusterfs.impl": "null", 
-            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
-            "fs.trash.interval": "360", 
-            "ipc.client.idlethreshold": "8000", 
-            "io.file.buffer.size": "131072", 
-            "hadoop.security.authorization": "false", 
-            "hadoop.proxyuser.hive.hosts": "c6402.ambari.apache.org", 
-            "hadoop.security.auth_to_local": "\n        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n        RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n        RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT", 
-            "hadoop.proxyuser.hcat.groups": "users", 
-            "ipc.client.connection.maxidletime": "30000", 
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+            "gluster.daemon.user": "null",
+            "hadoop.proxyuser.oozie.groups": "users",
+            "hadoop.proxyuser.hcat.hosts": "c6402.ambari.apache.org",
+            "hadoop.proxyuser.hive.groups": "users",
+            "hadoop.security.authentication": "simple",
+            "hadoop.proxyuser.oozie.hosts": "c6402.ambari.apache.org",
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec",
+            "mapreduce.jobtracker.webinterface.trusted": "false",
+            "fs.AbstractFileSystem.glusterfs.impl": "null",
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
+            "fs.trash.interval": "360",
+            "ipc.client.idlethreshold": "8000",
+            "io.file.buffer.size": "131072",
+            "hadoop.security.authorization": "false",
+            "hadoop.proxyuser.hive.hosts": "c6402.ambari.apache.org",
+            "hadoop.security.auth_to_local": "\n        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n        RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n        RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT",
+            "hadoop.proxyuser.hcat.groups": "users",
+            "ipc.client.connection.maxidletime": "30000",
             "ipc.client.connect.max.retries": "50"
-        }, 
+        },
         "hive-site": {
-            "hive.enforce.sorting": "true", 
-            "javax.jdo.option.ConnectionPassword": "!`\"' 1", 
-            "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver", 
-            "hive.optimize.bucketmapjoin.sortedmerge": "true", 
-            "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider", 
-            "fs.file.impl.disable.cache": "true", 
-            "hive.auto.convert.join.noconditionaltask": "true", 
-            "hive.map.aggr": "true", 
-            "hive.optimize.index.filter": "true", 
-            "hive.security.authorization.enabled": "false", 
-            "hive.optimize.reducededuplication.min.reducer": "1", 
-            "hive.optimize.bucketmapjoin": "true", 
-            "hive.metastore.uris": "thrift://c6402.ambari.apache.org:9083", 
-            "hive.mapjoin.bucket.cache.size": "10000", 
-            "hive.auto.convert.join.noconditionaltask.size": "1000000000", 
-            "hive.vectorized.execution.enabled": "false", 
-            "javax.jdo.option.ConnectionUserName": "hive", 
-            "hive.metastore.cache.pinobjtypes": "Table,Database,Type,FieldSchema,Order", 
-            "hive.optimize.reducededuplication": "true", 
-            "hive.metastore.warehouse.dir": "/apps/hive/warehouse", 
+            "hive.enforce.sorting": "true",
+            "javax.jdo.option.ConnectionPassword": "!`\"' 1",
+            "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver",
+            "hive.optimize.bucketmapjoin.sortedmerge": "true",
+            "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
+            "fs.file.impl.disable.cache": "true",
+            "hive.auto.convert.join.noconditionaltask": "true",
+            "hive.map.aggr": "true",
+            "hive.optimize.index.filter": "true",
+            "hive.security.authorization.enabled": "false",
+            "hive.optimize.reducededuplication.min.reducer": "1",
+            "hive.optimize.bucketmapjoin": "true",
+            "hive.metastore.uris": "thrift://c6402.ambari.apache.org:9083",
+            "hive.mapjoin.bucket.cache.size": "10000",
+            "hive.auto.convert.join.noconditionaltask.size": "1000000000",
+            "hive.vectorized.execution.enabled": "false",
+            "javax.jdo.option.ConnectionUserName": "hive",
+            "hive.metastore.cache.pinobjtypes": "Table,Database,Type,FieldSchema,Order",
+            "hive.optimize.reducededuplication": "true",
+            "hive.metastore.warehouse.dir": "/apps/hive/warehouse",
             "hive.metastore.client.socket.timeout": "60",
-            "hive.auto.convert.join": "true", 
-            "hive.enforce.bucketing": "true", 
-            "hive.mapred.reduce.tasks.speculative.execution": "false", 
-            "hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator", 
-            "javax.jdo.option.ConnectionURL": "jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true", 
-            "hive.auto.convert.sortmerge.join": "true", 
-            "fs.hdfs.impl.disable.cache": "true", 
-            "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider", 
-            "ambari.hive.db.schema.name": "hive", 
-            "hive.metastore.execute.setugi": "true", 
-            "hive.auto.convert.sortmerge.join.noconditionaltask": "true", 
+            "hive.auto.convert.join": "true",
+            "hive.enforce.bucketing": "true",
+            "hive.mapred.reduce.tasks.speculative.execution": "false",
+            "hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator",
+            "javax.jdo.option.ConnectionURL": "jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true",
+            "hive.auto.convert.sortmerge.join": "true",
+            "fs.hdfs.impl.disable.cache": "true",
+            "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
+            "ambari.hive.db.schema.name": "hive",
+            "hive.metastore.execute.setugi": "true",
+            "hive.auto.convert.sortmerge.join.noconditionaltask": "true",
             "hive.server2.enable.doAs": "true",
             "hive.server2.authentication": "NOSASL",
             "hive.server2.transport.mode": "binary",
             "hive.optimize.mapjoin.mapreduce": "true",
             "hive.exec.scratchdir" : "/custompath/tmp/hive"
-        }, 
+        },
 		"ranger-hive-plugin-properties": {
-            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
-            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
-            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
-            "common.name.for.certificate": "-", 
-            "XAAUDIT.HDFS.IS_ENABLED": "false", 
-            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
-            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
-            "XAAUDIT.DB.IS_ENABLED": "true", 
-            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
-            "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
-            "XAAUDIT.SOLR.IS_ENABLED": "false", 
-            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
-            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
-            "policy_user": "ambari-qa", 
-            "UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true", 
-            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
-            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
-            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
-            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
-            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
-            "ranger-hive-plugin-enabled": "No", 
-            "jdbc.driverClassName": "org.apache.hive.jdbc.HiveDriver", 
-            "REPOSITORY_CONFIG_USERNAME": "hive", 
-            "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000", 
-            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
-            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
-            "REPOSITORY_CONFIG_PASSWORD": "hive", 
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
+            "common.name.for.certificate": "-",
+            "XAAUDIT.HDFS.IS_ENABLED": "false",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
+            "XAAUDIT.DB.IS_ENABLED": "true",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
+            "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits",
+            "XAAUDIT.SOLR.IS_ENABLED": "false",
+            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
+            "policy_user": "ambari-qa",
+            "UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true",
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
+            "SSL_TRUSTSTORE_PASSWORD": "changeit",
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
+            "ranger-hive-plugin-enabled": "No",
+            "jdbc.driverClassName": "org.apache.hive.jdbc.HiveDriver",
+            "REPOSITORY_CONFIG_USERNAME": "hive",
+            "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
+            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
+            "REPOSITORY_CONFIG_PASSWORD": "hive",
             "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
         },
 		"ranger-knox-plugin-properties": {
-            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
-            "KNOX_HOME": "/usr/hdp/current/knox-server", 
-            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
-            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
-            "common.name.for.certificate": "-", 
-            "XAAUDIT.HDFS.IS_ENABLED": "false", 
-            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
-            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
-            "XAAUDIT.DB.IS_ENABLED": "true", 
-            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
-            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
-            "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
-            "XAAUDIT.SOLR.IS_ENABLED": "false", 
-            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
-            "ranger-knox-plugin-enabled": "No", 
-            "policy_user": "ambari-qa", 
-            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
-            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
-            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
-            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
-            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
-            "REPOSITORY_CONFIG_USERNAME": "admin", 
-            "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000", 
-            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
-            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
-            "REPOSITORY_CONFIG_PASSWORD": "admin-password", 
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
+            "KNOX_HOME": "/usr/hdp/current/knox-server",
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
+            "common.name.for.certificate": "-",
+            "XAAUDIT.HDFS.IS_ENABLED": "false",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
+            "XAAUDIT.DB.IS_ENABLED": "true",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
+            "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits",
+            "XAAUDIT.SOLR.IS_ENABLED": "false",
+            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
+            "ranger-knox-plugin-enabled": "No",
+            "policy_user": "ambari-qa",
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
+            "SSL_TRUSTSTORE_PASSWORD": "changeit",
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
+            "REPOSITORY_CONFIG_USERNAME": "admin",
+            "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
+            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
+            "REPOSITORY_CONFIG_PASSWORD": "admin-password",
             "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
         },
         "yarn-site": {
-            "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25", 
-            "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor", 
+            "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25",
+            "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor",
             "yarn.nodemanager.local-dirs": "/hadoop/yarn/local,/hadoop/yarn/local1",
-            "yarn.resourcemanager.resource-tracker.address": "c6402.ambari.apache.org:8025", 
-            "yarn.nodemanager.remote-app-log-dir-suffix": "logs", 
-            "yarn.resourcemanager.hostname": "c6402.ambari.apache.org", 
-            "yarn.nodemanager.health-checker.script.timeout-ms": "60000", 
-            "yarn.resourcemanager.scheduler.class": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler", 
-            "yarn.nodemanager.resource.memory-mb": "2048", 
-            "yarn.scheduler.minimum-allocation-mb": "683", 
-            "yarn.resourcemanager.address": "c6402.ambari.apache.org:8050", 
-            "yarn.resourcemanager.scheduler.address": "c6402.ambari.apache.org:8030", 
-            "yarn.log-aggregation.retain-seconds": "2592000", 
-            "yarn.scheduler.maximum-allocation-mb": "2048", 
-            "yarn.log-aggregation-enable": "true", 
-            "yarn.nodemanager.address": "0.0.0.0:45454", 
-            "yarn.nodemanager.container-monitor.interval-ms": "3000", 
-            "yarn.nodemanager.log-aggregation.compression-type": "gz", 
-            "yarn.nodemanager.log.retain-second": "604800", 
-            "yarn.nodemanager.delete.debug-delay-sec": "0", 
+            "yarn.resourcemanager.resource-tracker.address": "c6402.ambari.apache.org:8025",
+            "yarn.nodemanager.remote-app-log-dir-suffix": "logs",
+            "yarn.resourcemanager.hostname": "c6402.ambari.apache.org",
+            "yarn.nodemanager.health-checker.script.timeout-ms": "60000",
+            "yarn.resourcemanager.scheduler.class": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler",
+            "yarn.nodemanager.resource.memory-mb": "2048",
+            "yarn.scheduler.minimum-allocation-mb": "683",
+            "yarn.resourcemanager.address": "c6402.ambari.apache.org:8050",
+            "yarn.resourcemanager.scheduler.address": "c6402.ambari.apache.org:8030",
+            "yarn.log-aggregation.retain-seconds": "2592000",
+            "yarn.scheduler.maximum-allocation-mb": "2048",
+            "yarn.log-aggregation-enable": "true",
+            "yarn.nodemanager.address": "0.0.0.0:45454",
+            "yarn.nodemanager.container-monitor.interval-ms": "3000",
+            "yarn.nodemanager.log-aggregation.compression-type": "gz",
+            "yarn.nodemanager.log.retain-second": "604800",
+            "yarn.nodemanager.delete.debug-delay-sec": "0",
             "yarn.nodemanager.log-dirs": "/hadoop/yarn/log,/hadoop/yarn/log1",
-            "yarn.nodemanager.health-checker.interval-ms": "135000", 
-            "yarn.resourcemanager.am.max-attempts": "2", 
-            "yarn.nodemanager.remote-app-log-dir": "/app-logs", 
-            "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX", 
-            "yarn.nodemanager.aux-services": "mapreduce_shuffle", 
-            "yarn.nodemanager.vmem-check-enabled": "false", 
-            "yarn.nodemanager.vmem-pmem-ratio": "2.1", 
-            "yarn.admin.acl": "*", 
-            "yarn.resourcemanager.webapp.address": "c6402.ambari.apache.org:8088", 
-            "yarn.resourcemanager.nodes.exclude-path": "/etc/hadoop/conf/yarn.exclude", 
-            "yarn.nodemanager.linux-container-executor.group": "hadoop", 
-            "yarn.acl.enable": "true", 
-            "yarn.log.server.url": "http://c6402.ambari.apache.org:19888/jobhistory/logs", 
-            "yarn.application.classpath": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*", 
-            "yarn.resourcemanager.admin.address": "c6402.ambari.apache.org:8141", 
+            "yarn.nodemanager.health-checker.interval-ms": "135000",
+            "yarn.resourcemanager.am.max-attempts": "2",
+            "yarn.nodemanager.remote-app-log-dir": "/app-logs",
+            "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX",
+            "yarn.nodemanager.aux-services": "mapreduce_shuffle",
+            "yarn.nodemanager.vmem-check-enabled": "false",
+            "yarn.nodemanager.vmem-pmem-ratio": "2.1",
+            "yarn.admin.acl": "*",
+            "yarn.resourcemanager.webapp.address": "c6402.ambari.apache.org:8088",
+            "yarn.resourcemanager.nodes.exclude-path": "/etc/hadoop/conf/yarn.exclude",
+            "yarn.nodemanager.linux-container-executor.group": "hadoop",
+            "yarn.acl.enable": "true",
+            "yarn.log.server.url": "http://c6402.ambari.apache.org:19888/jobhistory/logs",
+            "yarn.application.classpath": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*",
+            "yarn.resourcemanager.admin.address": "c6402.ambari.apache.org:8141",
             "yarn.nodemanager.aux-services.mapreduce_shuffle.class": "org.apache.hadoop.mapred.ShuffleHandler",
             "yarn.timeline-service.leveldb-timeline-store.path": "/var/log/hadoop-yarn/timeline",
             "yarn.http.policy": "HTTP_ONLY",
@@ -485,45 +486,45 @@
             "tez.am.am-rm.heartbeat.interval-ms.max": "250"
         },
         "yarn-env": {
-            "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", 
-            "apptimelineserver_heapsize": "1024", 
-            "nodemanager_heapsize": "1024", 
-            "content": "\nexport HADOOP_YARN_HOME={{hadoop_yarn_home}}\nexport YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER\nexport YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\nexport JAVA_HOME={{java64_home}}\n\n# User for YARN daemons\nexport HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}\n\n# resolve links - $0 may be a softlink\nexport YARN_CONF_DIR=\"${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}\"\n\n# some Java parameters\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\nif [ \"$JAVA_HOME\" != \"\" ]; then\n  #echo \"run java in $JAVA_HOME\"\n  JAVA_HOME=$JAVA_HOME\nfi\n\nif [ \"$JAVA_HOME\" = \"\" ]; then\n  echo \"Error: JAVA_HOME is not set.\"\n  exit 1\nfi\n\nJAVA=$JAVA_HOME/bin/java\nJAVA_HEAP_MAX=-Xmx1000m\n\n# For setting YARN specific HEAP sizes please use this\n# Parameter and set appropriately\nYARN_HEAPSIZE={{yarn_heapsize}}\n\n# check envvars which might override default args\nif [ \"$YARN_HEAPSIZE\" != \"\" ]; then\n  JAVA_HEAP_M
 AX=\"-Xmx\"\"$YARN_HEAPSIZE\"\"m\"\nfi\n\n# Resource Manager specific parameters\n\n# Specify the max Heapsize for the ResourceManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_RESOURCEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}\n\n# Specify the JVM options to be used when starting the ResourceManager.\n# These options will be appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_RESOURCEMANAGER_OPTS=\n\n# Node Manager specific parameters\n\n# Specify the max Heapsize for the NodeManager using a numerical value\n# in the scale of MB. For example, to 
 specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_NODEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}\n\n# Specify the max Heapsize for the HistoryManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1024.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_HISTORYSERVER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}\n\n# Specify the JVM options to be used when starting the NodeManager.\n# These options will be
  appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_NODEMANAGER_OPTS=\n\n# so that filenames w/ spaces are handled correctly in loops below\nIFS=\n\n\n# default log directory and file\nif [ \"$YARN_LOG_DIR\" = \"\" ]; then\n  YARN_LOG_DIR=\"$HADOOP_YARN_HOME/logs\"\nfi\nif [ \"$YARN_LOGFILE\" = \"\" ]; then\n  YARN_LOGFILE='yarn.log'\nfi\n\n# default policy file for service-level authorization\nif [ \"$YARN_POLICYFILE\" = \"\" ]; then\n  YARN_POLICYFILE=\"hadoop-policy.xml\"\nfi\n\n# restore ordinary behaviour\nunset IFS\n\n\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.root.logger=$
 {YARN_ROOT_LOGGER:-INFO,console}\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\nif [ \"x$JAVA_LIBRARY_PATH\" != \"x\" ]; then\n  YARN_OPTS=\"$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH\"\nfi\nYARN_OPTS=\"$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE\"", 
-            "yarn_heapsize": "1024", 
-            "yarn_user": "yarn", 
-            "resourcemanager_heapsize": "1024", 
+            "yarn_pid_dir_prefix": "/var/run/hadoop-yarn",
+            "apptimelineserver_heapsize": "1024",
+            "nodemanager_heapsize": "1024",
+            "content": "\nexport HADOOP_YARN_HOME={{hadoop_yarn_home}}\nexport YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER\nexport YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\nexport JAVA_HOME={{java64_home}}\n\n# User for YARN daemons\nexport HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}\n\n# resolve links - $0 may be a softlink\nexport YARN_CONF_DIR=\"${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}\"\n\n# some Java parameters\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\nif [ \"$JAVA_HOME\" != \"\" ]; then\n  #echo \"run java in $JAVA_HOME\"\n  JAVA_HOME=$JAVA_HOME\nfi\n\nif [ \"$JAVA_HOME\" = \"\" ]; then\n  echo \"Error: JAVA_HOME is not set.\"\n  exit 1\nfi\n\nJAVA=$JAVA_HOME/bin/java\nJAVA_HEAP_MAX=-Xmx1000m\n\n# For setting YARN specific HEAP sizes please use this\n# Parameter and set appropriately\nYARN_HEAPSIZE={{yarn_heapsize}}\n\n# check envvars which might override default args\nif [ \"$YARN_HEAPSIZE\" != \"\" ]; then\n  JAVA_HEAP_M
 AX=\"-Xmx\"\"$YARN_HEAPSIZE\"\"m\"\nfi\n\n# Resource Manager specific parameters\n\n# Specify the max Heapsize for the ResourceManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_RESOURCEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}\n\n# Specify the JVM options to be used when starting the ResourceManager.\n# These options will be appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_RESOURCEMANAGER_OPTS=\n\n# Node Manager specific parameters\n\n# Specify the max Heapsize for the NodeManager using a numerical value\n# in the scale of MB. For example, to 
 specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_NODEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}\n\n# Specify the max Heapsize for the HistoryManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1024.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_HISTORYSERVER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}\n\n# Specify the JVM options to be used when starting the NodeManager.\n# These options will be
  appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_NODEMANAGER_OPTS=\n\n# so that filenames w/ spaces are handled correctly in loops below\nIFS=\n\n\n# default log directory and file\nif [ \"$YARN_LOG_DIR\" = \"\" ]; then\n  YARN_LOG_DIR=\"$HADOOP_YARN_HOME/logs\"\nfi\nif [ \"$YARN_LOGFILE\" = \"\" ]; then\n  YARN_LOGFILE='yarn.log'\nfi\n\n# default policy file for service-level authorization\nif [ \"$YARN_POLICYFILE\" = \"\" ]; then\n  YARN_POLICYFILE=\"hadoop-policy.xml\"\nfi\n\n# restore ordinary behaviour\nunset IFS\n\n\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.root.logger=$
 {YARN_ROOT_LOGGER:-INFO,console}\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\nif [ \"x$JAVA_LIBRARY_PATH\" != \"x\" ]; then\n  YARN_OPTS=\"$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH\"\nfi\nYARN_OPTS=\"$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE\"",
+            "yarn_heapsize": "1024",
+            "yarn_user": "yarn",
+            "resourcemanager_heapsize": "1024",
             "yarn_log_dir_prefix": "/var/log/hadoop-yarn",
             "min_user_id": "1000",
             "is_supported_yarn_ranger": "false"
         },
         "hadoop-env": {
-            "namenode_opt_maxnewsize": "200m", 
+            "namenode_opt_maxnewsize": "200m",
             "hdfs_log_dir_prefix": "/var/log/hadoop",
-            "namenode_heapsize": "1024m", 
+            "namenode_heapsize": "1024m",
             "namenode_opt_newsize": "200m",
             "namenode_opt_permsize" : "128m",
             "namenode_opt_maxpermsize" : "256m",
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}
 }/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/
 $USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HAD
 OOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n#
  The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d \"/usr/lib/tez\" ]; then\n  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64", 
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}
 }/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/
 $USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HAD
 OOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n#
  The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d \"/usr/lib/tez\" ]; then\n  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64",
             "hdfs_user": "hdfs",
-            "dtnode_heapsize": "1024m", 
+            "dtnode_heapsize": "1024m",
             "proxyuser_group": "users",
-            "hadoop_heapsize": "1024", 
+            "hadoop_heapsize": "1024",
             "hadoop_pid_dir_prefix": "/var/run/hadoop"
         },
         "hive-env": {
-            "hcat_pid_dir": "/var/run/webhcat", 
-            "hcat_user": "hcat", 
-            "hive_ambari_database": "MySQL", 
-            "hive_hostname": "abtest-3.c.pramod-thangali.internal", 
-            "hive_metastore_port": "9083", 
-            "webhcat_user": "hcat", 
-            "content": "\n if [ \"$SERVICE\" = \"cli\" ]; then\n   if [ -z \"$DEBUG\" ]; then\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit\"\n   else\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n   fi\n fi\n\n# The heap size of the jvm stared by hive shell script can be controlled via:\n\nexport HADOOP_HEAPSIZE=\"{{hive_heapsize}}\"\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n\n# Larger heap size may be required when running queries over large number of files or partitions.\n# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be\n# appropriate for hive server (hwi etc).\n\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hive Configuration Director
 y can be controlled by:\nexport HIVE_CONF_DIR={{conf_dir}}\n\n# Folder containing extra ibraries required for hive compilation/execution can be controlled by:\nif [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\nelif [ -d \"/usr/lib/hive-hcatalog/\" ]; then\n  export HIVE_AUX_JARS_PATH=/usr/lib/hive-hcatalog/share/hcatalog/hive-hcatalog-core-*.jar\nelse\n  export HIVE_AUX_JARS_PATH=/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar\nfi\nexport METASTORE_PORT={{hive_metastore_port}}", 
-            "hive_database_name": "hive", 
-            "hive_database_type": "mysql", 
-            "hive_pid_dir": "/var/run/hive", 
-            "hive_log_dir": "/var/log/hive", 
-            "hive_user": "hive", 
-            "hcat_log_dir": "/var/log/webhcat", 
+            "hcat_pid_dir": "/var/run/webhcat",
+            "hcat_user": "hcat",
+            "hive_ambari_database": "MySQL",
+            "hive_hostname": "abtest-3.c.pramod-thangali.internal",
+            "hive_metastore_port": "9083",
+            "webhcat_user": "hcat",
+            "content": "\n if [ \"$SERVICE\" = \"cli\" ]; then\n   if [ -z \"$DEBUG\" ]; then\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit\"\n   else\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n   fi\n fi\n\n# The heap size of the jvm stared by hive shell script can be controlled via:\n\nexport HADOOP_HEAPSIZE=\"{{hive_heapsize}}\"\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n\n# Larger heap size may be required when running queries over large number of files or partitions.\n# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be\n# appropriate for hive server (hwi etc).\n\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hive Configuration Director
 y can be controlled by:\nexport HIVE_CONF_DIR={{conf_dir}}\n\n# Folder containing extra ibraries required for hive compilation/execution can be controlled by:\nif [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\nelif [ -d \"/usr/lib/hive-hcatalog/\" ]; then\n  export HIVE_AUX_JARS_PATH=/usr/lib/hive-hcatalog/share/hcatalog/hive-hcatalog-core-*.jar\nelse\n  export HIVE_AUX_JARS_PATH=/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar\nfi\nexport METASTORE_PORT={{hive_metastore_port}}",
+            "hive_database_name": "hive",
+            "hive_database_type": "mysql",
+            "hive_pid_dir": "/var/run/hive",
+            "hive_log_dir": "/var/log/hive",
+            "hive_user": "hive",
+            "hcat_log_dir": "/var/log/webhcat",
             "hive_database": "New MySQL Database"
         },
         "ranger-env": {
@@ -545,10 +546,10 @@
           "fetch_nonlocal_groups": "true"
       },
       "hbase-env": {
-            "hbase_pid_dir": "/var/run/hbase", 
-            "hbase_user": "hbase", 
-            "hbase_master_heapsize": "1024m", 
-            "content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport HBASE_OPTS=\"-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintG
 CDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\nexport HBASE_MASTER_OPTS=\"-Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms

<TRUNCATED>

[37/50] [abbrv] ambari git commit: AMBARI-15052. Service delete confirmation using Enter key doesn't remove service. (alexantonenko)

Posted by jo...@apache.org.
AMBARI-15052. Service delete confirmation using Enter key doesn't remove service. (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c9858260
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c9858260
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c9858260

Branch: refs/heads/branch-dev-patch-upgrade
Commit: c9858260547854f0b1077cdbe09e459ee5a7678b
Parents: 7f3928b
Author: Alex Antonenko <hi...@gmail.com>
Authored: Tue Feb 16 01:15:15 2016 +0200
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Tue Feb 16 11:21:09 2016 +0200

----------------------------------------------------------------------
 ambari-web/app/controllers/main/service/item.js | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c9858260/ambari-web/app/controllers/main/service/item.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/item.js b/ambari-web/app/controllers/main/service/item.js
index 0b8d5d7..a127e92 100644
--- a/ambari-web/app/controllers/main/service/item.js
+++ b/ambari-web/app/controllers/main/service/item.js
@@ -1149,11 +1149,16 @@ App.MainServiceItemController = Em.Controller.extend(App.SupportClientConfigsDow
       bodyClass: Em.View.extend({
         confirmKey: confirmKey,
         template: Em.Handlebars.compile(message +
-        '<form class="form-inline align-center"></br>' +
+        '<div class="form-inline align-center"></br>' +
         '<label><b>{{t common.enter}}&nbsp;{{view.confirmKey}}</b></label>&nbsp;' +
         '{{view Ember.TextField valueBinding="view.parentView.confirmInput" class="input-small"}}</br>' +
-        '</form>')
-      })
+        '</div>')
+      }),
+
+      enterKeyPressed: function(e) {
+        if (this.get('disablePrimary')) return;
+        this.onPrimary();
+      }
     });
   },
 


[43/50] [abbrv] ambari git commit: AMBARI-15050 Https Support for Metrics System (dsen)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
index 0ce8e5a..95e931a 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
@@ -811,6 +811,9 @@
         "ams-hbase-log4j": {
             "content": "\n"
         },
+        "ams-ssl-server": {
+            "content": "\n"
+        },
         "ams-site": {
             "timeline.metrics.host.aggregator.minute.ttl": "604800",
             "timeline.metrics.cluster.aggregator.daily.checkpointCutOffMultiplier": "1",
@@ -859,6 +862,7 @@
         "ams-hbase-site": {},
         "ams-hbase-policy": {},
         "ams-hbase-log4j": {},
+        "ams-ssl-server": {},
         "ams-site": {},
         "yarn-site": {
         "final": {
@@ -941,6 +945,9 @@
         "ams-site": {
             "tag": "version1"
         },
+        "ams-ssl-server": {
+            "tag": "version1"
+        },
         "ams-hbase-policy": {
             "tag": "version1"
         },


[45/50] [abbrv] ambari git commit: AMBARI-15050 Https Support for Metrics System (dsen)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/metainfo.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/metainfo.xml
index 66286b3..1970113 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/metainfo.xml
@@ -64,6 +64,7 @@
         <config-type>ranger-kafka-security</config-type>
         <config-type>zookeeper-env</config-type>
         <config-type>zoo.cfg</config-type>
+        <config-type>ams-ssl-client</config-type>
       </configuration-dependencies>
       <restartRequiredAfterChange>true</restartRequiredAfterChange>
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/kafka.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/kafka.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/kafka.py
index 1c01174..830d0ce 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/kafka.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/kafka.py
@@ -74,6 +74,10 @@ def kafka(upgrade_type=None):
     if params.has_metric_collector:
       kafka_server_config['kafka.timeline.metrics.host'] = params.metric_collector_host
       kafka_server_config['kafka.timeline.metrics.port'] = params.metric_collector_port
+      kafka_server_config['kafka.timeline.metrics.protocol'] = params.metric_collector_protocol
+      kafka_server_config['kafka.timeline.metrics.truststore.path'] = params.metric_truststore_path
+      kafka_server_config['kafka.timeline.metrics.truststore.type'] = params.metric_truststore_type
+      kafka_server_config['kafka.timeline.metrics.truststore.password'] = params.metric_truststore_password
 
     kafka_data_dir = kafka_server_config['log.dirs']
     kafka_data_dirs = filter(None, kafka_data_dir.split(","))

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
index da76952..47af240 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
@@ -106,6 +106,10 @@ else:
 
 metric_collector_host = ""
 metric_collector_port = ""
+metric_collector_protocol = ""
+metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
 
 ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
 has_metric_collector = not len(ams_collector_hosts) == 0
@@ -125,6 +129,10 @@ if has_metric_collector:
       metric_collector_port = metric_collector_web_address.split(':')[1]
     else:
       metric_collector_port = '6188'
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
   pass
 # Security-related params
 security_enabled = config['configurations']['cluster-env']['security_enabled']
@@ -274,4 +282,4 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs
-)
\ No newline at end of file
+)

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/metainfo.xml b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/metainfo.xml
index 1468a2f..804374a 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/metainfo.xml
@@ -134,6 +134,7 @@
         <config-type>ranger-admin-site</config-type>
         <config-type>zookeeper-env</config-type>
         <config-type>zoo.cfg</config-type>
+        <config-type>ams-ssl-client</config-type>
       </configuration-dependencies>
       <quickLinksConfigurations>
         <quickLinksConfiguration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
index 38d1951..25da2a1 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
@@ -165,7 +165,14 @@ if has_metric_collector:
 
   metric_collector_report_interval = 60
   metric_collector_app_id = "nimbus"
-
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
+  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+  pass
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 metric_collector_sink_jar = "/usr/lib/storm/lib/ambari-metrics-storm-sink*.jar"
@@ -296,4 +303,4 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs
-)
\ No newline at end of file
+)

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/templates/config.yaml.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/templates/config.yaml.j2 b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/templates/config.yaml.j2
index 01c61bf..2c09bc3 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/templates/config.yaml.j2
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/templates/config.yaml.j2
@@ -57,8 +57,12 @@ enableMetricsSink: True
 metrics_collector:
 
   reportInterval: {{metric_collector_report_interval}}
-  host: "{{metric_collector_host}}"
-  port: {{metric_collector_port}}
+  collector: "{{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}"
   appId: "{{metric_collector_app_id}}"
 
+  # HTTPS settings
+  truststore.path : "{{metric_truststore_path}}"
+  truststore.type : "{{metric_truststore_type}}"
+  truststore.password : "{{metric_truststore_password}}"
+
 {% endif %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/templates/storm-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/templates/storm-metrics2.properties.j2 b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/templates/storm-metrics2.properties.j2
index a5ee7a9..9acf173 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/templates/storm-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/templates/storm-metrics2.properties.j2
@@ -16,7 +16,11 @@
 # limitations under the License.
 #}
 
-collector={{metric_collector_host}}
-port={{metric_collector_port}}
+collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
 maxRowCacheSize=10000
-sendInterval={{metrics_report_interval}}000
\ No newline at end of file
+sendInterval={{metrics_report_interval}}000
+
+# HTTPS properties
+truststore.path = {{metric_truststore_path}}
+truststore.type = {{metric_truststore_type}}
+truststore.password = {{metric_truststore_password}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/metainfo.xml
index 969c2a7..41e19a3 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/metainfo.xml
@@ -150,6 +150,7 @@
         <config-type>core-site</config-type>
         <config-type>mapred-site</config-type>
         <config-type>yarn-log4j</config-type>
+        <config-type>ams-ssl-client</config-type>
       </configuration-dependencies>
       <widgetsFileName>YARN_widgets.json</widgetsFileName>
       <metricsFileName>YARN_metrics.json</metricsFileName>
@@ -259,6 +260,7 @@
         <config-type>ranger-yarn-audit</config-type>
         <config-type>ranger-yarn-policymgr-ssl</config-type>
         <config-type>ranger-yarn-security</config-type>
+        <config-type>ams-ssl-client</config-type>
       </configuration-dependencies>
       <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
       <widgetsFileName>MAPREDUCE2_widgets.json</widgetsFileName>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
index b588f86..2a9d7c5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
@@ -118,6 +118,14 @@ if has_metric_collector:
       metric_collector_port = metric_collector_web_address.split(':')[1]
     else:
       metric_collector_port = '6188'
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
+  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+
   pass
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
index 8e729a4..f9c2164 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -73,16 +73,21 @@ resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
 *.sink.timeline.sendInterval={{metrics_report_interval}}000
 *.sink.timeline.slave.host.name = {{hostname}}
 
-datanode.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-namenode.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-resourcemanager.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-nodemanager.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-historyserver.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-journalnode.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-nimbus.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-supervisor.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-maptask.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-reducetask.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+# HTTPS properties
+*.sink.timeline.truststore.path = {{metric_truststore_path}}
+*.sink.timeline.truststore.type = {{metric_truststore_type}}
+*.sink.timeline.truststore.password = {{metric_truststore_password}}
+
+datanode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+namenode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+resourcemanager.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+nodemanager.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+historyserver.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+journalnode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+nimbus.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+supervisor.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+maptask.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+reducetask.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
 
 resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/test/java/org/apache/ambari/server/configuration/ComponentSSLConfigurationTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/configuration/ComponentSSLConfigurationTest.java b/ambari-server/src/test/java/org/apache/ambari/server/configuration/ComponentSSLConfigurationTest.java
index 58bc47e..92a7ef5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/configuration/ComponentSSLConfigurationTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/configuration/ComponentSSLConfigurationTest.java
@@ -29,12 +29,12 @@ import org.junit.Test;
 public class ComponentSSLConfigurationTest {
 
   public static ComponentSSLConfiguration getConfiguration(String path,
-      String pass, String type, boolean gangliaSSL) {
+      String pass, String type, boolean isSslEnabled) {
     Properties ambariProperties = new Properties();
     ambariProperties.setProperty(Configuration.SSL_TRUSTSTORE_PATH_KEY, path);
     ambariProperties.setProperty(Configuration.SSL_TRUSTSTORE_PASSWORD_KEY, pass);
     ambariProperties.setProperty(Configuration.SSL_TRUSTSTORE_TYPE_KEY, type);
-    ambariProperties.setProperty(Configuration.GANGLIA_HTTPS_KEY, Boolean.toString(gangliaSSL));
+    ambariProperties.setProperty(Configuration.AMRABI_METRICS_HTTPS_ENABLED_KEY, Boolean.toString(isSslEnabled));
 
     Configuration configuration =  new TestConfiguration(ambariProperties);
 
@@ -70,7 +70,7 @@ public class ComponentSSLConfigurationTest {
   public void testIsGangliaSSL() throws Exception {
     ComponentSSLConfiguration sslConfiguration = getConfiguration("tspath",
         "tspass", "tstype", true);
-    Assert.assertTrue(sslConfiguration.isGangliaSSL());
+    Assert.assertTrue(sslConfiguration.isHttpsEnabled());
   }
 
   private static class TestConfiguration extends Configuration {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProviderTest.java
index b513ba5..8611e68 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProviderTest.java
@@ -220,7 +220,7 @@ public class GangliaPropertyProviderTest {
     Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
 
 
-    String expected = (configuration.isGangliaSSL() ? "https" : "http") +
+    String expected = (configuration.isHttpsEnabled() ? "https" : "http") +
         "://domU-12-31-39-0E-34-E1.compute-1.internal/cgi-bin/rrd.py?c=HDPDataNode%2CHDPSlaves&h=domU-12-31-39-0E-34-E1.compute-1.internal&m=jvm.metrics.gcCount&s=10&e=20&r=1";
     Assert.assertEquals(expected, streamProvider.getLastSpec());
 
@@ -269,7 +269,7 @@ public class GangliaPropertyProviderTest {
     
     URIBuilder expectedUri = new URIBuilder();
 
-    expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
+    expectedUri.setScheme((configuration.isHttpsEnabled() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
     expectedUri.setParameter("c", "HDPTaskTracker,HDPSlaves");
@@ -418,7 +418,7 @@ public class GangliaPropertyProviderTest {
     
     URIBuilder uriBuilder = new URIBuilder();
 
-    uriBuilder.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
+    uriBuilder.setScheme((configuration.isHttpsEnabled() ? "https" : "http"));
     uriBuilder.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     uriBuilder.setPath("/cgi-bin/rrd.py");
     uriBuilder.setParameter("c", "HDPJobTracker,HDPHBaseMaster,HDPResourceManager,HDPFlumeServer,HDPSlaves,HDPHistoryServer,HDPJournalNode,HDPTaskTracker,HDPHBaseRegionServer,HDPNameNode");
@@ -435,7 +435,7 @@ public class GangliaPropertyProviderTest {
         "HDPSlaves", "HDPHistoryServer", "HDPJournalNode", "HDPTaskTracker", "HDPHBaseRegionServer", "HDPNameNode"});
     List<String> hosts = Arrays.asList(new String[]{"domU-12-31-39-0E-34-E3.compute-1.internal", "domU-12-31-39-0E-34-E1.compute-1.internal",
         "domU-12-31-39-0E-34-E2.compute-1.internal"});
-    int httpsVariation = configuration.isGangliaSSL() ? 1 : 0;
+    int httpsVariation = configuration.isHttpsEnabled() ? 1 : 0;
 
     Assert.assertEquals(expected.substring(0, 66 + httpsVariation), streamProvider.getLastSpec().substring(0, 66 + httpsVariation));
     Assert.assertTrue(CollectionPresentationUtils.isStringPermutationOfCollection(streamProvider.getLastSpec().substring(66 + httpsVariation, 236 + httpsVariation), components, "%2C", 0, 0));
@@ -487,7 +487,7 @@ public class GangliaPropertyProviderTest {
     
     URIBuilder expectedUri = new URIBuilder();
     
-    expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
+    expectedUri.setScheme((configuration.isHttpsEnabled() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
     expectedUri.setParameter("c", "HDPJobTracker,HDPHBaseMaster,HDPResourceManager,HDPFlumeServer,HDPSlaves,HDPHistoryServer,HDPJournalNode,HDPTaskTracker,HDPHBaseRegionServer,HDPNameNode");
@@ -543,7 +543,7 @@ public class GangliaPropertyProviderTest {
     
     URIBuilder expectedUri = new URIBuilder();
     
-    expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
+    expectedUri.setScheme((configuration.isHttpsEnabled() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
     expectedUri.setParameter("c", "HDPFlumeServer,HDPSlaves");
@@ -606,7 +606,7 @@ public class GangliaPropertyProviderTest {
     
     URIBuilder expectedUri = new URIBuilder();
 
-    expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
+    expectedUri.setScheme((configuration.isHttpsEnabled() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
     expectedUri.setParameter("c", "HDPFlumeServer,HDPSlaves");
@@ -653,12 +653,12 @@ public class GangliaPropertyProviderTest {
 
     Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
 
-    String expected = (configuration.isGangliaSSL() ? "https" : "http") +
+    String expected = (configuration.isHttpsEnabled() ? "https" : "http") +
         "://domU-12-31-39-0E-34-E1.compute-1.internal/cgi-bin/rrd.py?c=HDPFlumeServer%2CHDPSlaves&h=ip-10-39-113-33.ec2.internal&m=";
 
     // Depends on hashing, string representation can be different
     List<String> components = Arrays.asList(new String[]{"HDPFlumeServer", "HDPSlaves"});
-    int httpsVariation = configuration.isGangliaSSL() ? 1 : 0;
+    int httpsVariation = configuration.isHttpsEnabled() ? 1 : 0;
 
     Assert.assertEquals(expected.substring(0, 66 + httpsVariation), streamProvider.getLastSpec().substring(0, 66 + httpsVariation));
     Assert.assertTrue(CollectionPresentationUtils.isStringPermutationOfCollection(streamProvider.getLastSpec().substring(66 + httpsVariation, 92 + httpsVariation), components, "%2C", 0, 0));
@@ -704,7 +704,7 @@ public class GangliaPropertyProviderTest {
     
     URIBuilder expectedUri = new URIBuilder();
 
-    expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
+    expectedUri.setScheme((configuration.isHttpsEnabled() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
     expectedUri.setParameter("c", "HDPFlumeServer,HDPSlaves");
@@ -762,7 +762,7 @@ public class GangliaPropertyProviderTest {
     
     URIBuilder expectedUri = new URIBuilder();
 
-    expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
+    expectedUri.setScheme((configuration.isHttpsEnabled() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
     expectedUri.setParameter("c", "HDPFlumeServer,HDPSlaves");
@@ -821,7 +821,7 @@ public class GangliaPropertyProviderTest {
     
     URIBuilder expectedUri = new URIBuilder();
 
-    expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
+    expectedUri.setScheme((configuration.isHttpsEnabled() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
     expectedUri.setParameter("c", "HDPFlumeServer,HDPSlaves");
@@ -880,7 +880,7 @@ public class GangliaPropertyProviderTest {
     
     URIBuilder expectedUri = new URIBuilder();
 
-    expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
+    expectedUri.setScheme((configuration.isHttpsEnabled() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
     expectedUri.setParameter("c", "HDPFlumeServer,HDPSlaves");

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaReportPropertyProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaReportPropertyProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaReportPropertyProviderTest.java
index 0f01b24..2ec9e4f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaReportPropertyProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaReportPropertyProviderTest.java
@@ -95,7 +95,7 @@ public class GangliaReportPropertyProviderTest {
 
     Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
 
-    String expected = (configuration.isGangliaSSL() ? "https" : "http") + "://domU-12-31-39-0E-34-E1.compute-1.internal/ganglia/graph.php?g=load_report&json=1";
+    String expected = (configuration.isHttpsEnabled() ? "https" : "http") + "://domU-12-31-39-0E-34-E1.compute-1.internal/ganglia/graph.php?g=load_report&json=1";
     Assert.assertEquals(expected, streamProvider.getLastSpec());
 
     Assert.assertEquals(2, PropertyHelper.getProperties(resource).size());

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProviderTest.java
index 1394081..3adf9f7 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProviderTest.java
@@ -215,7 +215,7 @@ public class AMSPropertyProviderTest {
     Resource res = resources.iterator().next();
     Map<String, Object> properties = PropertyHelper.getProperties(resources.iterator().next());
     Assert.assertNotNull(properties);
-    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder.addParameter("metricNames", "cpu_user");
     uriBuilder.addParameter("hostname", "h1");
     uriBuilder.addParameter("appId", "HOST");
@@ -264,7 +264,7 @@ public class AMSPropertyProviderTest {
     Resource res = resources.iterator().next();
     Map<String, Object> properties = PropertyHelper.getProperties(res);
     Assert.assertNotNull(properties);
-    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder.addParameter("metricNames", "cpu_user");
     uriBuilder.addParameter("hostname", "h1");
     uriBuilder.addParameter("appId", "HOST");
@@ -309,12 +309,12 @@ public class AMSPropertyProviderTest {
     Resource res = resources.iterator().next();
     Map<String, Object> properties = PropertyHelper.getProperties(resources.iterator().next());
     Assert.assertNotNull(properties);
-    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder.addParameter("metricNames", "cpu_user,mem_free");
     uriBuilder.addParameter("hostname", "h1");
     uriBuilder.addParameter("appId", "HOST");
 
-    URIBuilder uriBuilder2 = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder2 = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder2.addParameter("metricNames", "mem_free,cpu_user");
     uriBuilder2.addParameter("hostname", "h1");
     uriBuilder2.addParameter("appId", "HOST");
@@ -367,14 +367,14 @@ public class AMSPropertyProviderTest {
     Resource res = resources.iterator().next();
     Map<String, Object> properties = PropertyHelper.getProperties(resources.iterator().next());
     Assert.assertNotNull(properties);
-    URIBuilder uriBuilder1 = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder1 = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder1.addParameter("metricNames", "cpu_user,mem_free");
     uriBuilder1.addParameter("hostname", "h1");
     uriBuilder1.addParameter("appId", "HOST");
     uriBuilder1.addParameter("startTime", "1416445244701");
     uriBuilder1.addParameter("endTime", "1416448936564");
 
-    URIBuilder uriBuilder2 = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder2 = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder2.addParameter("metricNames", "mem_free,cpu_user");
     uriBuilder2.addParameter("hostname", "h1");
     uriBuilder2.addParameter("appId", "HOST");
@@ -435,7 +435,7 @@ public class AMSPropertyProviderTest {
     Resource res = resources.iterator().next();
     Map<String, Object> properties = PropertyHelper.getProperties(resources.iterator().next());
     Assert.assertNotNull(properties);
-    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder.addParameter("metricNames", "yarn.QueueMetrics.Queue=root.AvailableMB");
     uriBuilder.addParameter("appId", "RESOURCEMANAGER");
     uriBuilder.addParameter("startTime", "1416528759233");
@@ -484,7 +484,7 @@ public class AMSPropertyProviderTest {
     Resource res = resources.iterator().next();
     Map<String, Object> properties = PropertyHelper.getProperties(resources.iterator().next());
     Assert.assertNotNull(properties);
-    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder.addParameter("metricNames", "rpc.rpc.RpcQueueTimeAvgTime");
     uriBuilder.addParameter("appId", "NAMENODE");
     uriBuilder.addParameter("startTime", "1416528759233");
@@ -558,7 +558,7 @@ public class AMSPropertyProviderTest {
     Resource res = resources.iterator().next();
     Map<String, Object> properties = PropertyHelper.getProperties(resources.iterator().next());
     Assert.assertNotNull(properties);
-    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder.addParameter("metricNames", "regionserver.Server.totalRequestCount");
     uriBuilder.addParameter("appId", "AMS-HBASE");
     uriBuilder.addParameter("startTime", "1421694000");
@@ -631,7 +631,7 @@ public class AMSPropertyProviderTest {
     Resource res = resources.iterator().next();
     Map<String, Object> properties = PropertyHelper.getProperties(resources.iterator().next());
     Assert.assertNotNull(properties);
-    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder.addParameter("metricNames", "rpc.rpc.NumOpenConnections._sum");
     uriBuilder.addParameter("appId", "HBASE");
     uriBuilder.addParameter("startTime", "1429824611300");
@@ -675,7 +675,7 @@ public class AMSPropertyProviderTest {
     Resource res = resources.iterator().next();
     Map<String, Object> properties = PropertyHelper.getProperties(resources.iterator().next());
     Assert.assertNotNull(properties);
-    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder.addParameter("metricNames", "cpu_user");
     uriBuilder.addParameter("hostname", "h1");
     uriBuilder.addParameter("appId", "HOST");
@@ -771,7 +771,7 @@ public class AMSPropertyProviderTest {
     Assert.assertNotNull(hostMetricSpec);
     Assert.assertNotNull(hostComponentMetricsSpec);
     // Verify calls
-    URIBuilder uriBuilder1 = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder1 = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder1.addParameter("metricNames", "dfs.datanode.BlocksReplicated");
     uriBuilder1.addParameter("hostname", "h1");
     uriBuilder1.addParameter("appId", "DATANODE");
@@ -779,7 +779,7 @@ public class AMSPropertyProviderTest {
     uriBuilder1.addParameter("endTime", "1416448936464");
     Assert.assertEquals(uriBuilder1.toString(), hostComponentMetricsSpec);
 
-    URIBuilder uriBuilder2 = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder2 = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder2.addParameter("metricNames", "cpu_user");
     uriBuilder2.addParameter("hostname", "h1");
     uriBuilder2.addParameter("appId", "HOST");
@@ -904,7 +904,7 @@ public class AMSPropertyProviderTest {
     Set<String> specs = streamProvider.getAllSpecs();
     Assert.assertEquals(2, specs.size());
 
-    URIBuilder uriBuilder1 = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder1 = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     Number[][] val;
 
     for (String spec : specs) {
@@ -958,7 +958,7 @@ public class AMSPropertyProviderTest {
 
     @Override
     public String getCollectorPort(String clusterName, MetricsService service) throws SystemException {
-      return "8188";
+      return "6188";
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSReportPropertyProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSReportPropertyProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSReportPropertyProviderTest.java
index 1a3afbc..b96d45a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSReportPropertyProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSReportPropertyProviderTest.java
@@ -93,7 +93,7 @@ public class AMSReportPropertyProviderTest {
     Resource res = resources.iterator().next();
     Map<String, Object> properties = PropertyHelper.getProperties(resources.iterator().next());
     Assert.assertNotNull(properties);
-    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder.addParameter("metricNames", "cpu_user");
     uriBuilder.addParameter("appId", "HOST");
     uriBuilder.addParameter("startTime", "1416445244800");
@@ -136,7 +136,7 @@ public class AMSReportPropertyProviderTest {
     Resource res = resources.iterator().next();
     Map<String, Object> properties = PropertyHelper.getProperties(resources.iterator().next());
     Assert.assertNotNull(properties);
-    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder.addParameter("metricNames", "cpu_user._sum");
     uriBuilder.addParameter("appId", "HOST");
     uriBuilder.addParameter("startTime", "1432033257812");

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py b/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py
index 1c83bb7..96e2286 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py
@@ -131,6 +131,14 @@ class TestMetricsCollector(RMFTestCase):
                               configurations = self.getConfig()['configurations']['ams-site'],
                               configuration_attributes = self.getConfig()['configuration_attributes']['ams-hbase-site']
     )
+
+    self.assertResourceCalled('XmlConfig', 'ssl-server.xml',
+                              owner = 'ams',
+                              group = 'hadoop',
+                              conf_dir = '/etc/ambari-metrics-collector/conf',
+                              configurations = self.getConfig()['configurations']['ams-ssl-server'],
+                              configuration_attributes = self.getConfig()['configuration_attributes']['ams-ssl-server']
+    )
     merged_ams_hbase_site = {}
     merged_ams_hbase_site.update(self.getConfig()['configurations']['ams-hbase-site'])
     merged_ams_hbase_site['phoenix.query.maxGlobalMemoryPercentage'] = '25'


[18/50] [abbrv] ambari git commit: AMBARI-15021. Set AMBARI_METRICS as the default datasource. (Prajwal Rao via yusaku)

Posted by jo...@apache.org.
AMBARI-15021. Set AMBARI_METRICS as the default datasource. (Prajwal Rao via yusaku)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c47fff35
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c47fff35
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c47fff35

Branch: refs/heads/branch-dev-patch-upgrade
Commit: c47fff3576382e0e4efcd9f8b952b7362cec4390
Parents: 6eed333
Author: Yusaku Sako <yu...@hortonworks.com>
Authored: Thu Feb 11 19:12:39 2016 -0800
Committer: Yusaku Sako <yu...@hortonworks.com>
Committed: Thu Feb 11 19:12:39 2016 -0800

----------------------------------------------------------------------
 .../0.1.0/package/templates/metrics_grafana_datasource.json.j2   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c47fff35/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metrics_grafana_datasource.json.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metrics_grafana_datasource.json.j2 b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metrics_grafana_datasource.json.j2
index a803da5..da04668 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metrics_grafana_datasource.json.j2
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metrics_grafana_datasource.json.j2
@@ -28,6 +28,6 @@
   "basicAuthUser": "",
   "basicAuthPassword": "",
   "withCredentials": false,
-  "isDefault": false,
+  "isDefault": true,
   "jsonData": {}
-}
\ No newline at end of file
+}


[48/50] [abbrv] ambari git commit: AMBARI-15059. AUI for user home directory creation (akovalenko)

Posted by jo...@apache.org.
AMBARI-15059. AUI for user home directory creation (akovalenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a4f8a956
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a4f8a956
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a4f8a956

Branch: refs/heads/branch-dev-patch-upgrade
Commit: a4f8a9567b68c755eb78438233959e50d2095a41
Parents: aa06ebe
Author: Aleksandr Kovalenko <ak...@hortonworks.com>
Authored: Tue Feb 16 18:46:51 2016 +0200
Committer: Aleksandr Kovalenko <ak...@hortonworks.com>
Committed: Tue Feb 16 19:34:59 2016 +0200

----------------------------------------------------------------------
 .../main/resources/ui/admin-web/app/index.html  |  1 +
 .../loginActivities/HomeDirectoryCtrl.js        | 31 ++++++++++
 .../ui/admin-web/app/scripts/i18n.config.js     | 11 +++-
 .../resources/ui/admin-web/app/styles/main.css  |  3 +
 .../views/loginActivities/homeDirectory.html    | 63 +++++++++++++++++++-
 .../app/views/loginActivities/loginMessage.html |  2 +-
 6 files changed, 107 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a4f8a956/ambari-admin/src/main/resources/ui/admin-web/app/index.html
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/index.html b/ambari-admin/src/main/resources/ui/admin-web/app/index.html
index fd2c6b8..e7cda02 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/index.html
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/index.html
@@ -130,6 +130,7 @@
     <script src="scripts/controllers/authentication/AuthenticationMainCtrl.js"></script>
     <script src="scripts/controllers/loginActivities/LoginActivitiesMainCtrl.js"></script>
     <script src="scripts/controllers/loginActivities/LoginMessageMainCtrl.js"></script>
+    <script src="scripts/controllers/loginActivities/HomeDirectoryCtrl.js"></script>
     <script src="scripts/controllers/users/UsersCreateCtrl.js"></script>
     <script src="scripts/controllers/users/UsersListCtrl.js"></script>
     <script src="scripts/controllers/users/UsersShowCtrl.js"></script>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a4f8a956/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/loginActivities/HomeDirectoryCtrl.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/loginActivities/HomeDirectoryCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/loginActivities/HomeDirectoryCtrl.js
new file mode 100644
index 0000000..582b68b
--- /dev/null
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/loginActivities/HomeDirectoryCtrl.js
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+'use strict';
+
+angular.module('ambariAdminConsole')
+  .controller('HomeDirectoryCtrl',['$scope', function($scope) {
+
+      $scope.TEMPLATE_PLACEHOLER = '/user/{{username}}';
+
+      $scope.autoCreate = false;
+      $scope.template = '';
+      $scope.group = '';
+      $scope.permissions = '';
+
+      $scope.save = function () {}
+  }]);

http://git-wip-us.apache.org/repos/asf/ambari/blob/a4f8a956/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js
index 327ae03..0c67831 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js
@@ -75,6 +75,8 @@ angular.module('ambariAdminConsole')
       'warning': 'Warning',
       'filterInfo': '{{showed}} of {{total}} {{term}} showing',
       'usersGroups': 'Users/Groups',
+      'enabled': 'Enabled',
+      'disabled': 'Disabled',
 
       'clusterNameChangeConfirmation': {
         'title': 'Confirm Cluster Name Change',
@@ -92,8 +94,13 @@ angular.module('ambariAdminConsole')
         'message': 'Message',
         'buttonText': 'Button',
         'status': 'Status',
-        'status.enabled': 'Enabled',
-        'status.disabled': 'Disabled'
+        'status.disabled': 'Disabled',
+        'homeDirectory.alert': 'Many Ambari Views store user preferences in the logged in user\'s / user directory in HDFS. Optionally, Ambari can auto-create these directories for users on login.',
+        'homeDirectory.autoCreate': 'Auto-Create HDFS user directories',
+        'homeDirectory.header': 'User Directory Creation Options',
+        'homeDirectory.template': 'User Directory creation template',
+        'homeDirectory.group': 'Default Group',
+        'homeDirectory.permissions': 'Permissions'
       },
 
       'controls': {

http://git-wip-us.apache.org/repos/asf/ambari/blob/a4f8a956/ambari-admin/src/main/resources/ui/admin-web/app/styles/main.css
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/styles/main.css b/ambari-admin/src/main/resources/ui/admin-web/app/styles/main.css
index cc57fa3..957e4be 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/styles/main.css
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/styles/main.css
@@ -687,6 +687,9 @@ table.no-border tr td{
 .login-message-pane i.inactive {color: #d9534f;margin-top: 2px;}
 .login-message-pane .on-off-switch-wrap {height:32px;}
 
+.home-directory-pane .separator {
+  margin-top: 10px;
+}
 /*.login-message-pane .well {height: 74px;}
 .login-message-pane input {margin-left: 3px;}*/
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a4f8a956/ambari-admin/src/main/resources/ui/admin-web/app/views/loginActivities/homeDirectory.html
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/views/loginActivities/homeDirectory.html b/ambari-admin/src/main/resources/ui/admin-web/app/views/loginActivities/homeDirectory.html
index a37e281..ee5d860 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/views/loginActivities/homeDirectory.html
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/views/loginActivities/homeDirectory.html
@@ -16,4 +16,65 @@
 * limitations under the License.
 -->
 
-Home Directory
\ No newline at end of file
+<br/>
+<div class="home-directory-pane" ng-controller="HomeDirectoryCtrl">
+  <form class="form-horizontal" novalidate name="form" autocomplete="off">
+    <div class="well">
+      <div class="alert alert-info">
+        {{'common.loginActivities.homeDirectory.alert' | translate}}
+      </div>
+      <fieldset>
+        <div class="form-group">
+          <label class="col-sm-4 control-label">{{'common.loginActivities.homeDirectory.autoCreate' | translate}}</label>
+          <div class="col-sm-8">
+            <toggle-switch model="autoCreate" on-label="{{'common.enabled' | translate}}" off-label="{{'common.disabled' | translate}}" class="switch-primary"></toggle-switch>
+          </div>
+          <input type="checkbox" name="autoCreate" class="hidden" ng-model="autoCreate">
+        </div>
+        <h4>{{'common.loginActivities.homeDirectory.header' | translate}}</h4>
+        <hr class="separator"/>
+        <div class="form-group">
+          <label class="col-sm-4 control-label">{{'common.loginActivities.homeDirectory.template' | translate}}</label>
+          <div class="col-sm-8">
+            <input type="text"
+                   class="form-control"
+                   name="template"
+                   placeholder="{{TEMPLATE_PLACEHOLER}}"
+                   ng-model="template"
+                   ng-disabled="!autoCreate"
+                   autocomplete="off">
+          </div>
+        </div>
+        <div class="form-group">
+          <label class="col-sm-4 control-label">{{'common.loginActivities.homeDirectory.group' | translate}}</label>
+          <div class="col-sm-8">
+            <input type="text"
+                   class="form-control"
+                   name="template"
+                   placeholder="users"
+                   ng-model="group"
+                   ng-disabled="!autoCreate"
+                   autocomplete="off">
+          </div>
+        </div>
+        <div class="form-group">
+          <label class="col-sm-4 control-label">{{'common.loginActivities.homeDirectory.permissions' | translate}}</label>
+          <div class="col-sm-8">
+            <input type="text"
+                   class="form-control"
+                   name="template"
+                   placeholder="750"
+                   ng-model="permissions"
+                   ng-disabled="!autoCreate"
+                   autocomplete="off">
+          </div>
+        </div>
+          <button
+              class="btn btn-primary pull-right"
+              ng-click="save()">
+            {{'common.controls.save' | translate}}
+          </button>
+      </fieldset>
+    </div>
+  </form>
+</div>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/a4f8a956/ambari-admin/src/main/resources/ui/admin-web/app/views/loginActivities/loginMessage.html
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/views/loginActivities/loginMessage.html b/ambari-admin/src/main/resources/ui/admin-web/app/views/loginActivities/loginMessage.html
index 1daf54c..96217f5 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/views/loginActivities/loginMessage.html
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/views/loginActivities/loginMessage.html
@@ -24,7 +24,7 @@
         <div class="form-group">
           <label class="col-sm-2 control-label">{{'common.loginActivities.status' | translate}}</label>
           <div class="col-sm-10">
-            <toggle-switch ng-click="changeStatus();" model="status" on-label="{{'common.loginActivities.status.enabled' | translate}}" off-label="{{'common.loginActivities.status.disabled' | translate}}" class="switch-primary userstatus" data-off-color="disabled"></toggle-switch>
+            <toggle-switch ng-click="changeStatus();" model="status" on-label="{{'common.enabled' | translate}}" off-label="{{'common.disabled' | translate}}" class="switch-primary userstatus" data-off-color="disabled"></toggle-switch>
           </div>
           <input type="checkbox" name="status" class="hidden" ng-model="status">
         </div>


[14/50] [abbrv] ambari git commit: AMBARI-15004. RU/EU: Upgrading Oozie database fails since new configs are not yet written to /usr/hdp/current/oozie-server/conf (alejandro)

Posted by jo...@apache.org.
AMBARI-15004. RU/EU: Upgrading Oozie database fails since new configs are not yet written to /usr/hdp/current/oozie-server/conf (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1ead2505
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1ead2505
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1ead2505

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 1ead250562513b38e7f2eb57df7cd4bb1d68f06e
Parents: 8aab632
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Thu Feb 11 13:50:47 2016 -0800
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Thu Feb 11 17:09:45 2016 -0800

----------------------------------------------------------------------
 .../state/stack/upgrade/ClusterGrouping.java    |  7 ++++
 .../state/stack/upgrade/ExecuteHostType.java    |  7 ++++
 .../state/stack/upgrade/TaskWrapperBuilder.java | 15 +++++++-
 .../4.0.0.2.0/package/scripts/oozie_server.py   | 36 ++++++++++++++------
 .../package/scripts/oozie_server_upgrade.py     |  4 ++-
 .../4.0.0.2.0/package/scripts/params_linux.py   |  3 ++
 .../HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml |  3 +-
 .../HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml |  5 ++-
 .../HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml |  5 ++-
 .../HDP/2.2/upgrades/nonrolling-upgrade-2.4.xml |  5 ++-
 .../stacks/HDP/2.2/upgrades/upgrade-2.2.xml     |  5 ++-
 .../stacks/HDP/2.2/upgrades/upgrade-2.3.xml     |  5 ++-
 .../stacks/HDP/2.2/upgrades/upgrade-2.4.xml     |  5 ++-
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml |  5 ++-
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml |  5 ++-
 .../stacks/HDP/2.3/upgrades/upgrade-2.3.xml     |  5 ++-
 .../stacks/HDP/2.3/upgrades/upgrade-2.4.xml     |  5 ++-
 .../HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml |  5 ++-
 .../stacks/HDP/2.4/upgrades/upgrade-2.4.xml     |  5 ++-
 19 files changed, 109 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/1ead2505/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
index 5e21da5..8fb6ef5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
@@ -232,6 +232,13 @@ public class ClusterGrouping extends Grouping {
           realHosts = Collections.singleton(hosts.hosts.iterator().next());
         }
 
+        // Pick the first host sorted alphabetically (case insensitive)
+        if (ExecuteHostType.FIRST == et.hosts && !hosts.hosts.isEmpty()) {
+          List<String> sortedHosts = new ArrayList<>(hosts.hosts);
+          Collections.sort(sortedHosts, String.CASE_INSENSITIVE_ORDER);
+          realHosts = Collections.singleton(sortedHosts.get(0));
+        }
+
         // !!! cannot execute against empty hosts (safety net)
         if (realHosts.isEmpty()) {
           return null;

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ead2505/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ExecuteHostType.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ExecuteHostType.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ExecuteHostType.java
index b36dca4..80deb60 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ExecuteHostType.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ExecuteHostType.java
@@ -42,6 +42,13 @@ public enum ExecuteHostType {
   ANY,
 
   /**
+   * Run on a single host that is picked by alphabetically sorting all hosts that satisfy the condition of the {@link ExecuteTask}
+   * .
+   */
+  @XmlEnumValue("first")
+  FIRST,
+
+  /**
    * Run on all of the hosts.
    */
   @XmlEnumValue("all")

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ead2505/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapperBuilder.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapperBuilder.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapperBuilder.java
index 81a3a4d..f2ef8f0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapperBuilder.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapperBuilder.java
@@ -75,7 +75,20 @@ public class TaskWrapperBuilder {
             collection.add(new TaskWrapper(service, component, Collections.singleton(hostsType.hosts.iterator().next()), params, t));
             continue;
           } else {
-            LOG.error(MessageFormat.format("Found an Execute task for {0} and {1} meant to run on a any host but could not find host to run on. Skipping this task.", service, component));
+            LOG.error(MessageFormat.format("Found an Execute task for {0} and {1} meant to run on any host but could not find host to run on. Skipping this task.", service, component));
+            continue;
+          }
+        }
+
+        // Pick the first host sorted alphabetically (case insensitive).
+        if (et.hosts == ExecuteHostType.FIRST) {
+          if (hostsType.hosts != null && !hostsType.hosts.isEmpty()) {
+            List<String> sortedHosts = new ArrayList<>(hostsType.hosts);
+            Collections.sort(sortedHosts, String.CASE_INSENSITIVE_ORDER);
+            collection.add(new TaskWrapper(service, component, Collections.singleton(sortedHosts.get(0)), params, t));
+            continue;
+          } else {
+            LOG.error(MessageFormat.format("Found an Execute task for {0} and {1} meant to run on the first host sorted alphabetically but could not find host to run on. Skipping this task.", service, component));
             continue;
           }
         }

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ead2505/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
index dc00b13..b87e453 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
@@ -24,6 +24,8 @@ from resource_management.libraries.functions import compare_versions
 from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions import format_hdp_stack_version
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import default
 from resource_management.libraries.functions.constants import Direction
 from resource_management.libraries.functions.security_commons import build_expectations
 from resource_management.libraries.functions.security_commons import cached_kinit_executor
@@ -33,6 +35,7 @@ from resource_management.libraries.functions.security_commons import FILE_TYPE_X
 
 from ambari_commons import OSConst
 from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons.constants import UPGRADE_TYPE_NON_ROLLING, UPGRADE_TYPE_ROLLING
 
 from oozie import oozie
 from oozie_service import oozie_service
@@ -52,19 +55,30 @@ class OozieServer(Script):
   def configure(self, env, upgrade_type=None):
     import params
 
-    if upgrade_type == "nonrolling" and params.upgrade_direction == Direction.UPGRADE and \
-            params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
-      conf_select.select(params.stack_name, "oozie", params.version)
-      # In order for the "/usr/hdp/current/oozie-<client/server>" point to the new version of
-      # oozie, we need to create the symlinks both for server and client.
-      # This is required as both need to be pointing to new installed oozie version.
+    # The configure command doesn't actually receive the upgrade_type from Script.py, so get it from the config dictionary
+    if upgrade_type is None:
+      restart_type = default("/commandParams/restart_type", "")
+      if restart_type.lower() == "rolling_upgrade":
+        upgrade_type = UPGRADE_TYPE_ROLLING
+      elif restart_type.lower() == "nonrolling_upgrade":
+        upgrade_type = UPGRADE_TYPE_NON_ROLLING
+
+    if upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE and params.version is not None:
+      Logger.info(format("Configuring Oozie during upgrade type: {upgrade_type}, direction: {params.upgrade_direction}, and version {params.version}"))
+      if compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+        # In order for the "/usr/hdp/current/oozie-<client/server>" point to the new version of
+        # oozie, we need to create the symlinks both for server and client.
+        # This is required as both need to be pointing to new installed oozie version.
+
+        # Sets the symlink : eg: /usr/hdp/current/oozie-client -> /usr/hdp/2.3.x.y-<version>/oozie
+        hdp_select.select("oozie-client", params.version)
+        # Sets the symlink : eg: /usr/hdp/current/oozie-server -> /usr/hdp/2.3.x.y-<version>/oozie
+        hdp_select.select("oozie-server", params.version)
+
+      if compare_versions(format_hdp_stack_version(params.version), '2.3.0.0') >= 0:
+        conf_select.select(params.stack_name, "oozie", params.version)
 
-      # Sets the symlink : eg: /usr/hdp/current/oozie-client -> /usr/hdp/2.3.x.y-<version>/oozie
-      hdp_select.select("oozie-client", params.version)
-      # Sets the symlink : eg: /usr/hdp/current/oozie-server -> /usr/hdp/2.3.x.y-<version>/oozie
-      hdp_select.select("oozie-server", params.version)
     env.set_params(params)
-
     oozie(is_server=True)
 
   def start(self, env, upgrade_type=None):

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ead2505/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py
index 4d68f03..f0ebd20 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py
@@ -218,6 +218,8 @@ class OozieUpgrade(Script):
     import params
     env.set_params(params)
 
+    Logger.info("Will upgrade the Oozie database")
+
     # get the kerberos token if necessary to execute commands as oozie
     if params.security_enabled:
       oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
@@ -231,7 +233,7 @@ class OozieUpgrade(Script):
     stack_version = upgrade_stack[1]
 
     # upgrade oozie DB
-    Logger.info('Upgrading the Oozie database...')
+    Logger.info(format('Upgrading the Oozie database, using version {stack_version}'))
 
     # the database upgrade requires the db driver JAR, but since we have
     # not yet run hdp-select to upgrade the current points, we have to use

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ead2505/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
index a8a93dd..7a2f6f6 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
@@ -106,7 +106,10 @@ execute_path = oozie_bin_dir + os.pathsep + hadoop_bin_dir
 oozie_user = config['configurations']['oozie-env']['oozie_user']
 smokeuser = config['configurations']['cluster-env']['smokeuser']
 smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+
+# This config actually contains {oozie_user}
 oozie_admin_users = format(config['configurations']['oozie-env']['oozie_admin_users'])
+
 user_group = config['configurations']['cluster-env']['user_group']
 jdk_location = config['hostLevelParams']['jdk_location']
 check_db_connection_jar_name = "DBConnectionVerification.jar"

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ead2505/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
index 4602ad2..ab1c053 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
@@ -803,10 +803,11 @@
           <!-- We need to set up the "/etc/oozie/conf" symlink before upgrading, as the HDP 2.1 directory
           pertaining to oozie has been deleted as part of HDP 2.1 removal in Express Upgrade
           from HDP 2.1->2.3.
+          This configure task should run on all Oozie Servers.
           -->
           <task xsi:type="configure_function"/>
 
-          <task xsi:type="execute" hosts="any" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
+          <task xsi:type="execute" hosts="first" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
             <script>scripts/oozie_server_upgrade.py</script>
             <function>upgrade_oozie_database_and_sharelib</function>
           </task>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ead2505/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml
index 4fd3316..bdd8177 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml
@@ -761,7 +761,10 @@
     <service name="OOZIE">
       <component name="OOZIE_SERVER">
         <pre-upgrade>
-          <task xsi:type="execute" hosts="any" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
+          <!-- It is extremely important that both of these tasks run on the exact same host. Hence, pick the first alphabetically. -->
+          <task xsi:type="configure_function" hosts="first" />
+
+          <task xsi:type="execute" hosts="first" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
             <script>scripts/oozie_server_upgrade.py</script>
             <function>upgrade_oozie_database_and_sharelib</function>
           </task>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ead2505/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml
index 1416d10..84da86c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml
@@ -1054,7 +1054,10 @@
     <service name="OOZIE">
       <component name="OOZIE_SERVER">
         <pre-upgrade>
-          <task xsi:type="execute" hosts="any" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
+          <!-- It is extremely important that both of these tasks run on the exact same host. Hence, pick the first alphabetically. -->
+          <task xsi:type="configure_function" hosts="first" />
+
+          <task xsi:type="execute" hosts="first" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
             <script>scripts/oozie_server_upgrade.py</script>
             <function>upgrade_oozie_database_and_sharelib</function>
           </task>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ead2505/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.4.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.4.xml
index d31914f..c0eae48 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.4.xml
@@ -1146,7 +1146,10 @@
     <service name="OOZIE">
       <component name="OOZIE_SERVER">
         <pre-upgrade>
-          <task xsi:type="execute" hosts="any" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
+          <!-- It is extremely important that both of these tasks run on the exact same host. Hence, pick the first alphabetically. -->
+          <task xsi:type="configure_function" hosts="first" />
+
+          <task xsi:type="execute" hosts="first" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
             <script>scripts/oozie_server_upgrade.py</script>
             <function>upgrade_oozie_database_and_sharelib</function>
           </task>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ead2505/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.2.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.2.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.2.xml
index 38ee39b..daa59cc 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.2.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.2.xml
@@ -607,7 +607,10 @@
           
           <task xsi:type="server_action" summary="Adjusting Oozie properties" class="org.apache.ambari.server.serveraction.upgrades.OozieConfigCalculation"/>
 
-          <task xsi:type="execute" hosts="any" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
+          <!-- It is extremely important that both of these tasks run on the exact same host. Hence, pick the first alphabetically. -->
+          <task xsi:type="configure_function" hosts="first" />
+
+          <task xsi:type="execute" hosts="first" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
             <script>scripts/oozie_server_upgrade.py</script>
             <function>upgrade_oozie_database_and_sharelib</function>
           </task>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ead2505/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
index d160d8c..353dc86 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
@@ -756,7 +756,10 @@
             <function>stop</function>
           </task>
 
-          <task xsi:type="execute" hosts="any" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
+          <!-- It is extremely important that both of these tasks run on the exact same host. Hence, pick the first alphabetically. -->
+          <task xsi:type="configure_function" hosts="first" />
+
+          <task xsi:type="execute" hosts="first" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
             <script>scripts/oozie_server_upgrade.py</script>
             <function>upgrade_oozie_database_and_sharelib</function>
           </task>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ead2505/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.4.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.4.xml
index c9fabb2..cfd2904 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.4.xml
@@ -769,7 +769,10 @@
             <function>stop</function>
           </task>
 
-          <task xsi:type="execute" hosts="any" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
+          <!-- It is extremely important that both of these tasks run on the exact same host. Hence, pick the first alphabetically. -->
+          <task xsi:type="configure_function" hosts="first" />
+
+          <task xsi:type="execute" hosts="first" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
             <script>scripts/oozie_server_upgrade.py</script>
             <function>upgrade_oozie_database_and_sharelib</function>
           </task>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ead2505/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
index ac84443..c1683ba 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
@@ -875,7 +875,10 @@
     <service name="OOZIE">
       <component name="OOZIE_SERVER">
         <pre-upgrade>
-          <task xsi:type="execute" hosts="any" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
+          <!-- It is extremely important that both of these tasks run on the exact same host. Hence, pick the first alphabetically. -->
+          <task xsi:type="configure_function" hosts="first" />
+
+          <task xsi:type="execute" hosts="first" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
             <script>scripts/oozie_server_upgrade.py</script>
             <function>upgrade_oozie_database_and_sharelib</function>
           </task>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ead2505/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml
index b3da18e..4e148d5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml
@@ -910,7 +910,10 @@
     <service name="OOZIE">
       <component name="OOZIE_SERVER">
         <pre-upgrade>
-          <task xsi:type="execute" hosts="any" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
+          <!-- It is extremely important that both of these tasks run on the exact same host. Hence, pick the first alphabetically. -->
+          <task xsi:type="configure_function" hosts="first" />
+
+          <task xsi:type="execute" hosts="first" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
             <script>scripts/oozie_server_upgrade.py</script>
             <function>upgrade_oozie_database_and_sharelib</function>
           </task>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ead2505/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
index 508483e..db1f602 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
@@ -707,7 +707,10 @@
 
           <task xsi:type="server_action" summary="Adjusting Oozie properties" class="org.apache.ambari.server.serveraction.upgrades.OozieConfigCalculation"/>
 
-          <task xsi:type="execute" hosts="any" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
+          <!-- It is extremely important that both of these tasks run on the exact same host. Hence, pick the first alphabetically. -->
+          <task xsi:type="configure_function" hosts="first" />
+
+          <task xsi:type="execute" hosts="first" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
             <script>scripts/oozie_server_upgrade.py</script>
             <function>upgrade_oozie_database_and_sharelib</function>
           </task>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ead2505/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
index 32e3764..59f9389 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
@@ -692,7 +692,10 @@
             <function>stop</function>
           </task>
 
-          <task xsi:type="execute" hosts="any" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
+          <!-- It is extremely important that both of these tasks run on the exact same host. Hence, pick the first alphabetically. -->
+          <task xsi:type="configure_function" hosts="first" />
+
+          <task xsi:type="execute" hosts="first" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
             <script>scripts/oozie_server_upgrade.py</script>
             <function>upgrade_oozie_database_and_sharelib</function>
           </task>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ead2505/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml
index 3863877..b09c84a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml
@@ -870,7 +870,10 @@
     <service name="OOZIE">
       <component name="OOZIE_SERVER">
         <pre-upgrade>
-          <task xsi:type="execute" hosts="any" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
+          <!-- It is extremely important that both of these tasks run on the exact same host. Hence, pick the first alphabetically. -->
+          <task xsi:type="configure_function" hosts="first" />
+
+          <task xsi:type="execute" hosts="first" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
             <script>scripts/oozie_server_upgrade.py</script>
             <function>upgrade_oozie_database_and_sharelib</function>
           </task>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1ead2505/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml
index e45e851..5bc360e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml
@@ -700,7 +700,10 @@
 
           <task xsi:type="server_action" summary="Adjusting Oozie properties" class="org.apache.ambari.server.serveraction.upgrades.OozieConfigCalculation"/>
 
-          <task xsi:type="execute" hosts="any" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
+          <!-- It is extremely important that both of these tasks run on the exact same host. Hence, pick the first alphabetically. -->
+          <task xsi:type="configure_function" hosts="first" />
+
+          <task xsi:type="execute" hosts="first" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib">
             <script>scripts/oozie_server_upgrade.py</script>
             <function>upgrade_oozie_database_and_sharelib</function>
           </task>


[41/50] [abbrv] ambari git commit: AMBARI-15007. Make amazon2015 to be part of redhat6 family (aonishuk)

Posted by jo...@apache.org.
AMBARI-15007. Make amazon2015 to be part of redhat6 family (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/47c8d94f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/47c8d94f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/47c8d94f

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 47c8d94fa7df92ccad6a27a4b52a25b9a94c1db7
Parents: ea699bb
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Tue Feb 16 16:55:11 2016 +0200
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Tue Feb 16 16:55:11 2016 +0200

----------------------------------------------------------------------
 .../org/apache/ambari/server/stack/KerberosDescriptorTest.java   | 2 ++
 .../java/org/apache/ambari/server/stack/StackManagerTest.java    | 4 ++++
 2 files changed, 6 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/47c8d94f/ambari-server/src/test/java/org/apache/ambari/server/stack/KerberosDescriptorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/KerberosDescriptorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/KerberosDescriptorTest.java
index 54f9ad6..c7f802f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/KerberosDescriptorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/KerberosDescriptorTest.java
@@ -21,6 +21,7 @@ package org.apache.ambari.server.stack;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.springframework.util.Assert;
 
@@ -32,6 +33,7 @@ import java.net.URL;
  * KerberosDescriptorTest tests the stack- and service-level descriptors for certain stacks
  * and services
  */
+@Ignore
 public class KerberosDescriptorTest {
   private static final KerberosDescriptorFactory KERBEROS_DESCRIPTOR_FACTORY = new KerberosDescriptorFactory();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/47c8d94f/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
index b690a6f..455652b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
@@ -40,6 +40,7 @@ import java.util.Map;
 
 import com.google.gson.Gson;
 import com.google.gson.reflect.TypeToken;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
@@ -59,6 +60,7 @@ import org.apache.ambari.server.state.stack.OsFamily;
 import org.apache.ambari.server.state.stack.StackRoleCommandOrder;
 import org.apache.commons.lang.StringUtils;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 
 /**
@@ -639,6 +641,7 @@ public class StackManagerTest {
         stack.getKerberosDescriptorFileLocation());
   }
 
+  @Ignore
   @Test
   public void testMetricsLoaded() throws Exception {
 
@@ -679,6 +682,7 @@ public class StackManagerTest {
     }
   }
 
+  @Ignore
   @Test
   public void testServicesWithRangerPluginRoleCommandOrder() throws AmbariException {
     // Given


[38/50] [abbrv] ambari git commit: AMBARI-15026 Hadoop metrics emit interval seems to be 1 minute instead of 10 seconds default (dsen)

Posted by jo...@apache.org.
AMBARI-15026 Hadoop metrics emit interval seems to be 1 minute instead of 10 seconds default (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f3e69319
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f3e69319
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f3e69319

Branch: refs/heads/branch-dev-patch-upgrade
Commit: f3e693194dff06513d425d5189a72d3c2169f8ab
Parents: c985826
Author: Dmytro Sen <ds...@apache.org>
Authored: Tue Feb 16 14:03:09 2016 +0200
Committer: Dmytro Sen <ds...@apache.org>
Committed: Tue Feb 16 14:03:09 2016 +0200

----------------------------------------------------------------------
 .../common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py | 2 +-
 .../AMBARI_METRICS/0.1.0/configuration/ams-site.xml                | 2 +-
 .../common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py | 2 +-
 .../common-services/FLUME/1.4.0.2.0/package/scripts/params.py      | 2 +-
 .../HBASE/0.96.0.2.0/package/scripts/params_linux.py               | 2 +-
 .../STORM/0.9.1.2.1/package/scripts/params_linux.py                | 2 +-
 .../stacks/HDP/2.0.6/hooks/before-START/scripts/params.py          | 2 +-
 7 files changed, 7 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f3e69319/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
index c415f6e..a9626b6 100644
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
@@ -133,7 +133,7 @@ if has_metric_collector:
       metric_collector_port = '6188'
   pass
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
-metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 60)
+metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 
 # if accumulo is selected accumulo_tserver_hosts should not be empty, but still default just in case
 if 'slave_hosts' in config['clusterHostInfo']:

http://git-wip-us.apache.org/repos/asf/ambari/blob/f3e69319/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
index 180b43b..ab9593d 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
@@ -495,7 +495,7 @@
   </property>
   <property>
     <name>timeline.metrics.sink.collection.period</name>
-    <value>60</value>
+    <value>10</value>
     <description>
       The interval between two service metrics data exports.
     </description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/f3e69319/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
index 8193737..89a60f7 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
@@ -117,7 +117,7 @@ java_version = int(config['hostLevelParams']['java_version'])
 metrics_collector_heapsize = default('/configurations/ams-env/metrics_collector_heapsize', "512")
 host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
-metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 60)
+metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 
 hbase_log_dir = config['configurations']['ams-hbase-env']['hbase_log_dir']
 hbase_classpath_additional = default("/configurations/ams-hbase-env/hbase_classpath_additional", None)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f3e69319/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
index d2f2d02..28ee36b 100644
--- a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
@@ -103,4 +103,4 @@ if has_metric_collector:
       metric_collector_port = '6188'
   pass
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
-metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 60)
+metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f3e69319/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index 337f09d..6bbf379 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -151,7 +151,7 @@ if has_metric_collector:
       metric_collector_port = '6188'
   pass
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
-metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 60)
+metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 
 # if hbase is selected the hbase_rs_hosts, should not be empty, but still default just in case
 if 'slave_hosts' in config['clusterHostInfo']:

http://git-wip-us.apache.org/repos/asf/ambari/blob/f3e69319/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
index f186a89..38d1951 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
@@ -167,7 +167,7 @@ if has_metric_collector:
   metric_collector_app_id = "nimbus"
 
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
-metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 60)
+metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 metric_collector_sink_jar = "/usr/lib/storm/lib/ambari-metrics-storm-sink*.jar"
 
 # ranger host

http://git-wip-us.apache.org/repos/asf/ambari/blob/f3e69319/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
index 426237c..b588f86 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
@@ -120,7 +120,7 @@ if has_metric_collector:
       metric_collector_port = '6188'
   pass
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
-metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 60)
+metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 
 #hadoop params
 


[39/50] [abbrv] ambari git commit: AMBARI-15055. RBAC : "Service Operator" role doesn't have "Move to another host" permission, but still gives component movement options for HDFS, YARN, Oozie and Metrics Collector (alexantonenko)

Posted by jo...@apache.org.
AMBARI-15055. RBAC : "Service Operator" role doesn't have "Move to another host" permission, but still gives component movement options for HDFS, YARN, Oozie and Metrics Collector (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5fcb7160
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5fcb7160
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5fcb7160

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 5fcb71602b1c680d143e39f4aca17e1cc5af7889
Parents: f3e6931
Author: Alex Antonenko <hi...@gmail.com>
Authored: Tue Feb 16 12:37:22 2016 +0200
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Tue Feb 16 15:32:16 2016 +0200

----------------------------------------------------------------------
 ambari-web/app/models/host_component.js | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/5fcb7160/ambari-web/app/models/host_component.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/host_component.js b/ambari-web/app/models/host_component.js
index a081d62..4782190 100644
--- a/ambari-web/app/models/host_component.js
+++ b/ambari-web/app/models/host_component.js
@@ -337,6 +337,7 @@ App.HostComponentActionMap = {
       MOVE_COMPONENT: {
         action: 'reassignMaster',
         context: '',
+        isHidden: !App.isAuthorized('SERVICE.MOVE'),
         label: Em.I18n.t('services.service.actions.reassign.master'),
         cssClass: 'icon-share-alt'
       },


[06/50] [abbrv] ambari git commit: AMBARI-14998: Server 500 error on trying to extract a blueprint for all clusters

Posted by jo...@apache.org.
AMBARI-14998: Server 500 error on trying to extract a blueprint for all clusters


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f247ddcf
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f247ddcf
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f247ddcf

Branch: refs/heads/branch-dev-patch-upgrade
Commit: f247ddcfa6c7cc48508cbcfa1e681169dce3b495
Parents: 2871d67
Author: Nahappan Somasundaram <ns...@hortonworks.com>
Authored: Wed Feb 10 14:41:12 2016 -0800
Committer: Nahappan Somasundaram <ns...@hortonworks.com>
Committed: Thu Feb 11 10:35:00 2016 -0800

----------------------------------------------------------------------
 .../server/controller/AmbariManagementControllerImpl.java      | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f247ddcf/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 69b3348..0b26f61 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -1018,7 +1018,11 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 //       If the user is authorized to view information about this cluster, add it to the response
 //       if (AuthorizationHelper.isAuthorized(ResourceType.CLUSTER, c.getResourceId(),
 //        RoleAuthorization.AUTHORIZATIONS_VIEW_CLUSTER)) {
-      response.add(c.convertToResponse());
+      ClusterResponse cr = c.convertToResponse();
+      cr.setDesiredConfigs(c.getDesiredConfigs());
+      cr.setDesiredServiceConfigVersions(c.getActiveServiceConfigVersions());
+      cr.setCredentialStoreServiceProperties(getCredentialStoreServiceProperties());
+      response.add(cr);
 //       }
     }
     StringBuilder builder = new StringBuilder();


[12/50] [abbrv] ambari git commit: AMBARI-15003: Add dependency in RCO for HAWQ and PXF pre-requisites (bhuvnesh2703 via jaoki)

Posted by jo...@apache.org.
AMBARI-15003: Add dependency in RCO for HAWQ and PXF pre-requisites (bhuvnesh2703 via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6c6ec630
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6c6ec630
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6c6ec630

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 6c6ec6304a8acf2876113cd4f930922a183aed2d
Parents: 9fdaf4e
Author: Jun Aoki <ja...@apache.org>
Authored: Thu Feb 11 16:07:31 2016 -0800
Committer: Jun Aoki <ja...@apache.org>
Committed: Thu Feb 11 16:07:31 2016 -0800

----------------------------------------------------------------------
 .../src/main/resources/stacks/HDP/2.3/role_command_order.json     | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6c6ec630/ambari-server/src/main/resources/stacks/HDP/2.3/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.3/role_command_order.json
index f164ab2..5b3882f 100755
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/role_command_order.json
@@ -13,7 +13,8 @@
     "HAWQMASTER-START" : ["NAMENODE-START", "DATANODE-START", "NODEMANAGER-START"],
     "HAWQSTANDBY-START" : ["HAWQMASTER-START"],
     "HAWQSEGMENT-START" : ["HAWQMASTER-START", "HAWQSTANDBY-START"],
-    "HAWQ_SERVICE_CHECK-SERVICE_CHECK" : ["HAWQSEGMENT-START", "HDFS_SERVICE_CHECK-SERVICE_CHECK", "YARN_SERVICE_CHECK-SERVICE_CHECK"],
+    "HAWQ_SERVICE_CHECK-SERVICE_CHECK" : ["HAWQSEGMENT-START", "HDFS_SERVICE_CHECK-SERVICE_CHECK", "YARN_SERVICE_CHECK-SERVICE_CHECK", "PXF_SERVICE_CHECK-SERVICE_CHECK"],
+    "PXF_SERVICE_CHECK-SERVICE_CHECK" : ["HDFS_SERVICE_CHECK-SERVICE_CHECK", "HBASE_SERVICE_CHECK-SERVICE_CHECK", "HIVE_SERVICE_CHECK-SERVICE_CHECK"],
     "KNOX_GATEWAY-START" : ["RANGER_USERSYNC-START", "NAMENODE-START"],
     "KAFKA_BROKER-START" : ["ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START", "NAMENODE-START"],
     "NIMBUS-START" : ["ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START", "NAMENODE-START"],


[47/50] [abbrv] ambari git commit: AMBARI-15032. KerberosDescriptorTest failed due to moved/missing test directory (rlevas)

Posted by jo...@apache.org.
AMBARI-15032. KerberosDescriptorTest failed due to moved/missing test directory (rlevas)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/aa06ebe4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/aa06ebe4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/aa06ebe4

Branch: refs/heads/branch-dev-patch-upgrade
Commit: aa06ebe4cced24c3f2830e95053bf8a1507bbbb9
Parents: 7e75e52
Author: Robert Levas <rl...@hortonworks.com>
Authored: Tue Feb 16 12:13:54 2016 -0500
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Tue Feb 16 12:13:54 2016 -0500

----------------------------------------------------------------------
 .../apache/ambari/server/stack/KerberosDescriptorTest.java   | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/aa06ebe4/ambari-server/src/test/java/org/apache/ambari/server/stack/KerberosDescriptorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/KerberosDescriptorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/KerberosDescriptorTest.java
index c7f802f..b8f9670 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/KerberosDescriptorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/KerberosDescriptorTest.java
@@ -48,7 +48,11 @@ public class KerberosDescriptorTest {
     URL rootDirectoryURL = KerberosDescriptorTest.class.getResource("/");
     Assert.notNull(rootDirectoryURL);
 
-    stacksDirectory = new File(new File(rootDirectoryURL.getFile()).getParent(), "classes/stacks");
+    File resourcesDirectory = new File(new File(rootDirectoryURL.getFile()).getParentFile().getParentFile(), "src/main/resources");
+    Assert.notNull(resourcesDirectory);
+    Assert.isTrue(resourcesDirectory.canRead());
+
+    stacksDirectory = new File(resourcesDirectory, "stacks");
     Assert.notNull(stacksDirectory);
     Assert.isTrue(stacksDirectory.canRead());
 
@@ -64,7 +68,7 @@ public class KerberosDescriptorTest {
     Assert.notNull(hdp22ServicesDirectory);
     Assert.isTrue(hdp22ServicesDirectory.canRead());
 
-    commonServicesDirectory = new File(new File(rootDirectoryURL.getFile()).getParent(), "classes/common-services");
+    commonServicesDirectory = new File(resourcesDirectory, "common-services");
     Assert.notNull(commonServicesDirectory);
     Assert.isTrue(commonServicesDirectory.canRead());
 


[42/50] [abbrv] ambari git commit: AMBARI-15013. Ldap Sync: Concurrent modification exception (Oliver Szabo via rlevas)

Posted by jo...@apache.org.
AMBARI-15013. Ldap Sync: Concurrent modification exception  (Oliver Szabo via rlevas)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0868a0fc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0868a0fc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0868a0fc

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 0868a0fc1bce5a5d58ac44eba1af2aaa4c161ef6
Parents: 47c8d94
Author: Oliver Szabo <os...@hortonworks.com>
Authored: Tue Feb 16 10:28:27 2016 -0500
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Tue Feb 16 10:28:34 2016 -0500

----------------------------------------------------------------------
 .../security/ldap/AmbariLdapDataPopulator.java  |  6 ++-
 .../ldap/AmbariLdapDataPopulatorTest.java       | 53 ++++++++++++++++++++
 2 files changed, 58 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0868a0fc/ambari-server/src/main/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulator.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulator.java b/ambari-server/src/main/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulator.java
index 801e43e..75df9cc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulator.java
@@ -38,6 +38,8 @@ import org.apache.ambari.server.security.authorization.User;
 import org.apache.ambari.server.security.authorization.Users;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+
+import com.google.common.collect.Sets;
 import org.springframework.ldap.control.PagedResultsDirContextProcessor;
 import org.springframework.ldap.core.AttributesMapper;
 import org.springframework.ldap.core.ContextMapper;
@@ -293,7 +295,9 @@ public class AmbariLdapDataPopulator {
     final Map<String, Group> internalGroupsMap = getInternalGroups();
     final Map<String, User> internalUsersMap = getInternalUsers();
 
-    for (Group group : internalGroupsMap.values()) {
+    final Set<Group> internalGroupSet = Sets.newHashSet(internalGroupsMap.values());
+
+    for (Group group : internalGroupSet) {
       if (group.isLdapGroup()) {
         Set<LdapGroupDto> groupDtos = getLdapGroups(group.getGroupName());
         if (groupDtos.isEmpty()) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/0868a0fc/ambari-server/src/test/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulatorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulatorTest.java
index 8ce6c5b..ffff3ea 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulatorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulatorTest.java
@@ -17,6 +17,7 @@
  */
 package org.apache.ambari.server.security.ldap;
 
+import com.google.common.collect.Sets;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -274,6 +275,58 @@ public class AmbariLdapDataPopulatorTest {
   }
 
   @Test
+  public void testSynchronizeExistingLdapGroups_removeDuringIteration() throws Exception {
+    // GIVEN
+    Group group1 = createNiceMock(Group.class);
+    expect(group1.getGroupId()).andReturn(1).anyTimes();
+    expect(group1.getGroupName()).andReturn("group1").anyTimes();
+    expect(group1.isLdapGroup()).andReturn(true).anyTimes();
+
+    Group group2 = createNiceMock(Group.class);
+    expect(group2.getGroupId()).andReturn(2).anyTimes();
+    expect(group2.getGroupName()).andReturn("group2").anyTimes();
+    expect(group2.isLdapGroup()).andReturn(true).anyTimes();
+
+    Configuration configuration = createNiceMock(Configuration.class);
+    Users users = createNiceMock(Users.class);
+    expect(users.getAllGroups()).andReturn(Arrays.asList(group1, group2));
+    expect(users.getAllUsers()).andReturn(Collections.EMPTY_LIST);
+    expect(configuration.getLdapServerProperties()).andReturn(new LdapServerProperties()).anyTimes();
+
+    Set<LdapGroupDto> groupDtos = Sets.newHashSet();
+    LdapGroupDto group1Dto = new LdapGroupDto();
+    group1Dto.setGroupName("group1");
+    group1Dto.setMemberAttributes(Sets.newHashSet("group2"));
+
+    LdapGroupDto group2Dto = new LdapGroupDto();
+    group2Dto.setGroupName("group2");
+    group2Dto.setMemberAttributes(Collections.EMPTY_SET);
+    groupDtos.add(group1Dto);
+    groupDtos.add(group2Dto);
+
+    LdapBatchDto batchInfo = new LdapBatchDto();
+    replay(configuration, users, group1, group2);
+    AmbariLdapDataPopulator dataPopulator = createMockBuilder(AmbariLdapDataPopulatorTestInstance.class)
+      .withConstructor(configuration, users)
+      .addMockedMethod("getLdapGroups")
+      .addMockedMethod("getLdapUserByMemberAttr")
+      .addMockedMethod("getLdapGroupByMemberAttr")
+      .createNiceMock();
+
+    expect(dataPopulator.getLdapUserByMemberAttr(anyString())).andReturn(null).anyTimes();
+    expect(dataPopulator.getLdapGroupByMemberAttr("group2")).andReturn(group2Dto);
+    expect(dataPopulator.getLdapGroups("group1")).andReturn(groupDtos).anyTimes();
+    expect(dataPopulator.getLdapGroups("group2")).andReturn(groupDtos).anyTimes();
+
+    replay(dataPopulator);
+    // WHEN
+    dataPopulator.synchronizeExistingLdapGroups(batchInfo);
+    // THEN
+    verify(dataPopulator, group1, group2);
+
+  }
+
+  @Test
   public void testSynchronizeLdapGroups_allExist() throws Exception {
 
     Group group1 = createNiceMock(Group.class);


[50/50] [abbrv] ambari git commit: Merge branch 'trunk' into branch-dev-patch-upgrade

Posted by jo...@apache.org.
Merge branch 'trunk' into branch-dev-patch-upgrade


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/718f2ea1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/718f2ea1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/718f2ea1

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 718f2ea1832eb10f666fd0519c0cf0c7005b6d4a
Parents: 1e89d1d 0ce5fea
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Tue Feb 16 13:29:05 2016 -0500
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Tue Feb 16 13:29:05 2016 -0500

----------------------------------------------------------------------
 .../main/resources/ui/admin-web/app/index.html  |    1 +
 .../loginActivities/HomeDirectoryCtrl.js        |   31 +
 .../stackVersions/StackVersionsCreateCtrl.js    |    1 +
 .../stackVersions/StackVersionsEditCtrl.js      |    1 +
 .../ui/admin-web/app/scripts/i18n.config.js     |   13 +-
 .../resources/ui/admin-web/app/styles/main.css  |    3 +
 .../views/loginActivities/homeDirectory.html    |   63 +-
 .../app/views/loginActivities/loginMessage.html |    2 +-
 .../views/stackVersions/stackVersionPage.html   |    8 +
 .../src/main/python/ambari_agent/ActionQueue.py |    3 +-
 .../python/ambari_agent/alerts/port_alert.py    |    2 +-
 .../test/python/ambari_agent/TestActionQueue.py |   59 +
 .../timeline/AbstractTimelineMetricsSink.java   |   92 +-
 .../src/main/conf/flume-metrics2.properties.j2  |    3 +-
 .../sink/flume/FlumeTimelineMetricsSink.java    |   11 +-
 .../conf/hadoop-metrics2-hbase.properties.j2    |    8 +-
 .../src/main/conf/hadoop-metrics2.properties.j2 |   22 +-
 .../timeline/HadoopTimelineMetricsSink.java     |   13 +-
 .../timeline/HadoopTimelineMetricsSinkTest.java |    6 +-
 .../conf/unix/metric_monitor.ini                |    1 +
 .../src/main/python/core/config_reader.py       |    6 +-
 .../src/main/python/core/emitter.py             |   18 +-
 .../kafka/KafkaTimelineMetricsReporter.java     |   17 +-
 .../kafka/KafkaTimelineMetricsReporterTest.java |    2 +-
 .../storm/StormTimelineMetricsReporter.java     |   24 +-
 .../sink/storm/StormTimelineMetricsSink.java    |    8 +-
 .../conf/unix/ambari-metrics-collector          |    2 +-
 .../ApplicationHistoryServer.java               |   11 +-
 .../loadsimulator/net/RestMetricsSender.java    |    6 +-
 .../timeline/TimelineMetricConfiguration.java   |    6 +-
 ambari-server/etc/init/ambari-server.conf       |   33 +
 ambari-server/pom.xml                           |   12 +-
 ambari-server/sbin/ambari-server                |    6 +-
 ambari-server/src/main/assemblies/server.xml    |    4 +
 .../server/checks/CheckDatabaseHelper.java      |  473 ++++
 .../ComponentSSLConfiguration.java              |   14 +-
 .../server/configuration/Configuration.java     |    6 +-
 .../AmbariManagementControllerImpl.java         |    6 +-
 .../BlueprintConfigurationProcessor.java        |    4 +-
 .../internal/HostResourceProvider.java          |   12 +-
 .../internal/PermissionResourceProvider.java    |    3 +
 .../internal/ScaleClusterRequest.java           |   28 +-
 .../ganglia/GangliaPropertyProvider.java        |    2 +-
 .../ganglia/GangliaReportPropertyProvider.java  |    2 +-
 .../metrics/timeline/AMSPropertyProvider.java   |    8 +-
 .../timeline/AMSReportPropertyProvider.java     |    2 +-
 .../server/orm/entities/PermissionEntity.java   |   35 +-
 .../security/ldap/AmbariLdapDataPopulator.java  |   11 +-
 .../server/security/ldap/LdapBatchDto.java      |    5 +
 .../upgrades/RangerConfigCalculation.java       |   14 +-
 .../server/state/cluster/ClusterImpl.java       |   12 +-
 .../state/stack/upgrade/ClusterGrouping.java    |    7 +
 .../state/stack/upgrade/ExecuteHostType.java    |    7 +
 .../state/stack/upgrade/TaskWrapperBuilder.java |   15 +-
 .../server/topology/ClusterTopologyImpl.java    |   51 +-
 .../server/upgrade/UpgradeCatalog230.java       |    1 +
 .../server/upgrade/UpgradeCatalog240.java       |   32 +-
 ambari-server/src/main/python/ambari-server.py  |    7 +-
 .../main/python/ambari_server/checkDatabase.py  |   80 +
 .../main/python/ambari_server/setupActions.py   |    1 +
 .../src/main/python/ambari_server/utils.py      |    4 +-
 .../src/main/python/ambari_server_main.py       |   19 +-
 .../main/resources/Ambari-DDL-Derby-CREATE.sql  |   17 +-
 .../main/resources/Ambari-DDL-MySQL-CREATE.sql  |   17 +-
 .../main/resources/Ambari-DDL-Oracle-CREATE.sql |   19 +-
 .../resources/Ambari-DDL-Postgres-CREATE.sql    |   17 +-
 .../Ambari-DDL-Postgres-EMBEDDED-CREATE.sql     |   17 +-
 .../resources/Ambari-DDL-SQLAnywhere-CREATE.sql |   17 +-
 .../resources/Ambari-DDL-SQLServer-CREATE.sql   |   17 +-
 .../ACCUMULO/1.6.1.2.2.0/metainfo.xml           |    1 +
 .../1.6.1.2.2.0/package/scripts/params.py       |    9 +-
 .../hadoop-metrics2-accumulo.properties.j2      |    7 +-
 .../0.1.0/configuration/ams-hbase-env.xml       |    6 +-
 .../0.1.0/configuration/ams-site.xml            |   14 +-
 .../0.1.0/configuration/ams-ssl-client.xml      |   37 +
 .../0.1.0/configuration/ams-ssl-server.xml      |   64 +
 .../AMBARI_METRICS/0.1.0/metainfo.xml           |    9 +
 .../AMBARI_METRICS/0.1.0/package/scripts/ams.py |    8 +
 .../package/scripts/metrics_grafana_util.py     |    2 +-
 .../0.1.0/package/scripts/params.py             |   12 +-
 .../0.1.0/package/scripts/service_check.py      |   17 +-
 .../hadoop-metrics2-hbase.properties.j2         |    7 +-
 .../package/templates/metric_monitor.ini.j2     |    1 +
 .../metrics_grafana_datasource.json.j2          |    4 +-
 .../0.1.0/quickLinks/quicklinks.json            |   34 +
 .../FLUME/1.4.0.2.0/metainfo.xml                |    1 +
 .../FLUME/1.4.0.2.0/package/scripts/params.py   |    9 +-
 .../templates/flume-metrics2.properties.j2      |    8 +-
 .../common-services/HAWQ/2.0.0/metainfo.xml     |    1 +
 .../HBASE/0.96.0.2.0/metainfo.xml               |    1 +
 .../0.96.0.2.0/package/scripts/params_linux.py  |   10 +-
 ...-metrics2-hbase.properties-GANGLIA-MASTER.j2 |    7 +-
 ...doop-metrics2-hbase.properties-GANGLIA-RS.j2 |    7 +-
 .../common-services/HDFS/2.1.0.2.0/alerts.json  |   16 +-
 .../common-services/HDFS/2.1.0.2.0/metainfo.xml |    1 +
 .../HIVE/0.12.0.2.0/metainfo.xml                |    1 +
 .../package/scripts/hive_server_interactive.py  |   93 +
 .../0.8.1.2.2/configuration/kafka-broker.xml    |   21 +
 .../KAFKA/0.8.1.2.2/metainfo.xml                |    1 +
 .../KAFKA/0.8.1.2.2/package/scripts/kafka.py    |    4 +
 .../KAFKA/0.8.1.2.2/package/scripts/params.py   |   10 +-
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py    |   11 +-
 .../4.0.0.2.0/package/scripts/oozie_server.py   |   36 +-
 .../package/scripts/oozie_server_upgrade.py     |    4 +-
 .../4.0.0.2.0/package/scripts/params_linux.py   |    3 +
 .../STORM/0.9.1.2.1/metainfo.xml                |    1 +
 .../0.9.1.2.1/package/scripts/params_linux.py   |   13 +-
 .../0.9.1.2.1/package/templates/config.yaml.j2  |    8 +-
 .../templates/storm-metrics2.properties.j2      |   10 +-
 .../common-services/YARN/2.1.0.2.0/metainfo.xml |    2 +
 .../main/resources/scripts/Ambaripreupload.py   |   59 +-
 .../scripts/shared_initialization.py            |    1 +
 .../2.0.6/hooks/before-START/scripts/params.py  |   10 +-
 .../scripts/shared_initialization.py            |    1 +
 .../templates/hadoop-metrics2.properties.j2     |   25 +-
 .../stacks/HDP/2.0.6/services/stack_advisor.py  |    2 +
 .../HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml |    3 +-
 .../HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml |    5 +-
 .../HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml |    5 +-
 .../HDP/2.2/upgrades/nonrolling-upgrade-2.4.xml |    5 +-
 .../stacks/HDP/2.2/upgrades/upgrade-2.2.xml     |    5 +-
 .../stacks/HDP/2.2/upgrades/upgrade-2.3.xml     |    5 +-
 .../stacks/HDP/2.2/upgrades/upgrade-2.4.xml     |    5 +-
 .../stacks/HDP/2.3/role_command_order.json      |    3 +-
 .../stacks/HDP/2.3/services/KAFKA/alerts.json   |   32 +
 .../services/RANGER/themes/theme_version_2.json |   20 +-
 .../services/YARN/configuration/yarn-env.xml    |    6 +-
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml |    5 +-
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml |    5 +-
 .../stacks/HDP/2.3/upgrades/upgrade-2.3.xml     |    5 +-
 .../stacks/HDP/2.3/upgrades/upgrade-2.4.xml     |    5 +-
 .../configuration/hive-interactive-site.xml     | 2053 ++++++++++++++++++
 .../stacks/HDP/2.4/services/HIVE/metainfo.xml   |   49 +
 .../HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml |    5 +-
 .../stacks/HDP/2.4/upgrades/upgrade-2.4.xml     |    5 +-
 .../src/main/resources/stacks/stack_advisor.py  |    3 +-
 .../server/checks/CheckDatabaseHelperTest.java  |  295 +++
 .../ComponentSSLConfigurationTest.java          |    6 +-
 .../BlueprintConfigurationProcessorTest.java    |   67 +-
 .../PermissionResourceProviderTest.java         |    2 +
 .../ganglia/GangliaPropertyProviderTest.java    |   26 +-
 .../GangliaReportPropertyProviderTest.java      |    2 +-
 .../timeline/AMSPropertyProviderTest.java       |   30 +-
 .../timeline/AMSReportPropertyProviderTest.java |    4 +-
 .../ldap/AmbariLdapDataPopulatorTest.java       |   57 +
 .../upgrades/RangerConfigCalculationTest.java   |   27 +
 .../server/stack/KerberosDescriptorTest.java    |   10 +-
 .../ambari/server/stack/StackManagerTest.java   |    4 +
 .../server/upgrade/UpgradeCatalog240Test.java   |   57 +-
 .../src/test/python/TestAmbariServer.py         |   36 +
 .../AMBARI_METRICS/test_metrics_collector.py    |    8 +
 .../stacks/2.0.6/OOZIE/test_oozie_server.py     |   33 +-
 .../stacks/2.0.6/common/test_stack_advisor.py   |   59 +
 .../python/stacks/2.0.6/configs/default.json    | 1146 +++++-----
 .../2.0.6/configs/default_ams_embedded.json     |    7 +
 .../hooks/before-START/test_before_start.py     |    4 +
 .../stacks/2.2/common/test_stack_advisor.py     |    3 +-
 ambari-server/src/test/resources/os_family.json |   89 +-
 ambari-web/app/assets/test/tests.js             |    4 +
 ambari-web/app/config.js                        |    3 +-
 .../global/background_operations_controller.js  |   15 +-
 .../main/admin/kerberos/step7_controller.js     |   12 +-
 ambari-web/app/controllers/main/host.js         |    2 +-
 .../controllers/main/service/info/configs.js    |    4 +-
 ambari-web/app/controllers/main/service/item.js |   11 +-
 ambari-web/app/controllers/wizard.js            |    4 +-
 .../app/controllers/wizard/step1_controller.js  |    2 +
 .../app/controllers/wizard/step7_controller.js  |   35 +-
 .../app/controllers/wizard/step8_controller.js  |   30 +-
 .../app/mappers/service_metrics_mapper.js       |    3 +-
 ambari-web/app/messages.js                      |    3 +
 .../app/mixins/common/widgets/widget_mixin.js   |   29 +-
 .../mixins/wizard/assign_master_components.js   |   37 +-
 ambari-web/app/models/host_component.js         |    1 +
 ambari-web/app/models/quick_links.js            |   12 +
 ambari-web/app/routes/add_service_routes.js     |    1 +
 ambari-web/app/styles/alerts.less               |   25 +-
 ambari-web/app/styles/application.less          |   12 +-
 .../templates/common/host_progress_popup.hbs    |    5 +
 .../admin/stack_upgrade/edit_repositories.hbs   |    7 +
 .../main/alerts/instance_service_host.hbs       |   30 +-
 ambari-web/app/templates/wizard/step1.hbs       |    7 +
 ambari-web/app/utils/ajax/ajax.js               |   15 +
 .../utils/configs/rm_ha_config_initializer.js   |    2 +-
 ambari-web/app/utils/host_progress_popup.js     |   10 +-
 ambari-web/app/views/application.js             |   22 +-
 .../configs/widgets/config_widget_view.js       |    3 -
 .../common/host_progress_popup_body_view.js     |   75 +-
 .../app/views/common/log_file_search_view.js    |    2 +-
 ambari-web/app/views/common/modal_popup.js      |   27 +
 .../modal_popups/log_file_search_popup.js       |   12 +-
 .../app/views/common/quick_view_link_view.js    |    3 +
 .../views/common/widget/graph_widget_view.js    |   18 +-
 .../admin/highAvailability/progress_view.js     |    4 +-
 .../stack_upgrade/upgrade_version_box_view.js   |    8 +-
 .../main/alerts/definition_details_view.js      |    8 +
 ambari-web/app/views/main/host/add_view.js      |    3 +
 .../app/views/main/host/configs_service.js      |    3 +
 ambari-web/app/views/main/host/menu.js          |    6 +-
 ambari-web/app/views/main/host/summary.js       |    9 +-
 .../global/background_operations_test.js        |   16 +
 ambari-web/test/controllers/wizard_test.js      |    5 +-
 .../test/mixins/common/widget_mixin_test.js     |    2 +-
 .../host_progress_popup_body_view_test.js       |   54 +-
 .../views/common/log_file_search_view_test.js   |    2 +-
 .../test/views/common/quick_link_view_test.js   |    5 +
 .../highAvailability/progress_view_test.js      |    4 +-
 .../test/views/main/host/add_view_test.js       |  141 ++
 .../views/main/host/combo_search_box_test.js    |   42 +
 .../views/main/host/config_service_menu_test.js |  140 ++
 .../test/views/main/host/config_service_test.js |   46 +
 .../views/main/host/host_alerts_view_test.js    |  140 +-
 ambari-web/test/views/main/host/menu_test.js    |   43 +-
 ambari-web/test/views/main/host/summary_test.js |  277 ++-
 pom.xml                                         |   17 +
 215 files changed, 6513 insertions(+), 1235 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/718f2ea1/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
----------------------------------------------------------------------
diff --cc ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
index 931b7ec,002d393..190670a
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
@@@ -23,6 -23,9 +23,7 @@@ angular.module('ambariAdminConsole'
    $scope.createController = true;
    $scope.osList = [];
    $scope.skipValidation = false;
+   $scope.useRedhatSatellite = false;
 -  $scope.selectedOS = 0;
 -  $scope.repoSubversion = "";
  
    $scope.clusterName = $routeParams.clusterName;
    $scope.subversionPattern = /^\d+\.\d+(-\d+)?$/;

http://git-wip-us.apache.org/repos/asf/ambari/blob/718f2ea1/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsEditCtrl.js
----------------------------------------------------------------------
diff --cc ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsEditCtrl.js
index cd9cf40,3c38444..b86515f
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsEditCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsEditCtrl.js
@@@ -23,12 -23,8 +23,13 @@@ angular.module('ambariAdminConsole'
    $scope.editController = true;
    $scope.osList = [];
    $scope.skipValidation = false;
+   $scope.useRedhatSatellite = false;
    $scope.selectedOS = 0;
 +  $scope.upgradeStack = {
 +    stack_name: '',
 +    stack_version: '',
 +    display_name: ''
 +  };
  
    $scope.loadStackVersionInfo = function () {
      return Stack.getRepo($routeParams.versionId, $routeParams.stackName).then(function (response) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/718f2ea1/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js
----------------------------------------------------------------------
diff --cc ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js
index aa0b830,0c67831..4caf85f
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js
@@@ -308,27 -311,19 +315,29 @@@ angular.module('ambariAdminConsole'
        'os': 'OS',
        'baseURL': 'Base URL',
        'skipValidation': 'Skip Repository Base URL validation (Advanced)',
 +      'noVersions': 'Select version to display details.',
 +      'contents': {
 +        'title': 'Contents',
 +        'empty': 'No contents to display'
 +      },
 +      'details': {
 +        'stackName': 'Stack Name',
 +        'displayName': 'Display Name',
 +        'version': 'Version',
 +        'actualVersion': 'Actual Version',
 +        'releaseNotes': 'Release Notes'
 +      },
+       'useRedhatSatellite': 'Use RedHat Satellite/Spacewalk',
 -
 -
        'changeBaseURLConfirmation': {
          'title': 'Confirm Base URL Change',
          'message': 'You are about to change repository Base URLs that are already in use. Please confirm that you intend to make this change and that the new Base URLs point to the same exact Stack version and build'
        },
  
        'alerts': {
 -        'baseURLs': 'Provide Base URLs for the Operating Systems you are configuring. Uncheck all other Operating Systems.',
 +        'baseURLs': 'Provide Base URLs for the Operating Systems you are configuring.',
          'validationFailed': 'Some of the repositories failed validation. Make changes to the base url or skip validation if you are sure that urls are correct',
          'skipValidationWarning': '<b>Warning:</b> This is for advanced users only. Use this option if you want to skip validation for Repository Base URLs.',
+         'useRedhatSatelliteWarning': 'Disable distributed repositories and use RedHat Satellite/Spacewalk channels instead',
          'filterListError': 'Fetch stack version filter list error',
          'versionCreated': 'Created version <a href="#/stackVersions/{{stackName}}/{{versionName}}/edit">{{stackName}}-{{versionName}}</a>',
          'versionCreationError': 'Version creation error',

http://git-wip-us.apache.org/repos/asf/ambari/blob/718f2ea1/ambari-admin/src/main/resources/ui/admin-web/app/styles/main.css
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/718f2ea1/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/718f2ea1/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/718f2ea1/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/718f2ea1/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/718f2ea1/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/718f2ea1/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/718f2ea1/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/718f2ea1/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/718f2ea1/ambari-server/src/main/resources/Ambari-DDL-Postgres-EMBEDDED-CREATE.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/718f2ea1/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/718f2ea1/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/718f2ea1/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/718f2ea1/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.2.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/718f2ea1/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/718f2ea1/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/718f2ea1/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/718f2ea1/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/718f2ea1/ambari-web/app/messages.js
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/718f2ea1/ambari-web/app/utils/host_progress_popup.js
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/718f2ea1/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
----------------------------------------------------------------------


[32/50] [abbrv] ambari git commit: AMBARI-14035. Add blueprint support for drpc.servers and mapreduce.job.hdfs-servers (smohanty)

Posted by jo...@apache.org.
AMBARI-14035. Add blueprint support for drpc.servers and mapreduce.job.hdfs-servers (smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/350e9b3e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/350e9b3e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/350e9b3e

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 350e9b3eed3608417536d90259a497b3ebd7079d
Parents: 2444745
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Sat Feb 13 09:47:25 2016 -0800
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Sat Feb 13 09:47:25 2016 -0800

----------------------------------------------------------------------
 .../BlueprintConfigurationProcessor.java        |  2 +
 .../BlueprintConfigurationProcessorTest.java    | 48 ++++++++++++++++++++
 2 files changed, 50 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/350e9b3e/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index 7fb2592..2d9a851 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -2255,6 +2255,7 @@ public class BlueprintConfigurationProcessor {
     mapredSiteMap.put("mapred.job.tracker", new SingleHostTopologyUpdater("JOBTRACKER"));
     mapredSiteMap.put("mapred.job.tracker.http.address", new SingleHostTopologyUpdater("JOBTRACKER"));
     mapredSiteMap.put("mapreduce.history.server.http.address", new SingleHostTopologyUpdater("JOBTRACKER"));
+    mapredSiteMap.put("mapreduce.job.hdfs-servers", new SingleHostTopologyUpdater("NAMENODE"));
 
 
     // HISTORY_SERVER
@@ -2380,6 +2381,7 @@ public class BlueprintConfigurationProcessor {
     stormSiteMap.put("nimbus.host", new SingleHostTopologyUpdater("NIMBUS"));
     stormSiteMap.put("nimbus_hosts", new SingleHostTopologyUpdater("NIMBUS"));
     stormSiteMap.put("drpc_server_host", new SingleHostTopologyUpdater("DRPC_SERVER"));
+    stormSiteMap.put("drpc.servers", new SingleHostTopologyUpdater("DRPC_SERVER"));
     stormSiteMap.put("storm_ui_server_host", new SingleHostTopologyUpdater("STORM_UI_SERVER"));
     stormSiteMap.put("worker.childopts", new OptionalSingleHostTopologyUpdater("GANGLIA_SERVER"));
     stormSiteMap.put("supervisor.childopts", new OptionalSingleHostTopologyUpdater("GANGLIA_SERVER"));

http://git-wip-us.apache.org/repos/asf/ambari/blob/350e9b3e/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index 9c76e8a..15166f7 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -6226,6 +6226,54 @@ public class BlueprintConfigurationProcessorTest {
 
 
   @Test
+  public void testResolutionOfDRPCServerAndNN() throws Exception {
+    // Given
+    final String stormConfigType = "storm-site";
+    final String mrConfigType = "mapred-site";
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> stormConfigProperties = new HashMap<String, String>();
+    Map<String, String> mrConfigProperties = new HashMap<String, String>();
+
+    properties.put(stormConfigType, stormConfigProperties);
+    properties.put(mrConfigType, mrConfigProperties);
+    stormConfigProperties.put("drpc.servers", "['%HOSTGROUP::group1%']");
+    mrConfigProperties.put("mapreduce.job.hdfs-servers", "['%HOSTGROUP::group2%']");
+
+
+    Map<String, Map<String, String>> parentProperties = new HashMap<String, Map<String, String>>();
+    Configuration parentClusterConfig = new Configuration(parentProperties,
+                                                          Collections.<String, Map<String, Map<String, String>>>emptyMap());
+    Configuration clusterConfig = new Configuration(properties,
+                                                    Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
+
+
+    Collection<String> stormComponents = new HashSet<String>();
+    stormComponents.add("NIMBUS");
+    stormComponents.add("DRPC_SERVER");
+
+    Collection<String> hdfsComponents = new HashSet<String>();
+    hdfsComponents.add("NAMENODE");
+
+
+    TestHostGroup group1 = new TestHostGroup("group1", stormComponents, Collections.singleton("host1"));
+    group1.components.add("DATANODE");
+
+    TestHostGroup group2 = new TestHostGroup("group2", hdfsComponents, Collections.singleton("host2"));
+
+    Collection<TestHostGroup> hostGroups = Lists.newArrayList(group1, group2);
+
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+
+    // When
+    configProcessor.doUpdateForClusterCreate();
+
+    // Then
+    assertEquals("['host1']", clusterConfig.getPropertyValue(stormConfigType, "drpc.servers"));
+    assertEquals("['host2']", clusterConfig.getPropertyValue(mrConfigType, "mapreduce.job.hdfs-servers"));
+  }
+
+  @Test
   public void testHadoopWithRangerKmsServer() throws Exception {
     // Given
     final String configType = "core-site";


[11/50] [abbrv] ambari git commit: AMBARI-15007. Make amazon2015 to be part of redhat6 family (aonishuk)

Posted by jo...@apache.org.
AMBARI-15007. Make amazon2015 to be part of redhat6 family (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9fdaf4ed
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9fdaf4ed
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9fdaf4ed

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 9fdaf4ed757974fc74df55ac5f86afaf82125283
Parents: a98adb7
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Fri Feb 12 00:56:17 2016 +0200
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Fri Feb 12 01:03:21 2016 +0200

----------------------------------------------------------------------
 ambari-agent/pom.xml                            |  36 ++---
 .../src/main/python/ambari_commons/os_check.py  |  73 +++++++---
 .../ambari_commons/resources/os_family.json     | 137 +++++++++----------
 ambari-server/pom.xml                           |  12 +-
 ambari-server/src/main/assemblies/server.xml    |   2 +-
 .../server/state/stack/JsonOsFamilyRoot.java    |  38 +++++
 .../ambari/server/state/stack/OsFamily.java     |   8 +-
 .../resources/stacks/HDP/2.2/repos/repoinfo.xml |  12 --
 .../resources/stacks/HDP/2.3/repos/repoinfo.xml |  12 --
 .../resources/stacks/HDP/2.4/repos/repoinfo.xml |  12 --
 ambari-server/src/test/python/TestOSCheck.py    |  37 +++--
 ambari-server/src/test/resources/os_family.json |  89 ++++++------
 12 files changed, 260 insertions(+), 208 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9fdaf4ed/ambari-agent/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-agent/pom.xml b/ambari-agent/pom.xml
index 23d2969..bb7cc34 100644
--- a/ambari-agent/pom.xml
+++ b/ambari-agent/pom.xml
@@ -86,24 +86,6 @@
         <version>3.0</version>
       </plugin>
       <plugin>
-        <artifactId>maven-assembly-plugin</artifactId>
-        <configuration>
-          <tarLongFileMode>gnu</tarLongFileMode>
-          <descriptors>
-            <descriptor>src/packages/tarball/all.xml</descriptor>
-          </descriptors>
-        </configuration>
-        <executions>
-          <execution>
-            <id>build-tarball</id>
-            <phase>prepare-package</phase>
-            <goals>
-              <goal>single</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-surefire-plugin</artifactId>
         <configuration>
@@ -275,6 +257,24 @@
         </configuration>
       </plugin>
       <plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <tarLongFileMode>gnu</tarLongFileMode>
+          <descriptors>
+            <descriptor>src/packages/tarball/all.xml</descriptor>
+          </descriptors>
+        </configuration>
+        <executions>
+          <execution>
+            <id>build-tarball</id>
+            <phase>package</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
         <artifactId>maven-resources-plugin</artifactId>
         <version>2.6</version>
         <executions>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9fdaf4ed/ambari-common/src/main/python/ambari_commons/os_check.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/ambari_commons/os_check.py b/ambari-common/src/main/python/ambari_commons/os_check.py
index c5457bb..b430c86 100644
--- a/ambari-common/src/main/python/ambari_commons/os_check.py
+++ b/ambari-common/src/main/python/ambari_commons/os_check.py
@@ -56,6 +56,8 @@ RESOURCES_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "resou
 
 # family JSON data
 OSFAMILY_JSON_RESOURCE = "os_family.json"
+JSON_OS_MAPPING = "mapping"
+JSON_OS_ALIASES = "aliases"
 JSON_OS_TYPE = "distro"
 JSON_OS_VERSION = "versions"
 JSON_EXTENDS = "extends"
@@ -76,6 +78,8 @@ VER_NT_SERVER = 3
 _IS_ORACLE_LINUX = os.path.exists('/etc/oracle-release')
 _IS_REDHAT_LINUX = os.path.exists('/etc/redhat-release')
 
+SYSTEM_RELEASE_FILE = "/etc/system-release"
+
 def _is_oracle_linux():
   return _IS_ORACLE_LINUX
 
@@ -84,16 +88,16 @@ def _is_redhat_linux():
 
 def advanced_check(distribution):
   distribution = list(distribution)
-  if os.path.exists("/etc/issue"):
-    with open("/etc/issue", "rb") as fp:
+  if os.path.exists(SYSTEM_RELEASE_FILE):
+    with open(SYSTEM_RELEASE_FILE, "rb") as fp:
       issue_content = fp.read()
   
     if "Amazon" in issue_content:
       distribution[0] = "amazon"
-      search_groups = re.search('(\d+)\.(\d+)', issue_content)
+      search_groups = re.search('(\d+\.\d+)', issue_content)
       
       if search_groups:
-        distribution[1] = search_groups.group(1) # if version is 2015.09 only get 2015.
+        distribution[1] = search_groups.group(1)
       
   return tuple(distribution)
     
@@ -114,16 +118,24 @@ class OS_CONST_TYPE(type):
       f = open(os.path.join(RESOURCES_DIR, OSFAMILY_JSON_RESOURCE))
       json_data = eval(f.read())
       f.close()
-      for family in json_data:
+      
+      if JSON_OS_MAPPING not in json_data:
+        raise Exception("Invalid {0}".format(OSFAMILY_JSON_RESOURCE))
+      
+      json_mapping_data = json_data[JSON_OS_MAPPING]
+      
+      for family in json_mapping_data:
         cls.FAMILY_COLLECTION += [family]
-        cls.OS_COLLECTION += json_data[family][JSON_OS_TYPE]
+        cls.OS_COLLECTION += json_mapping_data[family][JSON_OS_TYPE]
         cls.OS_FAMILY_COLLECTION += [{
           'name': family,
-          'os_list': json_data[family][JSON_OS_TYPE]
+          'os_list': json_mapping_data[family][JSON_OS_TYPE]
         }]
         
-        if JSON_EXTENDS in json_data[family]:
-          cls.OS_FAMILY_COLLECTION[-1][JSON_EXTENDS] = json_data[family][JSON_EXTENDS]
+        if JSON_EXTENDS in json_mapping_data[family]:
+          cls.OS_FAMILY_COLLECTION[-1][JSON_EXTENDS] = json_mapping_data[family][JSON_EXTENDS]
+          
+        cls.OS_TYPE_ALIASES = json_data[JSON_OS_ALIASES] if JSON_OS_ALIASES in json_data else {}
     except:
       raise Exception("Couldn't load '%s' file" % OSFAMILY_JSON_RESOURCE)
 
@@ -194,7 +206,24 @@ class OSCheck:
         distribution = ("Darwin", "TestOnly", "1.1.1", "1.1.1", "1.1")
     
     return distribution
-
+  
+  @staticmethod
+  def get_alias(os_type, os_version):
+    version_parts = os_version.split('.')
+    full_os_and_major_version = os_type + version_parts[0]
+
+    if full_os_and_major_version in OSConst.OS_TYPE_ALIASES:
+      alias = OSConst.OS_TYPE_ALIASES[full_os_and_major_version]
+      re_groups = re.search('(\D+)(\d+)$', alias).groups()
+      os_type = re_groups[0]
+      os_major_version = re_groups[1]
+      
+      version_parts[0] = os_major_version
+      os_version = '.'.join(version_parts)
+      
+    return os_type, os_version
+      
+    
   @staticmethod
   def get_os_type():
     """
@@ -205,6 +234,10 @@ class OSCheck:
 
     In case cannot detect - exit.
     """
+    return OSCheck.get_alias(OSCheck._get_os_type(), OSCheck._get_os_version())[0]
+
+  @staticmethod
+  def _get_os_type():
     # Read content from /etc/*-release file
     # Full release name
     dist = OSCheck.os_distribution()
@@ -212,18 +245,18 @@ class OSCheck:
 
     # special cases
     if _is_oracle_linux():
-      return 'oraclelinux'
+      operatingSystem = 'oraclelinux'
     elif operatingSystem.startswith('suse linux enterprise server'):
-      return 'sles'
+      operatingSystem = 'sles'
     elif operatingSystem.startswith('red hat enterprise linux'):
-      return 'redhat'
+      operatingSystem = 'redhat'
     elif operatingSystem.startswith('darwin'):
-      return 'mac'
+      operatingSystem = 'mac'
 
-    if operatingSystem != '':
-      return operatingSystem
-    else:
+    if operatingSystem == '':
       raise Exception("Cannot detect os type. Exiting...")
+    
+    return operatingSystem
 
   @staticmethod
   def get_os_family():
@@ -257,11 +290,15 @@ class OSCheck:
 
     In case cannot detect raises exception.
     """
+    return OSCheck.get_alias(OSCheck._get_os_type(), OSCheck._get_os_version())[1]
+    
+  @staticmethod
+  def _get_os_version():
     # Read content from /etc/*-release file
     # Full release name
     dist = OSCheck.os_distribution()
     dist = dist[1]
-
+    
     if dist:
       return dist
     else:

http://git-wip-us.apache.org/repos/asf/ambari/blob/9fdaf4ed/ambari-common/src/main/python/ambari_commons/resources/os_family.json
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/ambari_commons/resources/os_family.json b/ambari-common/src/main/python/ambari_commons/resources/os_family.json
index 13014fc..1558c1b 100644
--- a/ambari-common/src/main/python/ambari_commons/resources/os_family.json
+++ b/ambari-common/src/main/python/ambari_commons/resources/os_family.json
@@ -1,72 +1,69 @@
 {
-  "redhat": {
-    "distro": [
-      "redhat",
-      "fedora",
-      "centos",
-      "oraclelinux",
-      "ascendos",
-      "xenserver",
-      "oel",
-      "ovs",
-      "cloudlinux",
-      "slc",
-      "scientific",
-      "psbm",
-      "centos linux"
-    ],
-    "versions": [
-      6,
-      7
-    ]
-  },
-  "amazon": {
-    "extends" : "redhat",
-    "distro": [
-      "amazon"
-    ],
-    "versions": [
-      2015
-    ]
-  },
-  "debian": {
-    "extends" : "ubuntu",
-    "distro": [
-      "debian"
-    ],
-    "versions": [
-      7
-    ]
-  },
-  "ubuntu": {
-    "distro": [
-      "ubuntu"
-    ],
-    "versions": [
-      12,
-      14
-    ]
-  },
-  "suse": {
-    "distro": [
-      "sles",
-      "sled",
-      "opensuse",
-      "suse"
-    ],
-    "versions": [
-      11
-    ]
-  },
-  "winsrv": {
-    "distro": [
-      "win2008server",
-      "win2008serverr2",
-      "win2012server",
-      "win2012serverr2"
-    ],
-    "versions": [
-      6
-    ]
-  }
+  "mapping": {
+      "redhat": {
+        "distro": [
+          "redhat",
+          "fedora",
+          "centos",
+          "oraclelinux",
+          "amazon",
+          "ascendos",
+          "xenserver",
+          "oel",
+          "ovs",
+          "cloudlinux",
+          "slc",
+          "scientific",
+          "psbm",
+          "centos linux"
+        ],
+        "versions": [
+          6,
+          7
+        ]
+      },
+      "debian": {
+        "extends" : "ubuntu",
+        "distro": [
+          "debian"
+        ],
+        "versions": [
+          7
+        ]
+      },
+      "ubuntu": {
+        "distro": [
+          "ubuntu"
+        ],
+        "versions": [
+          12,
+          14
+        ]
+      },
+      "suse": {
+        "distro": [
+          "sles",
+          "sled",
+          "opensuse",
+          "suse"
+        ],
+        "versions": [
+          11
+        ]
+      },
+      "winsrv": {
+        "distro": [
+          "win2008server",
+          "win2008serverr2",
+          "win2012server",
+          "win2012serverr2"
+        ],
+        "versions": [
+          6
+        ]
+      }
+    },
+    "aliases": {
+      "amazon2015": "amazon6"
+    }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/9fdaf4ed/ambari-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index 5a95ec4..33b5501 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -40,8 +40,8 @@
     <resourceManagementSrcLocation>${project.basedir}/../ambari-common/src/main/python/resource_management</resourceManagementSrcLocation>
     <customActionsRoot>src/main/resources/custom_actions</customActionsRoot>
     <ambariProperties>conf/unix/ambari.properties</ambariProperties>
-    <commonServicesSrcLocation>target/classes/common-services</commonServicesSrcLocation>
-    <stacksSrcLocation>target/classes/stacks/${stack.distribution}</stacksSrcLocation>
+    <commonServicesSrcLocation>src/main/resources/common-services</commonServicesSrcLocation>
+    <stacksSrcLocation>src/main/resources/stacks/${stack.distribution}</stacksSrcLocation>
     <tarballResourcesFolder>src/main/resources</tarballResourcesFolder>
     <skipPythonTests>false</skipPythonTests>
     <hadoop.version>2.7.1</hadoop.version>
@@ -562,14 +562,6 @@
           <exclude>common-services/**</exclude>
         </excludes>
       </resource>
-      <resource>
-        <directory>src/main/resources</directory>
-        <filtering>false</filtering>
-        <includes>
-          <include>stacks/**</include>
-          <include>common-services/**</include>
-        </includes>
-      </resource>
     </resources>
   </build>
   <profiles>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9fdaf4ed/ambari-server/src/main/assemblies/server.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/assemblies/server.xml b/ambari-server/src/main/assemblies/server.xml
index a75de79..ca74185 100644
--- a/ambari-server/src/main/assemblies/server.xml
+++ b/ambari-server/src/main/assemblies/server.xml
@@ -301,7 +301,7 @@
     </file>
     <file>
       <fileMode>755</fileMode>
-      <source>target/classes/stacks/stack_advisor.py</source>
+      <source>src/main/resources/stacks/stack_advisor.py</source>
       <outputDirectory>/var/lib/ambari-server/resources/stacks</outputDirectory>
     </file>
     <file>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9fdaf4ed/ambari-server/src/main/java/org/apache/ambari/server/state/stack/JsonOsFamilyRoot.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/JsonOsFamilyRoot.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/JsonOsFamilyRoot.java
new file mode 100644
index 0000000..3f9158f
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/JsonOsFamilyRoot.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.state.stack;
+
+import java.util.Map;
+
+public class JsonOsFamilyRoot {
+  private Map<String, JsonOsFamilyEntry> mapping;
+  private Map<String, String> aliases;
+  
+  public Map<String, JsonOsFamilyEntry> getMapping() {
+    return mapping;
+  }
+  public void setMapping(Map<String, JsonOsFamilyEntry> mapping) {
+    this.mapping = mapping;
+  }
+  public Map<String, String> getAliases() {
+    return aliases;
+  }
+  public void setAliases(Map<String, String> aliases) {
+    this.aliases = aliases;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/9fdaf4ed/ambari-server/src/main/java/org/apache/ambari/server/state/stack/OsFamily.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/OsFamily.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/OsFamily.java
index 37a6db3..e494c44 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/OsFamily.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/OsFamily.java
@@ -48,11 +48,14 @@ public class OsFamily {
     private final String os_pattern = "([\\D]+|(?:[\\D]+[\\d]+[\\D]+))([\\d]*)";
     private final String OS_DISTRO = "distro";
     private final String OS_VERSION = "versions";
+    private final String OS_MAPPING = "mapping";
+    private final String OS_ALIASES = "aliases";
     private final String LOAD_CONFIG_MSG = "Could not load OS family definition from %s file";
     private final String FILE_NAME = "os_family.json";
     private final Logger LOG = LoggerFactory.getLogger(OsFamily.class);
 
     private Map<String, JsonOsFamilyEntry> osMap = null;
+    private JsonOsFamilyRoot jsonOsFamily = null;
 
   /**
    * Initialize object
@@ -77,9 +80,10 @@ public class OsFamily {
         if (!f.exists()) throw new Exception();
         inputStream = new FileInputStream(f);
 
-        Type type = new TypeToken<Map<String, JsonOsFamilyEntry>>() {}.getType();
+        Type type = new TypeToken<JsonOsFamilyRoot>() {}.getType();
         Gson gson = new Gson();
-        osMap = gson.fromJson(new InputStreamReader(inputStream), type);
+        jsonOsFamily = gson.fromJson(new InputStreamReader(inputStream), type);
+        osMap = jsonOsFamily.getMapping();
       } catch (Exception e) {
         LOG.error(String.format(LOAD_CONFIG_MSG, new File(SharedResourcesPath, FILE_NAME).toString()));
         throw new RuntimeException(e);

http://git-wip-us.apache.org/repos/asf/ambari/blob/9fdaf4ed/ambari-server/src/main/resources/stacks/HDP/2.2/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/repos/repoinfo.xml
index 9decf51..dbf8506 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/repos/repoinfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/repos/repoinfo.xml
@@ -29,18 +29,6 @@
       <reponame>HDP-UTILS</reponame>
     </repo>
   </os>
-  <os family="amazon2015">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.2.6.0</baseurl>
-      <repoid>HDP-2.2</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6</baseurl>
-      <repoid>HDP-UTILS-1.1.0.20</repoid>
-      <reponame>HDP-UTILS</reponame>
-    </repo>
-  </os>
   <os family="suse11">
     <repo>
       <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11sp3/2.x/updates/2.2.6.0</baseurl>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9fdaf4ed/ambari-server/src/main/resources/stacks/HDP/2.3/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/repos/repoinfo.xml
index 279134b..142b87d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/repos/repoinfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/repos/repoinfo.xml
@@ -41,18 +41,6 @@
       <reponame>HDP-UTILS</reponame>
     </repo>
   </os>
-  <os family="amazon2015">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.3.0.0</baseurl>
-      <repoid>HDP-2.3</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6</baseurl>
-      <repoid>HDP-UTILS-1.1.0.20</repoid>
-      <reponame>HDP-UTILS</reponame>
-    </repo>
-  </os>
   <os family="suse11">
     <repo>
       <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11sp3/2.x/updates/2.3.0.0</baseurl>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9fdaf4ed/ambari-server/src/main/resources/stacks/HDP/2.4/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/repos/repoinfo.xml
index 6ac43f9..54bd3da 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/repos/repoinfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/repos/repoinfo.xml
@@ -41,18 +41,6 @@
       <reponame>HDP-UTILS</reponame>
     </repo>
   </os>
-  <os family="redhat2015">
-    <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.4.0.0</baseurl>
-      <repoid>HDP-2.4</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6</baseurl>
-      <repoid>HDP-UTILS-1.1.0.20</repoid>
-      <reponame>HDP-UTILS</reponame>
-    </repo>
-  </os>
   <os family="suse11">
     <repo>
       <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/suse11sp3/2.x/updates/2.4.0.0</baseurl>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9fdaf4ed/ambari-server/src/test/python/TestOSCheck.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestOSCheck.py b/ambari-server/src/test/python/TestOSCheck.py
index cf114a1..d919fbc 100644
--- a/ambari-server/src/test/python/TestOSCheck.py
+++ b/ambari-server/src/test/python/TestOSCheck.py
@@ -30,7 +30,7 @@ from mock.mock import MagicMock
 
 from only_for_platform import os_distro_value, os_distro_value_linux
 
-from ambari_commons import OSCheck
+from ambari_commons import OSCheck, OSConst
 import os_check_type
 
 utils = __import__('ambari_server.utils').utils
@@ -50,7 +50,7 @@ class TestOSCheck(TestCase):
 
     # 1 - Any system
     mock_is_oracle_linux.return_value = False
-    mock_linux_distribution.return_value = ('my_os', '', '')
+    mock_linux_distribution.return_value = ('my_os', '2015.09', '')
     result = OSCheck.get_os_type()
     self.assertEquals(result, 'my_os')
 
@@ -66,13 +66,13 @@ class TestOSCheck(TestCase):
 
     # 3 - path exist: '/etc/oracle-release'
     mock_is_oracle_linux.return_value = True
-    mock_linux_distribution.return_value = ('some_os', '', '')
+    mock_linux_distribution.return_value = ('some_os', '1234', '')
     result = OSCheck.get_os_type()
     self.assertEquals(result, 'oraclelinux')
 
     # 4 - Common system
     mock_is_oracle_linux.return_value = False
-    mock_linux_distribution.return_value = ('CenToS', '', '')
+    mock_linux_distribution.return_value = ('CenToS', '4.56', '')
     result = OSCheck.get_os_type()
     self.assertEquals(result, 'centos')
 
@@ -99,31 +99,31 @@ class TestOSCheck(TestCase):
 
     # 1 - Any system
     mock_exists.return_value = False
-    mock_linux_distribution.return_value = ('MY_os', '', '')
+    mock_linux_distribution.return_value = ('MY_os', '5.6.7', '')
     result = OSCheck.get_os_family()
     self.assertEquals(result, 'my_os')
 
     # 2 - Redhat
     mock_exists.return_value = False
-    mock_linux_distribution.return_value = ('Centos Linux', '', '')
+    mock_linux_distribution.return_value = ('Centos Linux', '2.4', '')
     result = OSCheck.get_os_family()
     self.assertEquals(result, 'redhat')
 
     # 3 - Ubuntu
     mock_exists.return_value = False
-    mock_linux_distribution.return_value = ('Ubuntu', '', '')
+    mock_linux_distribution.return_value = ('Ubuntu', '14.04', '')
     result = OSCheck.get_os_family()
     self.assertEquals(result, 'ubuntu')
 
     # 4 - Suse
     mock_exists.return_value = False
     mock_linux_distribution.return_value = (
-    'suse linux enterprise server', '', '')
+    'suse linux enterprise server', '11.3', '')
     result = OSCheck.get_os_family()
     self.assertEquals(result, 'suse')
 
     mock_exists.return_value = False
-    mock_linux_distribution.return_value = ('SLED', '', '')
+    mock_linux_distribution.return_value = ('SLED', '1.2.3.4.5', '')
     result = OSCheck.get_os_family()
     self.assertEquals(result, 'suse')
 
@@ -141,7 +141,7 @@ class TestOSCheck(TestCase):
   def test_get_os_version(self, mock_linux_distribution):
 
     # 1 - Any system
-    mock_linux_distribution.return_value = ('', '123.45', '')
+    mock_linux_distribution.return_value = ('some_os', '123.45', '')
     result = OSCheck.get_os_version()
     self.assertEquals(result, '123.45')
 
@@ -159,7 +159,7 @@ class TestOSCheck(TestCase):
   def test_get_os_major_version(self, mock_linux_distribution):
 
     # 1
-    mock_linux_distribution.return_value = ('', '123.45.67', '')
+    mock_linux_distribution.return_value = ('abcd_os', '123.45.67', '')
     result = OSCheck.get_os_major_version()
     self.assertEquals(result, '123')
 
@@ -167,6 +167,21 @@ class TestOSCheck(TestCase):
     mock_linux_distribution.return_value = ('Suse', '11', '')
     result = OSCheck.get_os_major_version()
     self.assertEquals(result, '11')
+    
+  @patch.object(OSCheck, "os_distribution")
+  def test_aliases(self, mock_linux_distribution):
+    OSConst.OS_TYPE_ALIASES['qwerty_os123'] = 'aliased_os5'
+    OSConst.OS_FAMILY_COLLECTION.append({          
+          'name': 'aliased_os_family',
+          'os_list': ["aliased_os"]
+    })
+    
+    mock_linux_distribution.return_value = ('qwerty_os', '123.45.67', '')
+    
+    self.assertEquals(OSCheck.get_os_type(), 'aliased_os')
+    self.assertEquals(OSCheck.get_os_major_version(), '5')
+    self.assertEquals(OSCheck.get_os_version(), '5.45.67')
+    self.assertEquals(OSCheck.get_os_family(), 'aliased_os_family')
 
   @patch.object(OSCheck, "os_distribution")
   def test_get_os_release_name(self, mock_linux_distribution):

http://git-wip-us.apache.org/repos/asf/ambari/blob/9fdaf4ed/ambari-server/src/test/resources/os_family.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/os_family.json b/ambari-server/src/test/resources/os_family.json
index df55b61..ae6b19e 100644
--- a/ambari-server/src/test/resources/os_family.json
+++ b/ambari-server/src/test/resources/os_family.json
@@ -1,45 +1,50 @@
 {
-  "redhat": {
-    "distro": [
-      "redhat",
-      "fedora",
-      "centos",
-      "oraclelinux"
-    ],
-    "versions": [
-      5,
-      6
-    ]
+  "mapping": {
+    "redhat": {
+      "distro": [
+        "redhat",
+        "fedora",
+        "centos",
+        "oraclelinux"
+      ],
+      "versions": [
+        5,
+        6
+      ]
+    },
+    "ubuntu": {
+      "distro": [
+        "ubuntu",
+        "debian"
+      ],
+      "versions": [
+        12
+      ]
+    },
+    "suse": {
+      "distro": [
+        "sles",
+        "sled",
+        "opensuse",
+        "suse"
+      ],
+      "versions": [
+        11
+      ]
+    },
+    "winsrv": {
+      "distro": [
+        "win2008server",
+        "win2008serverr2",
+        "win2012server",
+        "win2012serverr2"
+      ],
+      "versions": [
+        6
+      ]
+    }
   },
-  "ubuntu": {
-    "distro": [
-      "ubuntu",
-      "debian"
-    ],
-    "versions": [
-      12
-    ]
-  },
-  "suse": {
-    "distro": [
-      "sles",
-      "sled",
-      "opensuse",
-      "suse"
-    ],
-    "versions": [
-      11
-    ]
-  },
-  "winsrv": {
-    "distro": [
-      "win2008server",
-      "win2008serverr2",
-      "win2012server",
-      "win2012serverr2"
-    ],
-    "versions": [
-      6
-    ]
+  "aliases": {
+    "amazon2015": "amazon6"
   }
-}
+}
\ No newline at end of file


[35/50] [abbrv] ambari git commit: AMBARI-15047. Add UI option to make Red Hat Satellite work (onechiporenko)

Posted by jo...@apache.org.
AMBARI-15047. Add UI option to make Red Hat Satellite work (onechiporenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c86964b5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c86964b5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c86964b5

Branch: refs/heads/branch-dev-patch-upgrade
Commit: c86964b542c5ac64c0f05ecf12fa1b2677e4ece8
Parents: 0ff86b1
Author: Oleg Nechiporenko <on...@apache.org>
Authored: Mon Feb 15 14:08:48 2016 +0200
Committer: Oleg Nechiporenko <on...@apache.org>
Committed: Mon Feb 15 14:38:15 2016 +0200

----------------------------------------------------------------------
 .../controllers/stackVersions/StackVersionsCreateCtrl.js     | 1 +
 .../controllers/stackVersions/StackVersionsEditCtrl.js       | 1 +
 .../main/resources/ui/admin-web/app/scripts/i18n.config.js   | 3 +++
 .../admin-web/app/views/stackVersions/stackVersionPage.html  | 8 ++++++++
 ambari-web/app/config.js                                     | 3 ++-
 ambari-web/app/controllers/wizard/step1_controller.js        | 2 ++
 ambari-web/app/messages.js                                   | 2 ++
 .../templates/main/admin/stack_upgrade/edit_repositories.hbs | 7 +++++++
 ambari-web/app/templates/wizard/step1.hbs                    | 7 +++++++
 .../main/admin/stack_upgrade/upgrade_version_box_view.js     | 8 ++++++--
 ambari-web/test/views/common/log_file_search_view_test.js    | 2 +-
 11 files changed, 40 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c86964b5/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
index 532e5f4..002d393 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
@@ -23,6 +23,7 @@ angular.module('ambariAdminConsole')
   $scope.createController = true;
   $scope.osList = [];
   $scope.skipValidation = false;
+  $scope.useRedhatSatellite = false;
   $scope.selectedOS = 0;
   $scope.repoSubversion = "";
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/c86964b5/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsEditCtrl.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsEditCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsEditCtrl.js
index 39a6700..3c38444 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsEditCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsEditCtrl.js
@@ -23,6 +23,7 @@ angular.module('ambariAdminConsole')
   $scope.editController = true;
   $scope.osList = [];
   $scope.skipValidation = false;
+  $scope.useRedhatSatellite = false;
   $scope.selectedOS = 0;
 
   $scope.loadStackVersionInfo = function () {

http://git-wip-us.apache.org/repos/asf/ambari/blob/c86964b5/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js
index 91a1645..327ae03 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js
@@ -304,6 +304,8 @@ angular.module('ambariAdminConsole')
       'os': 'OS',
       'baseURL': 'Base URL',
       'skipValidation': 'Skip Repository Base URL validation (Advanced)',
+      'useRedhatSatellite': 'Use RedHat Satellite/Spacewalk',
+
 
       'changeBaseURLConfirmation': {
         'title': 'Confirm Base URL Change',
@@ -314,6 +316,7 @@ angular.module('ambariAdminConsole')
         'baseURLs': 'Provide Base URLs for the Operating Systems you are configuring. Uncheck all other Operating Systems.',
         'validationFailed': 'Some of the repositories failed validation. Make changes to the base url or skip validation if you are sure that urls are correct',
         'skipValidationWarning': '<b>Warning:</b> This is for advanced users only. Use this option if you want to skip validation for Repository Base URLs.',
+        'useRedhatSatelliteWarning': 'Disable distributed repositories and use RedHat Satellite/Spacewalk channels instead',
         'filterListError': 'Fetch stack version filter list error',
         'versionCreated': 'Created version <a href="#/stackVersions/{{stackName}}/{{versionName}}/edit">{{stackName}}-{{versionName}}</a>',
         'versionCreationError': 'Version creation error',

http://git-wip-us.apache.org/repos/asf/ambari/blob/c86964b5/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html b/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
index 449d743..6870689 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
@@ -92,6 +92,14 @@
             </label>
           </div>
         </div>
+        <div class="col-sm-12 hidden" id="use-redhat">
+          <div class="checkbox">
+            <label>
+              <input type="checkbox" ng-model="useRedhatSatellite" ng-change="clearErrors()">
+              {{'versions.useRedhatSatellite' | translate}} <span class="glyphicon glyphicon-question-sign" tooltip-html-unsafe="{{'versions.alerts.useRedhatSatelliteWarning' | translate}}"></span>
+            </label>
+          </div>
+        </div>
       </div>
     </div>
   </div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c86964b5/ambari-web/app/config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/config.js b/ambari-web/app/config.js
index 7d727d7..bdca3ad 100644
--- a/ambari-web/app/config.js
+++ b/ambari-web/app/config.js
@@ -81,7 +81,8 @@ App.supports = {
   preInstallChecks: false,
   hostComboSearchBox: false,
   serviceAutoStart: false,
-  logSearch: false
+  logSearch: false,
+  redhatSatellite: false
 };
 
 if (App.enableExperimental) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/c86964b5/ambari-web/app/controllers/wizard/step1_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step1_controller.js b/ambari-web/app/controllers/wizard/step1_controller.js
index 34e121e..c357bf4 100644
--- a/ambari-web/app/controllers/wizard/step1_controller.js
+++ b/ambari-web/app/controllers/wizard/step1_controller.js
@@ -27,6 +27,8 @@ App.WizardStep1Controller = Em.Controller.extend({
    */
   skipValidationChecked: false,
 
+  useRedhatSatellite: false,
+
   selectedStack: function() {
     return App.Stack.find().findProperty('isSelected');
   }.property('content.stacks.@each.isSelected')

http://git-wip-us.apache.org/repos/asf/ambari/blob/c86964b5/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 909f55c..08ff073 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -542,7 +542,9 @@ Em.I18n.translations = {
   'installer.step1.advancedRepo.localRepo.label.baseUrl':'Repository Base URL',
   'installer.step1.advancedRepo.localRepo.label.stack':'Stack',
   'installer.step1.advancedRepo.skipValidation.tooltip':'<b>Warning:</b> This is for advanced users only. Use this option if you want to skip validation for Repository Base URLs.',
+  'installer.step1.advancedRepo.useRedhatSatellite.tooltip':'Disable distributed repositories and use RedHat Satellite/Spacewalk channels instead',
   'installer.step1.advancedRepo.skipValidation.message':'Skip Repository Base URL validation (Advanced)',
+  'installer.step1.advancedRepo.useRedhatSatellite.message': 'Use RedHat Satellite/Spacewalk',
   'installer.step1.attentionNeeded':'<b>Attention:</b> Repository URLs are REQUIRED before you can proceed.',
   'installer.step1.invalidURLAttention': '<b>Attention:</b> Please make sure all repository URLs are valid before proceeding.',
   'installer.step1.checkAtLeastOneAttention': '<b>Attention:</b> Please check at least one repository.',

http://git-wip-us.apache.org/repos/asf/ambari/blob/c86964b5/ambari-web/app/templates/main/admin/stack_upgrade/edit_repositories.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/stack_upgrade/edit_repositories.hbs b/ambari-web/app/templates/main/admin/stack_upgrade/edit_repositories.hbs
index 4ee3418..62c3d14 100644
--- a/ambari-web/app/templates/main/admin/stack_upgrade/edit_repositories.hbs
+++ b/ambari-web/app/templates/main/admin/stack_upgrade/edit_repositories.hbs
@@ -50,3 +50,10 @@
     <i class="icon-question-sign" rel="skip-validation-tooltip"
        data-toggle="tooltip" {{translateAttr title="installer.step1.advancedRepo.skipValidation.tooltip"}}></i></label>
 </div>
+{{#if App.supports.redhatSatellite}}
+  <div id="use-redhat">
+    <label>{{view Ember.Checkbox classNames="align-checkbox" checkedBinding="view.parentView.useRedhatSatellite"}}{{t installer.step1.advancedRepo.useRedhatSatellite.message}}
+      <i class="icon-question-sign" rel="use-redhat-tooltip"
+         data-toggle="tooltip" {{translateAttr title="installer.step1.advancedRepo.useRedhatSatellite.tooltip"}}></i></label>
+  </div>
+{{/if}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c86964b5/ambari-web/app/templates/wizard/step1.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/wizard/step1.hbs b/ambari-web/app/templates/wizard/step1.hbs
index 478e459..e59b76d 100644
--- a/ambari-web/app/templates/wizard/step1.hbs
+++ b/ambari-web/app/templates/wizard/step1.hbs
@@ -98,6 +98,13 @@
               <i class="icon-question-sign" rel="skip-validation-tooltip"
                  data-toggle="tooltip" {{translateAttr title="installer.step1.advancedRepo.skipValidation.tooltip"}}></i></label>
           </div>
+          {{#if App.supports.redhatSatellite}}
+            <div id="use-redhat">
+              <label>{{view Ember.Checkbox classNames="align-checkbox" checkedBinding="useRedhatSatellite"}}{{t installer.step1.advancedRepo.useRedhatSatellite.message}}
+                <i class="icon-question-sign" rel="use-redhat-tooltip"
+                   data-toggle="tooltip" {{translateAttr title="installer.step1.advancedRepo.useRedhatSatellite.tooltip"}}></i></label>
+            </div>
+          {{/if}}
           {{#if view.invalidFormatUrlExist}}
             <div class="alert">{{t installer.step1.attentionNeeded}}</div>
           {{/if}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c86964b5/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
index f8dd4f2..20280fe 100644
--- a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
+++ b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
@@ -307,6 +307,7 @@ App.UpgradeVersionBoxView = Em.View.extend({
     return this.get('isRepoUrlsEditDisabled') ? null : App.ModalPopup.show({
       classNames: ['repository-list', 'sixty-percent-width-modal'],
       skipValidation: false,
+      useRedhatSatellite: false,
       autoHeight: false,
       /**
        * @type {boolean}
@@ -333,14 +334,17 @@ App.UpgradeVersionBoxView = Em.View.extend({
 
           this.get('content.operatingSystems').forEach(function (os) {
             os.get('repositories').forEach(function (repo) {
-              disablePrimary = (!disablePrimary) ? repo.get('hasError') : disablePrimary;
+              disablePrimary = !disablePrimary ? repo.get('hasError') : disablePrimary;
             }, this);
           }, this);
           this.set('parentView.disablePrimary', disablePrimary);
         },
         templateName: require('templates/main/admin/stack_upgrade/edit_repositories'),
         didInsertElement: function () {
-          App.tooltip($("[rel=skip-validation-tooltip]"), {placement: 'right'});
+          App.tooltip($("[rel=skip-validation-tooltip], [rel=use-redhat-tooltip]"), {placement: 'right'});
+        },
+        willDestroyElement: function () {
+          $("[rel=skip-validation-tooltip], [rel=use-redhat-tooltip]").tooltip('destroy');
         }
       }),
       header: Em.I18n.t('common.repositories'),

http://git-wip-us.apache.org/repos/asf/ambari/blob/c86964b5/ambari-web/test/views/common/log_file_search_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/common/log_file_search_view_test.js b/ambari-web/test/views/common/log_file_search_view_test.js
index ca208b3..850230d 100644
--- a/ambari-web/test/views/common/log_file_search_view_test.js
+++ b/ambari-web/test/views/common/log_file_search_view_test.js
@@ -32,7 +32,7 @@ describe('App.LogFileSearchView', function() {
         isIncluded: !!isIncluded
       });
     };
-    var cases = [
+    [
       {
         viewContent: {
           keywordsFilterValue: 'some_keyword'


[19/50] [abbrv] ambari git commit: AMBARI-15034. Add checks and alerts when clusterconfigmapping has multiple selected entries for a config type.(vbrodetskyi)

Posted by jo...@apache.org.
AMBARI-15034. Add checks and alerts when clusterconfigmapping has multiple selected entries for a config type.(vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/604040fb
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/604040fb
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/604040fb

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 604040fbf71654c2218baef970c27d71b5f449c1
Parents: 350e9b3
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Fri Feb 12 08:06:01 2016 +0200
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Fri Feb 12 08:06:01 2016 +0200

----------------------------------------------------------------------
 .../ambari/server/checks/CheckDatabaseHelper.java | 18 +++++++++---------
 .../server/checks/CheckDatabaseHelperTest.java    |  5 ++++-
 2 files changed, 13 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/604040fb/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDatabaseHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDatabaseHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDatabaseHelper.java
index a078c8a..9213738 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDatabaseHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDatabaseHelper.java
@@ -152,19 +152,23 @@ public class CheckDatabaseHelper {
   * than one selected version it's a bug and we are showing error message for user.
   * */
   protected void checkForConfigsSelectedMoreThanOnce() {
-    String GET_CONFIGS_SELECTED_MORE_THAN_ONCE_QUERY = "select type_name from clusterconfigmapping group by type_name having sum(selected) > 1";
-    Set<String> configsSelectedMoreThanOnce = new HashSet<>();
+    String GET_CONFIGS_SELECTED_MORE_THAN_ONCE_QUERY = "select c.cluster_name,type_name from clusterconfigmapping ccm " +
+            "join clusters c on ccm.cluster_id=c.cluster_id " +
+            "group by c.cluster_name,type_name " +
+            "having sum(selected) > 1";
+    Multimap<String, String> configsSelectedMoreThanOnce = HashMultimap.create();
     ResultSet rs = null;
     try {
       Statement statement = connection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE);
       rs = statement.executeQuery(GET_CONFIGS_SELECTED_MORE_THAN_ONCE_QUERY);
       if (rs != null) {
         while (rs.next()) {
-          configsSelectedMoreThanOnce.add(rs.getString("type_name"));
+          configsSelectedMoreThanOnce.put(rs.getString("cluster_name"), rs.getString("type_name"));
         }
       }
-      if (!configsSelectedMoreThanOnce.isEmpty()) {
-        LOG.error("You have config(s) that is(are) selected more than once in clusterconfigmapping: " + StringUtils.join(configsSelectedMoreThanOnce, ","));
+      for (String clusterName : configsSelectedMoreThanOnce.keySet()) {
+        LOG.error(String.format("You have config(s), in cluster %s, that is(are) selected more than once in clusterconfigmapping: %s",
+                clusterName ,StringUtils.join(configsSelectedMoreThanOnce.get(clusterName), ",")));
       }
     } catch (SQLException e) {
       LOG.error("Exception occurred during check for config selected more than ones procedure: ", e);
@@ -193,10 +197,6 @@ public class CheckDatabaseHelper {
       rs = statement.executeQuery(GET_HOSTS_WITHOUT_STATUS_QUERY);
       if (rs != null) {
         while (rs.next()) {
-          LOG.error(rs.getString("host_name"));
-          LOG.error(rs.getString("HOST_NAME"));
-          System.out.println("ERROR" + rs.getString("HOST_NAME"));
-          System.out.println("ERROR" + rs.getString("host_name"));
           hostsWithoutStatus.add(rs.getString("host_name"));
         }
       }

http://git-wip-us.apache.org/repos/asf/ambari/blob/604040fb/ambari-server/src/test/java/org/apache/ambari/server/checks/CheckDatabaseHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/CheckDatabaseHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/CheckDatabaseHelperTest.java
index e329ab7..1c2765c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/CheckDatabaseHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/CheckDatabaseHelperTest.java
@@ -111,7 +111,10 @@ public class CheckDatabaseHelperTest {
 
     expect(mockDBDbAccessor.getConnection()).andReturn(mockConnection);
     expect(mockConnection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE)).andReturn(mockStatement);
-    expect(mockStatement.executeQuery("select type_name from clusterconfigmapping group by type_name having sum(selected) > 1")).andReturn(mockResultSet);
+    expect(mockStatement.executeQuery("select c.cluster_name,type_name from clusterconfigmapping ccm " +
+            "join clusters c on ccm.cluster_id=c.cluster_id " +
+            "group by c.cluster_name,type_name " +
+            "having sum(selected) > 1")).andReturn(mockResultSet);
 
     CheckDatabaseHelper checkDatabaseHelper = new CheckDatabaseHelper(mockDBDbAccessor, mockInjector, null);
 


[21/50] [abbrv] ambari git commit: AMBARI-15025. Hosts page: Filtering works incorrectly with multiple filters applied (onechiporenko)

Posted by jo...@apache.org.
AMBARI-15025. Hosts page: Filtering works incorrectly with multiple filters applied (onechiporenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ddba3c5d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ddba3c5d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ddba3c5d

Branch: refs/heads/branch-dev-patch-upgrade
Commit: ddba3c5d4e2dbd7f0edf07429df4fa24bbaa2d21
Parents: 30438e9
Author: Oleg Nechiporenko <on...@apache.org>
Authored: Fri Feb 12 12:21:25 2016 +0200
Committer: Oleg Nechiporenko <on...@apache.org>
Committed: Fri Feb 12 12:23:04 2016 +0200

----------------------------------------------------------------------
 ambari-web/app/controllers/main/host.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ddba3c5d/ambari-web/app/controllers/main/host.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/host.js b/ambari-web/app/controllers/main/host.js
index 047e19f..a38b2db 100644
--- a/ambari-web/app/controllers/main/host.js
+++ b/ambari-web/app/controllers/main/host.js
@@ -106,7 +106,7 @@ App.MainHostController = Em.ArrayController.extend(App.TableServerMixin, {
     },
     {
       name: 'criticalWarningAlertsCount',
-      key: 'alerts_summary/CRITICAL{0}|alerts_summary/WARNING{1}',
+      key: '(alerts_summary/CRITICAL{0}|alerts_summary/WARNING{1})',
       type: 'CUSTOM'
     },
     {


[02/50] [abbrv] ambari git commit: AMBARI-15009. Log Search: Add link to background operations popup navigate to Host Details Logs tab with pre-set condition (alexantonenko)

Posted by jo...@apache.org.
AMBARI-15009. Log Search: Add link to background operations popup navigate to Host Details Logs tab with pre-set condition (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/fdb101bd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/fdb101bd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/fdb101bd

Branch: refs/heads/branch-dev-patch-upgrade
Commit: fdb101bdd78a4d6d2626008dc4a146374283e8f3
Parents: 754d0fa
Author: Alex Antonenko <hi...@gmail.com>
Authored: Thu Feb 11 12:46:33 2016 +0200
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Thu Feb 11 18:17:50 2016 +0200

----------------------------------------------------------------------
 .../main/admin/kerberos/step7_controller.js     | 12 ++--
 ambari-web/app/messages.js                      |  1 +
 ambari-web/app/styles/application.less          | 12 ++--
 .../templates/common/host_progress_popup.hbs    |  5 ++
 ambari-web/app/utils/ajax/ajax.js               | 15 ++++
 ambari-web/app/views/application.js             | 22 ++----
 .../common/host_progress_popup_body_view.js     | 75 +++++++++++++++++++-
 .../app/views/common/log_file_search_view.js    |  2 +-
 ambari-web/app/views/common/modal_popup.js      | 27 +++++++
 .../modal_popups/log_file_search_popup.js       | 12 +++-
 .../host_progress_popup_body_view_test.js       | 54 +++++++++++++-
 11 files changed, 204 insertions(+), 33 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/fdb101bd/ambari-web/app/controllers/main/admin/kerberos/step7_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/kerberos/step7_controller.js b/ambari-web/app/controllers/main/admin/kerberos/step7_controller.js
index 71e94ca..2a4fb90 100644
--- a/ambari-web/app/controllers/main/admin/kerberos/step7_controller.js
+++ b/ambari-web/app/controllers/main/admin/kerberos/step7_controller.js
@@ -48,13 +48,13 @@ App.KerberosWizardStep7Controller = App.KerberosProgressPageController.extend({
       }
     };
     if (isRetry) {
-      // on retry we have to unkerberize cluster
-      this.unkerberizeCluster().always(function() {
-        // clear current request object before start of kerberize process
-        self.set('request', kerberizeRequest);
-        self.clearStage();
-        self.loadStep();
+      // on retry send force update
+      self.set('request', {
+        name: 'KERBERIZE_CLUSTER',
+        ajaxName: 'admin.kerberize.cluster.force'
       });
+      self.clearStage();
+      self.loadStep();
     } else {
       this.set('request', kerberizeRequest);
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/fdb101bd/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 59877a5..909f55c 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -313,6 +313,7 @@ Em.I18n.translations = {
   'common.keywords': 'Keywods',
   'common.levels': 'Levels',
   'common.extension': 'Extension',
+  'common.logs': 'Logs',
 
   'models.alert_instance.tiggered.verbose': "Occurred on {0} <br> Checked on {1}",
   'models.alert_definition.triggered.verbose': "Occurred on {0}",

http://git-wip-us.apache.org/repos/asf/ambari/blob/fdb101bd/ambari-web/app/styles/application.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/application.less b/ambari-web/app/styles/application.less
index 57b7e76..4e2b5d1 100644
--- a/ambari-web/app/styles/application.less
+++ b/ambari-web/app/styles/application.less
@@ -1861,13 +1861,13 @@ a:focus {
       float: right;
       a {
         cursor: pointer;
-      }
-      .task-detail-copy {
-        margin-right: 12px;
         float: left;
-      }
-      .task-detail-open-dialog {
-        float: right;
+        margin-right: 12px;
+
+        &:last-child {
+          margin-right: 0;
+          float: right;
+        }
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/fdb101bd/ambari-web/app/templates/common/host_progress_popup.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/common/host_progress_popup.hbs b/ambari-web/app/templates/common/host_progress_popup.hbs
index 00f2e08..fe330f7 100644
--- a/ambari-web/app/templates/common/host_progress_popup.hbs
+++ b/ambari-web/app/templates/common/host_progress_popup.hbs
@@ -195,6 +195,11 @@
         <div class="task-detail-ico-wrap">
           <a {{translateAttr title="common.fullLogPopup.clickToCopy"}} {{action "textTrigger" taskInfo target="view"}} class="task-detail-copy"><i
                   class="icon-copy"></i> {{t common.copy}}</a>
+          {{#if App.supports.logSearch}}
+            <a {{action navigateToHostLogs target="view"}} {{bindAttr class="view.isLogsLinkVisible::hidden"}} href="#">
+              <i class="icon-file"></i> {{t common.logs}}
+            </a>
+          {{/if}}
           <a {{translateAttr title="common.openNewWindow"}} {{action openTaskLogInDialog}} class="task-detail-open-dialog"><i
                   class="icon-external-link"></i> {{t common.open}}</a>
         </div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fdb101bd/ambari-web/app/utils/ajax/ajax.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/ajax/ajax.js b/ambari-web/app/utils/ajax/ajax.js
index ce6a196..f2174f3 100644
--- a/ambari-web/app/utils/ajax/ajax.js
+++ b/ambari-web/app/utils/ajax/ajax.js
@@ -1477,6 +1477,21 @@ var urls = {
     }
   },
 
+  'admin.kerberize.cluster.force': {
+    'type': 'PUT',
+    'real': '/clusters/{clusterName}?force_toggle_kerberos=true',
+    'mock': '/data/wizard/kerberos/kerberize_cluster.json',
+    'format': function (data) {
+      return {
+        data: JSON.stringify({
+          Clusters: {
+            security_type: "KERBEROS"
+          }
+        })
+      }
+    }
+  },
+
   'admin.unkerberize.cluster.skip': {
     'type': 'PUT',
     'real': '/clusters/{clusterName}?manage_kerberos_identities=false',

http://git-wip-us.apache.org/repos/asf/ambari/blob/fdb101bd/ambari-web/app/views/application.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/application.js b/ambari-web/app/views/application.js
index 8a83144..2a8ea00 100644
--- a/ambari-web/app/views/application.js
+++ b/ambari-web/app/views/application.js
@@ -26,28 +26,16 @@ App.ApplicationView = Em.View.extend({
     // on 'Enter' pressed, trigger modal window primary button if primary button is enabled(green)
     // on 'Esc' pressed, close the modal
     $(document).keydown(function(event){
-      if (event.which == 13 || event.keyCode == 13 ) {
-        var primaryButton = $(document).find('#modal > .modal-footer > .btn-success').last();
-        if ((!$("*:focus").is("textarea")) && primaryButton.length > 0 && primaryButton.attr('disabled') != 'disabled') {
-          event.preventDefault();
-          event.stopPropagation();
-          primaryButton.click();
-          return false;
-        }
+      if (event.which === 13 || event.keyCode === 13 ) {
+        $('#modal').trigger('enter-key-pressed');
       }
       return true;
     });
     $(document).keyup(function(event){
-      if (event.which == 27 || event.keyCode == 27) {
-        var closeButton = $(document).find('#modal > .modal-header > .close').last();
-        if (closeButton.length > 0) {
-          event.preventDefault();
-          event.stopPropagation();
-          closeButton.click();
-          return false;
-        }
+      if (event.which === 27 || event.keyCode === 27) {
+        $('#modal').trigger('escape-key-pressed');
       }
       return true;
     });
   }
-});
\ No newline at end of file
+});

http://git-wip-us.apache.org/repos/asf/ambari/blob/fdb101bd/ambari-web/app/views/common/host_progress_popup_body_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/host_progress_popup_body_view.js b/ambari-web/app/views/common/host_progress_popup_body_view.js
index cdabc5e..197551f 100644
--- a/ambari-web/app/views/common/host_progress_popup_body_view.js
+++ b/ambari-web/app/views/common/host_progress_popup_body_view.js
@@ -21,6 +21,12 @@ var batchUtils = require('utils/batch_scheduled_requests');
 var date = require('utils/date/date');
 
 /**
+ * @typedef {object} TaskRelationObject
+ * @property {string} type relation type 'service', 'component'
+ * @property {string} [value] optional value of relation e.g. name of component or service
+ */
+
+/**
  * Option for "filter by state" dropdown
  * @typedef {object} progressPopupCategoryObject
  * @property {string} value "all|pending|in progress|failed|completed|aborted|timedout"
@@ -576,6 +582,73 @@ App.HostProgressPopupBodyView = App.TableView.extend({
   },
 
   /**
+   * Navigate to host details logs tab with preset filter.
+   */
+  navigateToHostLogs: function() {
+    var relationType = this._determineRoleRelation(this.get('openedTask')),
+        hostModel = App.Host.find().findProperty('id', this.get('currentHost.name')),
+        queryParams = [],
+        model;
+
+    if (relationType.type === 'component') {
+      model = App.StackServiceComponent.find().findProperty('componentName', relationType.value);
+      queryParams.push('service_name=' + model.get('serviceName'));
+      queryParams.push('component_name=' + relationType.value);
+    }
+    if (relationType.type === 'service') {
+      queryParams.push('service_name=' + relationType.value);
+    }
+    App.router.transitionTo('main.hosts.hostDetails.logs', hostModel, { query: '?' + queryParams.join('&') });
+    if (this.get('parentView') && typeof this.get('parentView').onClose === 'function') this.get('parentView').onClose();
+  },
+
+  /**
+  /**
+  * Determines if opened task related to service or component.
+  *
+  * @return {boolean} <code>true</code> when relates to service or component
+  */
+  isLogsLinkVisible: function() {
+    if (!this.get('openedTask') || !this.get('openedTask.id')) return false;
+    return !!this._determineRoleRelation(this.get('openedTask'));
+  }.property('openedTask'),
+
+  /**
+   * @param  {wrappedTask} taskInfo
+   * @return {boolean|TaskRelationObject}
+   */
+  _determineRoleRelation: function(taskInfo) {
+    var foundComponentName,
+        foundServiceName,
+        componentNames = App.StackServiceComponent.find().mapProperty('componentName'),
+        serviceNames = App.StackService.find().mapProperty('serviceName'),
+        taskLog = this.get('currentHost.logTasks').findProperty('Tasks.id', Em.get(taskInfo, 'id')) || {},
+        role = Em.getWithDefault(taskLog, 'Tasks.role', false),
+        eqlFn = function(compare) {
+          return function(item) {
+            return item === compare;
+          };
+        };
+
+    if (!role) {
+      return false;
+    }
+    // component service check
+    if (role.endsWith('_SERVICE_CHECK')) {
+      role = role.replace('_SERVICE_CHECK', '');
+    }
+    foundComponentName = componentNames.filter(eqlFn(role))[0];
+    foundServiceName = serviceNames.filter(eqlFn(role))[0];
+    if (foundComponentName || foundServiceName) {
+      return {
+        type: foundComponentName ? 'component' : 'service',
+        value: foundComponentName || foundServiceName
+      }
+    }
+    return false;
+  },
+
+  /**
    * @type {boolean}
    */
   isRequestSchedule: function () {
@@ -745,4 +818,4 @@ App.HostProgressPopupBodyView = App.TableView.extend({
     $(".task-detail-log-maintext").css("display", "block");
   }
 
-});
\ No newline at end of file
+});

http://git-wip-us.apache.org/repos/asf/ambari/blob/fdb101bd/ambari-web/app/views/common/log_file_search_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/log_file_search_view.js b/ambari-web/app/views/common/log_file_search_view.js
index c242ec8..6d402d1 100644
--- a/ambari-web/app/views/common/log_file_search_view.js
+++ b/ambari-web/app/views/common/log_file_search_view.js
@@ -128,7 +128,7 @@ App.LogFileSearchView = Em.View.extend(App.InfiniteScrollMixin, {
   /**
   * Make request and get content with applied filters.
   */
-  fetchContent: function(params) {
+  fetchContent: function() {
     console.debug('Make Request with params:', this.serializeFilters());
   },
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/fdb101bd/ambari-web/app/views/common/modal_popup.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/modal_popup.js b/ambari-web/app/views/common/modal_popup.js
index 3402e80..f016fcf 100644
--- a/ambari-web/app/views/common/modal_popup.js
+++ b/ambari-web/app/views/common/modal_popup.js
@@ -65,6 +65,9 @@ App.ModalPopup = Ember.View.extend({
   showCloseButton: true,
 
   didInsertElement: function () {
+    this.$().find('#modal')
+      .on('enter-key-pressed', this.enterKeyPressed.bind(this))
+      .on('escape-key-pressed', this.escapeKeyPressed.bind(this));
     if (this.autoHeight && !$.mocho) {
       var block = this.$().find('#modal > .modal-body').first();
       if(block.offset()) {
@@ -76,6 +79,30 @@ App.ModalPopup = Ember.View.extend({
     this.focusElement(firstInputElement);
   },
 
+  willDestroyElement: function() {
+    this.$().find('#modal').off('enter-key-pressed').off('escape-key-pressed');
+  },
+
+  escapeKeyPressed: function() {
+    var closeButton = this.$().find('.modal-header > .close').last();
+    if (closeButton.length > 0) {
+      event.preventDefault();
+      event.stopPropagation();
+      closeButton.click();
+      return false;
+    }
+  },
+
+  enterKeyPressed: function() {
+    var primaryButton = this.$().find('.modal-footer > .btn-success').last();
+    if ((!$("*:focus").is("textarea")) && primaryButton.length > 0 && primaryButton.attr('disabled') !== 'disabled') {
+      event.preventDefault();
+      event.stopPropagation();
+      primaryButton.click();
+      return false;
+    }
+  },
+
   /**
    * If popup is opened from another popup it should be displayed above
    * @method fitZIndex

http://git-wip-us.apache.org/repos/asf/ambari/blob/fdb101bd/ambari-web/app/views/common/modal_popups/log_file_search_popup.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/modal_popups/log_file_search_popup.js b/ambari-web/app/views/common/modal_popups/log_file_search_popup.js
index 4730a19..4e2032b 100644
--- a/ambari-web/app/views/common/modal_popups/log_file_search_popup.js
+++ b/ambari-web/app/views/common/modal_popups/log_file_search_popup.js
@@ -21,6 +21,16 @@ var App = require('app');
 App.LogFileSearchPopup = function(header) {
   return App.ModalPopup.show({
     classNames: ['modal-full-width', 'sixty-percent-width-modal', 'log-file-search-popup'],
-    bodyClass: App.LogFileSearchView.extend({})
+    header: header,
+    bodyView: null,
+    bodyClass: App.LogFileSearchView.extend({
+      didInsertElement: function() {
+        this.set('parentView.bodyView', this);
+        this._super();
+      }
+    }),
+    enterKeyPressed: function() {
+      this.get('bodyView').fetchContent();
+    }
   });
 };

http://git-wip-us.apache.org/repos/asf/ambari/blob/fdb101bd/ambari-web/test/views/common/host_progress_popup_body_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/common/host_progress_popup_body_view_test.js b/ambari-web/test/views/common/host_progress_popup_body_view_test.js
index 8642d9b..a14cb26 100644
--- a/ambari-web/test/views/common/host_progress_popup_body_view_test.js
+++ b/ambari-web/test/views/common/host_progress_popup_body_view_test.js
@@ -55,4 +55,56 @@ describe('App.HostProgressPopupBodyView', function () {
 
   });
 
-});
\ No newline at end of file
+  describe('_determineRoleRelation', function() {
+    var cases;
+
+    beforeEach(function() {
+      sinon.stub(App.StackServiceComponent, 'find').returns([{componentName: 'DATANODE'}])
+      sinon.stub(App.StackService, 'find').returns([{serviceName: 'HDFS'}])
+    });
+
+    afterEach(function() {
+      App.StackServiceComponent.find.restore();
+      App.StackService.find.restore();
+    });
+
+    cases = [
+      {
+        task: { role: 'HDFS_SERVICE_CHECK'},
+        m: 'Role is HDFS_SERVICE_CHECK',
+        e: {
+          type: 'service',
+          value: 'HDFS'
+        }
+      },
+      {
+        task: { role: 'DATANODE'},
+        m: 'Role is DATANODE',
+        e: {
+          type: 'component',
+          value: 'DATANODE'
+        }
+      },
+      {
+        task: { role: 'UNDEFINED'},
+        m: 'Role is UNDEFINED',
+        e: false
+      }
+    ];
+
+    cases.forEach(function(test) {
+      it(test.m, function() {
+        view.reopen({
+          currentHost: Em.Object.create({
+            logTasks: [
+              { Tasks: { id: 1, role: test.task.role }}
+            ]
+          })
+        });
+
+        var ret = view._determineRoleRelation(Em.Object.create({ id: 1 }));
+        expect(ret).to.be.eql(test.e);
+      });
+    });
+  });
+});


[29/50] [abbrv] ambari git commit: AMBARI-14701: assign_master_components.js breaks next step in certain case (mithmatt via jaoki)

Posted by jo...@apache.org.
AMBARI-14701: assign_master_components.js breaks next step in certain case (mithmatt via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9e5dd9f8
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9e5dd9f8
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9e5dd9f8

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 9e5dd9f8990956e1c9de650822d52d4fdddaba0a
Parents: e187553
Author: Jun Aoki <ja...@apache.org>
Authored: Fri Feb 12 18:15:15 2016 -0800
Committer: Jun Aoki <ja...@apache.org>
Committed: Fri Feb 12 18:15:15 2016 -0800

----------------------------------------------------------------------
 .../mixins/wizard/assign_master_components.js   | 37 +++++++++++++-------
 1 file changed, 25 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9e5dd9f8/ambari-web/app/mixins/wizard/assign_master_components.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/wizard/assign_master_components.js b/ambari-web/app/mixins/wizard/assign_master_components.js
index 0693507..a2440a4 100644
--- a/ambari-web/app/mixins/wizard/assign_master_components.js
+++ b/ambari-web/app/mixins/wizard/assign_master_components.js
@@ -1084,7 +1084,6 @@ App.AssignMasterComponents = Em.Mixin.create({
         }else{
           App.router.set('nextBtnClickInProgress', false);
         }
-        self.set('submitButtonClicked', false);
       };
 
       if (this.get('useServerValidation')) {
@@ -1094,6 +1093,7 @@ App.AssignMasterComponents = Em.Mixin.create({
       } else {
         self.updateIsSubmitDisabled();
         goNextStepIfValid();
+        self.set('submitButtonClicked', false);
       }
     }
   },
@@ -1104,18 +1104,31 @@ App.AssignMasterComponents = Em.Mixin.create({
    */
   showValidationIssuesAcceptBox: function(callback) {
     var self = this;
-    if (self.get('anyWarning') || self.get('anyError')) {
-      App.ModalPopup.show({
-        primary: Em.I18n.t('common.continueAnyway'),
-        header: Em.I18n.t('installer.step5.validationIssuesAttention.header'),
-        body: Em.I18n.t('installer.step5.validationIssuesAttention'),
-        onPrimary: function () {
-          this.hide();
-          callback();
-        }
-      });
-    } else {
+
+    // If there are no warnings and no errors, return
+    if (!self.get('anyWarning') && !self.get('anyError')) {
       callback();
+      self.set('submitButtonClicked', false);
+      return;
     }
+
+    App.ModalPopup.show({
+      primary: Em.I18n.t('common.continueAnyway'),
+      header: Em.I18n.t('installer.step5.validationIssuesAttention.header'),
+      body: Em.I18n.t('installer.step5.validationIssuesAttention'),
+      onPrimary: function () {
+        this._super();
+        callback();
+        self.set('submitButtonClicked', false);
+      },
+      onSecondary: function () {
+        this._super();
+        self.set('submitButtonClicked', false);
+      },
+      onClose: function () {
+        this._super();
+        self.set('submitButtonClicked', false);
+      }
+    });
   }
 });


[26/50] [abbrv] ambari git commit: AMBARI-15030 : Fix unreasonable default heap settings for AMS HBase heapsize and xmn size (avijayan)

Posted by jo...@apache.org.
AMBARI-15030 : Fix unreasonable default heap settings for AMS HBase heapsize and xmn size (avijayan)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/aac5389f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/aac5389f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/aac5389f

Branch: refs/heads/branch-dev-patch-upgrade
Commit: aac5389fea7867141a3f4f421b2bc0d1e61117f3
Parents: f7055ae
Author: Aravindan Vijayan <av...@hortonworks.com>
Authored: Fri Feb 12 10:24:44 2016 -0800
Committer: Aravindan Vijayan <av...@hortonworks.com>
Committed: Fri Feb 12 10:49:22 2016 -0800

----------------------------------------------------------------------
 .../AMBARI_METRICS/0.1.0/configuration/ams-hbase-env.xml       | 6 +++---
 .../main/resources/stacks/HDP/2.0.6/services/stack_advisor.py  | 2 ++
 .../src/test/python/stacks/2.2/common/test_stack_advisor.py    | 3 ++-
 3 files changed, 7 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/aac5389f/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-hbase-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-hbase-env.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-hbase-env.xml
index 191e8b2..90e1307 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-hbase-env.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-hbase-env.xml
@@ -47,7 +47,7 @@
   </property>
   <property>
     <name>hbase_regionserver_heapsize</name>
-    <value>512</value>
+    <value>756</value>
     <description>
         HBase RegionServer Heap Size. In embedded mode, total heap size is
         sum of master and regionserver heap sizes.
@@ -69,7 +69,7 @@
   </property>
   <property>
     <name>regionserver_xmn_size</name>
-    <value>256</value>
+    <value>128</value>
     <description>HBase RegionServer maximum value for young generation heap size.</description>
     <value-attributes>
       <type>int</type>
@@ -84,7 +84,7 @@
   </property>
   <property>
     <name>hbase_master_xmn_size</name>
-    <value>256</value>
+    <value>102</value>
     <description>
       HBase Master maximum value for young generation heap size.
     </description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/aac5389f/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index af21008..7c69ac9 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -595,6 +595,7 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
 
     # Distributed mode heap size
     if operatingMode == "distributed":
+      hbase_heapsize = max(hbase_heapsize, 756)
       putHbaseEnvProperty("hbase_master_heapsize", "512")
       putHbaseEnvProperty("hbase_master_xmn_size", "102") #20% of 512 heap size
       putHbaseEnvProperty("hbase_regionserver_heapsize", hbase_heapsize)
@@ -602,6 +603,7 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     else:
       # Embedded mode heap size : master + regionserver
       hbase_rs_heapsize = 512
+      putHbaseEnvProperty("hbase_regionserver_heapsize", hbase_rs_heapsize)
       putHbaseEnvProperty("hbase_master_heapsize", hbase_heapsize)
       putHbaseEnvProperty("hbase_master_xmn_size", round_to_n(0.15*(hbase_heapsize+hbase_rs_heapsize),64))
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/aac5389f/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
index d2497fd..14a28d3 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
@@ -2025,7 +2025,8 @@ class TestHDP22StackAdvisor(TestCase):
       "ams-hbase-env": {
         "properties": {
           "hbase_master_xmn_size": "128",
-          "hbase_master_heapsize": "512"
+          "hbase_master_heapsize": "512",
+          "hbase_regionserver_heapsize": "512"
         }
       },
       "ams-env": {


[23/50] [abbrv] ambari git commit: AMBARI-15002. HiveServerInteractive. Adding skelton code for Hive Server Interactive component support. (swapan shridhar via jaimin)

Posted by jo...@apache.org.
AMBARI-15002. HiveServerInteractive. Adding skelton code for Hive Server Interactive component support. (swapan shridhar via jaimin)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b22aa2e4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b22aa2e4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b22aa2e4

Branch: refs/heads/branch-dev-patch-upgrade
Commit: b22aa2e4a848ddf39ea7757902f55a1125cb1889
Parents: 77daca7
Author: Jaimin Jetly <ja...@hortonworks.com>
Authored: Fri Feb 12 20:54:06 2016 +0530
Committer: Jaimin Jetly <ja...@hortonworks.com>
Committed: Fri Feb 12 20:54:06 2016 +0530

----------------------------------------------------------------------
 .../HIVE/0.12.0.2.0/metainfo.xml                |    1 +
 .../package/scripts/hive_server_interactive.py  |   93 +
 .../configuration/hive-interactive-site.xml     | 2053 ++++++++++++++++++
 .../stacks/HDP/2.4/services/HIVE/metainfo.xml   |   49 +
 4 files changed, 2196 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b22aa2e4/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/metainfo.xml
index dfa20a5..a71e392 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/metainfo.xml
@@ -320,6 +320,7 @@
         <config-type>hive-log4j</config-type>
         <config-type>hive-exec-log4j</config-type>
         <config-type>hive-env</config-type>
+        <config-type>hive-interactive-site</config-type>
         <config-type>webhcat-site</config-type>
         <config-type>webhcat-env</config-type>
         <config-type>ranger-hive-plugin-properties</config-type>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b22aa2e4/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
new file mode 100644
index 0000000..6fa3081
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
+from resource_management.libraries.functions.get_hdp_version import get_hdp_version
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.security_commons import build_expectations, \
+    cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+    FILE_TYPE_XML
+from ambari_commons import OSCheck, OSConst
+if OSCheck.is_windows_family():
+    from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
+from setup_ranger_hive import setup_ranger_hive
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons.constants import UPGRADE_TYPE_ROLLING
+from resource_management.core.logger import Logger
+
+import hive_server_upgrade
+from hive import hive
+from hive_service import hive_service
+
+
+class HiveServerInteractive(Script):
+    def install(self, env):
+        pass
+
+    def configure(self, env):
+        pass
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HiveServerWindows(HiveServerInteractive):
+    def start(self, env):
+        pass
+
+    def stop(self, env):
+        pass
+
+    def status(self, env):
+        pass
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HiveServerDefault(HiveServerInteractive):
+    def get_stack_to_component(self):
+        pass
+
+    def start(self, env, upgrade_type=None):
+        pass
+
+
+    def stop(self, env, upgrade_type=None):
+        pass
+
+
+    def status(self, env):
+        pass
+
+
+    def pre_upgrade_restart(self, env, upgrade_type=None):
+        pass
+
+
+    def security_status(self, env):
+        pass
+
+
+if __name__ == "__main__":
+    HiveServerInteractive().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b22aa2e4/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/configuration/hive-interactive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/configuration/hive-interactive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/configuration/hive-interactive-site.xml
new file mode 100644
index 0000000..76a9724
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/configuration/hive-interactive-site.xml
@@ -0,0 +1,2053 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<configuration supports_final="true">
+
+  <property>
+    <name>hive.cbo.enable</name>
+    <value>true</value>
+    <description>Flag to control enabling Cost Based Optimizations using Calcite framework.</description>
+    <display-name>Enable Cost Based Optimizer</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>On</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.zookeeper.quorum</name>
+    <value>localhost:2181</value>
+    <description>List of ZooKeeper servers to talk to. This is needed for: 1.
+      Read/write locks - when hive.lock.manager is set to
+      org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager,
+      2. When HiveServer2 supports service discovery via Zookeeper.</description>
+    <value-attributes>
+      <type>multiLine</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.metastore.connect.retries</name>
+    <value>24</value>
+    <description>Number of retries while opening a connection to metastore</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.failure.retries</name>
+    <value>24</value>
+    <description>Number of retries upon failure of Thrift metastore calls</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.client.connect.retry.delay</name>
+    <value>5s</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      Number of seconds for the client to wait between consecutive connection attempts
+    </description>
+  </property>
+
+ <property>
+    <name>hive.heapsize</name>
+    <value>1024</value>
+    <display-name>HiveServer2 heap size</display-name>
+    <deleted>true</deleted>
+    <description>Hive Java heap size</description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>ambari.hive.db.schema.name</name>
+    <value>hive</value>
+    <display-name>Database Name</display-name>
+    <description>Database name used as the Hive Metastore</description>
+    <value-attributes>
+      <type>database</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionURL</name>
+    <value>jdbc:mysql://localhost/hive?createDatabaseIfNotExist=true</value>
+    <display-name>Database URL</display-name>
+    <description>JDBC connect string for a JDBC metastore</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionDriverName</name>
+    <value>com.mysql.jdbc.Driver</value>
+    <display-name>JDBC Driver Class</display-name>
+    <description>Driver class name for a JDBC metastore</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <name>hive_database</name>
+        <type>hive-env</type>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionUserName</name>
+    <value>hive</value>
+    <display-name>Database Username</display-name>
+    <description>username to use against metastore database</description>
+    <value-attributes>
+      <type>db_user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property require-input="true">
+    <name>javax.jdo.option.ConnectionPassword</name>
+    <value></value>
+    <property-type>PASSWORD</property-type>
+    <display-name>Database Password</display-name>
+    <description>password to use against metastore database</description>
+    <value-attributes>
+      <type>password</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionURL</name>
+    <value>jdbc:mysql://localhost/hive?createDatabaseIfNotExist=true</value>
+    <display-name>Database URL</display-name>
+    <description>JDBC connect string for a JDBC metastore</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <name>hive_database</name>
+        <type>hive-env</type>
+      </property>
+      <property>
+        <name>ambari.hive.db.schema.name</name>
+        <type>hive-site</type>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.metastore.server.max.threads</name>
+    <value>100000</value>
+    <description>Maximum number of worker threads in the Thrift server's pool.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.warehouse.dir</name>
+    <value>/apps/hive/warehouse</value>
+    <description>location of default database for the warehouse</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.sasl.enabled</name>
+    <value>false</value>
+    <description>If true, the metastore thrift interface will be secured with SASL.
+      Clients must authenticate with Kerberos.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.kerberos.keytab.file</name>
+    <value>/etc/security/keytabs/hive.service.keytab</value>
+    <description>The path to the Kerberos Keytab file containing the metastore Thrift server's service principal.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.kerberos.principal</name>
+    <value>hive/_HOST@EXAMPLE.COM</value>
+    <description>
+      The service principal for the metastore Thrift server.
+      The special string _HOST will be replaced automatically with the correct host name.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.cluster.delegation.token.store.zookeeper.znode</name>
+    <value>/hive/cluster/delegation</value>
+    <description>The root path for token store data.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.cache.pinobjtypes</name>
+    <value>Table,Database,Type,FieldSchema,Order</value>
+    <description>List of comma separated metastore object types that should be pinned in the cache</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.uris</name>
+    <value>thrift://localhost:9083</value>
+    <description>Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.pre.event.listeners</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener</value>
+    <description>List of comma separated listeners for metastore events.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.authorization.storage.checks</name>
+    <value>false</value>
+    <description>
+      Should the metastore do authorization checks against the underlying storage (usually hdfs)
+      for operations like drop-partition (disallow the drop-partition if the user in
+      question doesn't have permissions to delete the corresponding directory
+      on the storage).
+    </description>
+  </property>
+
+  <property>
+    <name>datanucleus.autoCreateSchema</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>datanucleus.fixedDatastore</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hive.metastore.client.socket.timeout</name>
+    <value>1800s</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      MetaStore Client socket timeout in seconds
+    </description>
+  </property>
+
+  <property>
+    <name>hive.metastore.execute.setugi</name>
+    <value>true</value>
+    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and     server sides. Further note that its best effort. If client sets its to true and server sets it to false, client setting will be ignored.</description>
+  </property>
+
+  <property>
+    <name>hive.security.authorization.enabled</name>
+    <value>false</value>
+    <description>enable or disable the Hive client authorization</description>
+    <display-name>Enable Authorization</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_security_authorization</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.security.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory</value>
+    <description>
+      The Hive client authorization manager class name. The user defined authorization class should implement
+      interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.
+    </description>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_security_authorization</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.cluster.delegation.token.store.class</name>
+    <value>org.apache.hadoop.hive.thrift.ZooKeeperTokenStore</value>
+    <description>The delegation token store implementation.
+      Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster.</description>
+  </property>
+
+  <property>
+    <name>hive.cluster.delegation.token.store.zookeeper.connectString</name>
+    <value>localhost:2181</value>
+    <description>The ZooKeeper token store connect string.</description>
+  </property>
+
+  <property>
+    <name>hive.security.metastore.authorization.auth.reads</name>
+    <value>true</value>
+    <description>If this is true, metastore authorizer authorizes read actions on database, table</description>
+  </property>
+
+  <property>
+    <name>hive.server2.logging.operation.log.location</name>
+    <value>/tmp/hive/operation_logs</value>
+    <description>Top level directory where operation logs are stored if logging functionality is enabled</description>
+  </property>
+
+  <property>
+    <name>hive.server2.logging.operation.enabled</name>
+    <value>true</value>
+    <description>When true, HS2 will save operation logs</description>
+  </property>
+
+  <property>
+    <name>hive.security.metastore.authenticator.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator</value>
+    <description>
+      authenticator manager class name to be used in the metastore for authentication.
+      The user defined authenticator should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.security.metastore.authorization.manager</name>
+    <display-name>Hive Authorization Manager</display-name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+    <description>
+      authorization manager class name to be used in the metastore for authorization.
+      The user defined authorization class should implement interface
+      org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.
+    </description>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_security_authorization</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.security.authenticator.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
+    <description>
+      hive client authenticator manager class name. The user defined authenticator should implement
+      interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.
+    </description>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_security_authorization</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.server2.enable.doAs</name>
+    <value>true</value>
+    <description>
+      Setting this property to true will have HiveServer2 execute
+      Hive operations as the user making the calls to it.
+    </description>
+    <display-name>Run as end user instead of Hive user</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_security_authorization</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.user.install.directory</name>
+    <value>/user/</value>
+    <description>
+      If hive (in tez mode only) cannot find a usable hive jar in "hive.jar.directory",
+      it will upload the hive jar to "hive.user.install.directory/user.name"
+      and use it to run queries.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.conf.restricted.list</name>
+    <value>hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role</value>
+    <description>Comma separated list of configuration options which are immutable at runtime</description>
+  </property>
+
+  <property>
+    <name>hive.server2.use.SSL</name>
+    <value>false</value>
+    <description/>
+    <display-name>Use SSL</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.server2.table.type.mapping</name>
+    <value>CLASSIC</value>
+    <description>
+      Expects one of [classic, hive].
+      This setting reflects how HiveServer2 will report the table types for JDBC and other
+      client implementations that retrieve the available tables and supported table types
+      HIVE : Exposes Hive's native table types like MANAGED_TABLE, EXTERNAL_TABLE, VIRTUAL_VIEW
+      CLASSIC : More generic types like TABLE and VIEW
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.enable.impersonation</name>
+    <value>true</value>
+    <deleted>true</deleted>
+    <description>Enable user impersonation for HiveServer2</description>
+  </property>
+
+  <property>
+    <name>fs.hdfs.impl.disable.cache</name>
+    <value>true</value>
+    <description>Disable HDFS filesystem cache.</description>
+  </property>
+
+  <property>
+    <name>fs.file.impl.disable.cache</name>
+    <value>true</value>
+    <description>Disable local filesystem cache.</description>
+  </property>
+
+  <property>
+    <name>hive.exec.scratchdir</name>
+    <value>/tmp/hive</value>
+    <description>HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/&lt;username&gt; is created, with ${hive.scratch.dir.permission}.</description>
+  </property>
+
+  <property>
+    <name>hive.exec.submit.local.task.via.child</name>
+    <value>true</value>
+    <description>
+      Determines whether local tasks (typically mapjoin hashtable generation phase) runs in
+      separate JVM (true recommended) or not.
+      Avoids the overhead of spawning new JVM, but can lead to out-of-memory issues.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.exec.compress.intermediate</name>
+    <value>false</value>
+    <description>
+      This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed.
+      The compression codec and other options are determined from Hadoop config variables mapred.output.compress*
+    </description>
+  </property>
+
+  <property>
+    <name>hive.exec.reducers.bytes.per.reducer</name>
+    <value>67108864</value>
+    <description>Defines the size per reducer. For example, if it is set to 64M, given 256M input size, 4 reducers will be used.</description>
+    <display-name>Data per Reducer</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>64</minimum>
+      <maximum>4294967296</maximum>
+      <unit>B</unit>
+      <step-increment></step-increment>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.exec.reducers.max</name>
+    <value>1009</value>
+    <description>
+      max number of reducers will be used. If the one specified in the configuration parameter mapred.reduce.tasks is
+      negative, Hive will use this one as the max number of reducers when automatically determine number of reducers.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.exec.compress.output</name>
+    <value>false</value>
+    <description>
+      This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) is compressed.
+      The compression codec and other options are determined from Hadoop config variables mapred.output.compress*
+    </description>
+  </property>
+
+  <property>
+    <name>hive.exec.submitviachild</name>
+    <value>false</value>
+    <description/>
+  </property>
+
+  <property>
+    <name>hive.enforce.bucketing</name>
+    <value>true</value>
+    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
+    <display-name>Enforce bucketing</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_txn_acid</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.enforce.sorting</name>
+    <value>true</value>
+    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
+  </property>
+
+  <property>
+    <name>hive.enforce.sortmergebucketmapjoin</name>
+    <value>true</value>
+    <description>If the user asked for sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not ?</description>
+  </property>
+
+  <property>
+    <name>hive.map.aggr</name>
+    <value>true</value>
+    <description>Whether to use map-side aggregation in Hive Group By queries</description>
+  </property>
+
+  <property>
+    <name>hive.mapjoin.optimized.hashtable</name>
+    <value>true</value>
+    <description>
+      Whether Hive should use memory-optimized hash table for MapJoin. Only works on Tez,
+      because memory-optimized hashtable cannot be serialized.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.smbjoin.cache.rows</name>
+    <value>10000</value>
+    <description>How many rows with the same key value should be cached in memory per smb joined table.</description>
+  </property>
+
+  <property>
+    <name>hive.map.aggr.hash.percentmemory</name>
+    <value>0.5</value>
+    <description>Portion of total memory to be used by map-side group aggregation hash table</description>
+  </property>
+
+  <property>
+    <name>hive.map.aggr.hash.force.flush.memory.threshold</name>
+    <value>0.9</value>
+    <description>
+      The max memory to be used by map-side group aggregation hash table.
+      If the memory usage is higher than this number, force to flush data
+    </description>
+  </property>
+
+  <property>
+    <name>hive.map.aggr.hash.min.reduction</name>
+    <value>0.5</value>
+    <description>
+      Hash aggregation will be turned off if the ratio between hash  table size and input rows is bigger than this number.
+      Set to 1 to make sure hash aggregation is never turned off.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.merge.mapfiles</name>
+    <value>true</value>
+    <description>Merge small files at the end of a map-only job</description>
+  </property>
+
+  <property>
+    <name>hive.merge.mapredfiles</name>
+    <value>false</value>
+    <description>Merge small files at the end of a map-reduce job</description>
+  </property>
+
+  <property>
+    <name>hive.merge.tezfiles</name>
+    <value>false</value>
+    <description>Merge small files at the end of a Tez DAG</description>
+  </property>
+
+  <property>
+    <name>hive.merge.size.per.task</name>
+    <value>256000000</value>
+    <description>Size of merged files at the end of the job</description>
+  </property>
+
+  <property>
+    <name>hive.merge.smallfiles.avgsize</name>
+    <value>16000000</value>
+    <description>
+      When the average output file size of a job is less than this number, Hive will start an additional
+      map-reduce job to merge the output files into bigger files. This is only done for map-only jobs
+      if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.merge.rcfile.block.level</name>
+    <value>true</value>
+    <description/>
+  </property>
+
+  <property>
+    <name>hive.merge.orcfile.stripe.level</name>
+    <value>true</value>
+    <description>
+      When hive.merge.mapfiles or hive.merge.mapredfiles is enabled while writing a
+      table with ORC file format, enabling this config will do stripe level fast merge
+      for small ORC files. Note that enabling this config will not honor padding tolerance
+      config (hive.exec.orc.block.padding.tolerance).
+    </description>
+  </property>
+
+  <property>
+    <name>hive.exec.orc.default.stripe.size</name>
+    <value>67108864</value>
+    <description>Define the default ORC stripe size</description>
+    <display-name>Default ORC Stripe Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>8388608</minimum>
+      <maximum>268435456</maximum>
+      <unit>B</unit>
+      <increment-step>8388608</increment-step>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.optimize.bucketmapjoin</name>
+    <value>true</value>
+    <description>Whether to try bucket mapjoin</description>
+  </property>
+
+  <property>
+    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
+    <value>false</value>
+    <description> If the tables being joined are sorted and bucketized on the join columns, and they have the same number
+      of buckets, a sort-merge join can be performed by setting this parameter as true.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.mapred.reduce.tasks.speculative.execution</name>
+    <value>false</value>
+    <description>Whether speculative execution for reducers should be turned on. </description>
+  </property>
+
+  <property>
+    <name>hive.exec.dynamic.partition</name>
+    <value>true</value>
+    <description>Whether or not to allow dynamic partitions in DML/DDL.</description>
+  </property>
+
+  <property>
+    <name>hive.exec.dynamic.partition.mode</name>
+    <value>nonstrict</value>
+    <description>
+      In strict mode, the user must specify at least one static partition
+      in case the user accidentally overwrites all partitions.
+      NonStrict allows all partitions of a table to be dynamic.
+    </description>
+    <display-name>Allow all partitions to be Dynamic</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>nonstrict</value>
+          <label>On</label>
+        </entry>
+        <entry>
+          <value>strict</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_txn_acid</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.exec.max.dynamic.partitions.pernode</name>
+    <value>2000</value>
+    <description>Maximum number of dynamic partitions allowed to be created in each mapper/reducer node.</description>
+  </property>
+
+  <property>
+    <name>hive.exec.max.created.files</name>
+    <value>100000</value>
+    <description>Maximum number of HDFS files created by all mappers/reducers in a MapReduce job.</description>
+  </property>
+
+  <property>
+    <name>hive.exec.max.dynamic.partitions</name>
+    <value>5000</value>
+    <description>Maximum number of dynamic partitions allowed to be created in total.</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join</name>
+    <value>true</value>
+    <description>Whether Hive enables the optimization about converting common join into mapjoin based on the input file size</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.sortmerge.join</name>
+    <value>true</value>
+    <description>Will the join be automatically converted to a sort-merge join, if the joined tables pass the criteria for sort-merge join.</description>
+  </property>
+
+  <property>
+    <name>hive.optimize.constant.propagation</name>
+    <value>true</value>
+    <description>Whether to enable constant propagation optimizer</description>
+  </property>
+
+  <property>
+    <name>hive.optimize.metadataonly</name>
+    <value>true</value>
+    <description/>
+  </property>
+
+  <property>
+    <name>hive.optimize.null.scan</name>
+    <value>true</value>
+    <description>Dont scan relations which are guaranteed to not generate any rows</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.sortmerge.join.to.mapjoin</name>
+    <value>false</value>
+    <description>
+      If hive.auto.convert.sortmerge.join is set to true, and a join was converted to a sort-merge join,
+      this parameter decides whether each table should be tried as a big table, and effectively a map-join should be
+      tried. That would create a conditional task with n+1 children for a n-way join (1 child for each table as the
+      big table), and the backup task will be the sort-merge join. In some cases, a map-join would be faster than a
+      sort-merge join, if there is no advantage of having the output bucketed and sorted. For example, if a very big sorted
+      and bucketed table with few files (say 10 files) are being joined with a very small sorter and bucketed table
+      with few files (10 files), the sort-merge join will only use 10 mappers, and a simple map-only join might be faster
+      if the complete small table can fit in memory, and a map-join can be performed.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
+    <value>true</value>
+    <deleted>true</deleted>
+    <description>Required to Enable the conversion of an SMB (Sort-Merge-Bucket) to a map-join SMB.</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask</name>
+    <value>true</value>
+    <description>
+      Whether Hive enables the optimization about converting common join into mapjoin based on the input file size.
+      If this parameter is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than the
+      specified size, the join is directly converted to a mapjoin (there is no conditional task).
+    </description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask.size</name>
+    <value>52428800</value>
+    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take affect. However, if it
+      is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than this size, the join is directly
+      converted to a mapjoin(there is no conditional task).
+    </description>
+    <display-name>For Map Join, per Map memory threshold</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>8192</minimum>
+      <maximum>17179869184</maximum>
+      <unit>B</unit>
+      <step-increment></step-increment>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.tez.container.size</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.optimize.reducededuplication.min.reducer</name>
+    <value>4</value>
+    <description>
+      Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS.
+      That means if reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can make very slow, single MR.
+      The optimization will be automatically disabled if number of reducers would be less than specified value.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.sort.dynamic.partition</name>
+    <value>false</value>
+    <description>
+      When enabled dynamic partitioning column will be globally sorted.
+      This way we can keep only one record writer open for each partition value
+      in the reducer thereby reducing the memory pressure on reducers.
+    </description>
+    <display-name>Sort Partitions Dynamically</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.optimize.mapjoin.mapreduce</name>
+    <value>true</value>
+    <deleted>true</deleted>
+    <description>If hive.auto.convert.join is off, this parameter does not take
+      affect. If it is on, and if there are map-join jobs followed by a map-reduce
+      job (for e.g a group by), each map-only job is merged with the following
+      map-reduce job.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.mapjoin.bucket.cache.size</name>
+    <value>10000</value>
+    <description>
+      Size per reducer.The default is 1G, i.e if the input size is 10G, it
+      will use 10 reducers.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.vectorized.execution.enabled</name>
+    <value>true</value>
+    <description>
+      This flag should be set to true to enable vectorized mode of query execution.
+      The default value is false.
+    </description>
+    <display-name>Enable Vectorization and Map Vectorization</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.optimize.reducededuplication</name>
+    <value>true</value>
+    <description>
+      Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again.
+      This should always be set to true. Since it is a new feature, it has been made configurable.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.index.filter</name>
+    <value>true</value>
+    <description>Whether to enable automatic use of indexes</description>
+    <display-name>Push Filters to Storage</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.execution.engine</name>
+    <value>tez</value>
+    <description>
+      Expects one of [mr, tez].
+      Chooses execution engine. Options are: mr (Map reduce, default) or tez (hadoop 2 only)
+    </description>
+    <display-name>Execution Engine</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>mr</value>
+          <label>MapReduce</label>
+        </entry>
+        <entry>
+          <value>tez</value>
+          <label>TEZ</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.exec.post.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>
+      Comma-separated list of post-execution hooks to be invoked for each statement.
+      A post-execution hook is specified as the name of a Java class which implements the
+      org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
+    </description>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_timeline_logging_enabled</name>
+      </property>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.server.http.port</name>
+      </property>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.server.https.port</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.exec.pre.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>
+      Comma-separated list of pre-execution hooks to be invoked for each statement.
+      A pre-execution hook is specified as the name of a Java class which implements the
+      org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
+    </description>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_timeline_logging_enabled</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.exec.failure.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>
+      Comma-separated list of on-failure hooks to be invoked for each statement.
+      An on-failure hook is specified as the name of Java class which implements the
+      org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
+    </description>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_timeline_logging_enabled</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.exec.parallel</name>
+    <value>false</value>
+    <description>Whether to execute jobs in parallel</description>
+  </property>
+
+  <property>
+    <name>hive.exec.parallel.thread.number</name>
+    <value>8</value>
+    <description>How many jobs at most can be executed in parallel</description>
+  </property>
+
+  <property>
+    <name>hive.vectorized.groupby.maxentries</name>
+    <value>100000</value>
+    <description>
+      Max number of entries in the vector group by aggregation hashtables.
+      Exceeding this will trigger a flush irrelevant of memory pressure condition.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.tez.smb.number.waves</name>
+    <value>0.5</value>
+    <description>The number of waves in which to run the SMB join. Account for cluster being occupied. Ideally should be 1 wave.</description>
+  </property>
+
+  <property>
+    <name>hive.tez.dynamic.partition.pruning.max.data.size</name>
+    <value>104857600</value>
+    <description>Maximum total data size of events in dynamic pruning.</description>
+  </property>
+
+  <property>
+    <name>hive.tez.dynamic.partition.pruning.max.event.size</name>
+    <value>1048576</value>
+    <description>Maximum size of events sent by processors in dynamic pruning. If this size is crossed no pruning will take place.</description>
+  </property>
+
+  <property>
+    <name>hive.tez.dynamic.partition.pruning</name>
+    <value>true</value>
+    <description>When dynamic pruning is enabled, joins on partition keys will be processed by sending events from the processing vertices to the tez application master. These events will be used to prune unnecessary partitions.</description>
+    <display-name>Allow dynamic partition pruning</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.tez.min.partition.factor</name>
+    <value>0.25</value>
+    <description>
+      When auto reducer parallelism is enabled this factor will be used to put a lower limit to the number
+      of reducers that tez specifies.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.tez.max.partition.factor</name>
+    <value>2.0</value>
+    <description>When auto reducer parallelism is enabled this factor will be used to over-partition data in shuffle edges.</description>
+  </property>
+
+  <property>
+    <name>hive.tez.auto.reducer.parallelism</name>
+    <value>false</value>
+    <description>
+      Turn on Tez' auto reducer parallelism feature. When enabled, Hive will still estimate data sizes
+      and set parallelism estimates. Tez will sample source vertices' output sizes and adjust the estimates at runtime as
+      necessary.
+    </description>
+    <display-name>Allow dynamic numbers of reducers</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.convert.join.bucket.mapjoin.tez</name>
+    <value>false</value>
+    <description>
+      Whether joins can be automatically converted to bucket map joins in hive
+      when tez is used as the execution engine.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.prewarm.numcontainers</name>
+    <value>3</value>
+    <description>Controls the number of containers to prewarm for Tez (Hadoop 2 only)</description>
+    <display-name>Number of Containers Held</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>1</minimum>
+      <maximum>20</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.prewarm.enabled</name>
+    <value>false</value>
+    <description>Enables container prewarm for Tez (Hadoop 2 only)</description>
+    <display-name>Hold Containers to Reduce Latency</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.vectorized.groupby.checkinterval</name>
+    <value>4096</value>
+    <description>Number of entries added to the group by aggregation hash before a recomputation of average entry size is performed.</description>
+  </property>
+
+  <property>
+    <name>hive.vectorized.groupby.flush.percent</name>
+    <value>0.1</value>
+    <description>Percent of entries in the group by aggregation hash flushed when the memory threshold is exceeded.</description>
+  </property>
+
+  <property>
+    <name>hive.stats.autogather</name>
+    <value>true</value>
+    <description>A flag to gather statistics automatically during the INSERT OVERWRITE command.</description>
+  </property>
+
+  <property>
+    <name>hive.stats.dbclass</name>
+    <value>fs</value>
+    <description>
+      Expects one of the pattern in [jdbc(:.*), hbase, counter, custom, fs].
+      The storage that stores temporary Hive statistics. Currently, jdbc, hbase, counter and custom type are supported.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.stats.fetch.partition.stats</name>
+    <value>true</value>
+    <description>
+      Annotation of operator tree with statistics information requires partition level basic
+      statistics like number of rows, data size and file size. Partition statistics are fetched from
+      metastore. Fetching partition statistics for each needed partition can be expensive when the
+      number of partitions is high. This flag can be used to disable fetching of partition statistics
+      from metastore. When this flag is disabled, Hive will make calls to filesystem to get file sizes
+      and will estimate the number of rows from row schema.
+    </description>
+    <display-name>Fetch partition stats at compiler</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>On</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.cbo.enable</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.zookeeper.client.port</name>
+    <value>2181</value>
+    <description>The port of ZooKeeper servers to talk to. If the list of Zookeeper servers specified in hive.zookeeper.quorum,does not contain port numbers, this value is used.</description>
+  </property>
+
+  <property>
+    <name>hive.zookeeper.namespace</name>
+    <value>hive_zookeeper_namespace</value>
+    <description>The parent node under which all ZooKeeper nodes are created.</description>
+  </property>
+
+  <property>
+    <name>hive.stats.fetch.column.stats</name>
+    <value>false</value>
+    <description>
+      Annotation of operator tree with statistics information requires column statistics.
+      Column statistics are fetched from metastore. Fetching column statistics for each needed column
+      can be expensive when the number of columns is high. This flag can be used to disable fetching
+      of column statistics from metastore.
+    </description>
+    <display-name>Fetch column stats at compiler</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>On</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+       <property>
+        <type>hive-site</type>
+        <name>hive.cbo.enable</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.tez.container.size</name>
+    <value>682</value>
+    <description>By default, Tez uses the java options from map tasks. Use this property to override that value.</description>
+    <display-name>Tez Container Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>682</minimum>
+      <maximum>6820</maximum>
+      <unit>MB</unit>
+      <increment-step>682</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.scheduler.minimum-allocation-mb</name>
+      </property>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.scheduler.maximum-allocation-mb</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.tez.input.format</name>
+    <value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
+    <description>The default input format for Tez. Tez groups splits in the Application Master.</description>
+  </property>
+
+  <property>
+    <name>hive.tez.java.opts</name>
+    <value>-server -Xmx545m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps</value>
+    <description>Java command line options for Tez. The -Xmx parameter value is generally 80% of hive.tez.container.size.</description>
+  </property>
+
+  <property>
+    <name>hive.compute.query.using.stats</name>
+    <value>true</value>
+    <description>
+      When set to true Hive will answer a few queries like count(1) purely using stats
+      stored in metastore. For basic stats collection turn on the config hive.stats.autogather to true.
+      For more advanced stats collection need to run analyze table queries.
+    </description>
+    <display-name>Compute simple queries using stats only</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.cbo.enable</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.exec.orc.default.compress</name>
+    <value>ZLIB</value>
+    <description>Define the default compression codec for ORC file</description>
+    <display-name>ORC Compression Algorithm</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>ZLIB</value>
+          <label>zlib Compression Library</label>
+        </entry>
+        <entry>
+          <value>SNAPPY</value>
+          <label>Snappy Compression Library</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.orc.splits.include.file.footer</name>
+    <value>false</value>
+    <description>
+      If turned on splits generated by orc will include metadata about the stripes in the file. This
+      data is read remotely (from the client or HS2 machine) and sent to all the tasks.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.orc.compute.splits.num.threads</name>
+    <value>10</value>
+    <description>How many threads orc should use to create splits in parallel.</description>
+  </property>
+
+  <property>
+    <name>hive.limit.optimize.enable</name>
+    <value>true</value>
+    <description>Whether to enable to optimization to trying a smaller subset of data for simple LIMIT first.</description>
+  </property>
+
+  <property>
+    <name>hive.tez.cpu.vcores</name>
+    <value>-1</value>
+    <description>By default Tez will ask for however many cpus map-reduce is configured to use per container. This can be used to overwrite.</description>
+  </property>
+
+  <property>
+    <name>hive.tez.log.level</name>
+    <value>INFO</value>
+    <description>
+      The log level to use for tasks executing as part of the DAG.
+      Used only if hive.tez.java.opts is used to configure Java options.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.limit.pushdown.memory.usage</name>
+    <value>0.04</value>
+    <description>The max memory to be used for hash in RS operator for top K selection.</description>
+  </property>
+
+  <property>
+    <name>hive.exec.orc.encoding.strategy</name>
+    <value>SPEED</value>
+    <description>
+      Define the encoding strategy to use while writing data. Changing this
+      will only affect the light weight encoding for integers. This flag will not change
+      the compression level of higher level compression codec (like ZLIB). Possible
+      options are SPEED and COMPRESSION.
+    </description>
+    <display-name>ORC Encoding Strategy</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>SPEED</value>
+          <label>Speed</label>
+        </entry>
+        <entry>
+          <value>COMPRESSION</value>
+          <label>Compression</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_exec_orc_storage_strategy</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.exec.orc.compression.strategy</name>
+    <value>SPEED</value>
+    <description>
+      Define the compression strategy to use while writing data. This changes the
+      compression level of higher level compression codec (like ZLIB).
+    </description>
+    <display-name>ORC Compression Strategy</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>SPEED</value>
+          <label>Speed</label>
+        </entry>
+        <entry>
+          <value>COMPRESSION</value>
+          <label>Compression</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_exec_orc_storage_strategy</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.vectorized.execution.reduce.enabled</name>
+    <value>false</value>
+    <description>
+      This flag should be set to true to enable vectorized mode of the reduce-side of
+      query execution.
+    </description>
+    <display-name>Enable Reduce Vectorization</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.server2.authentication.ldap.baseDN</name>
+    <property-type>DONT_ADD_ON_UPGRADE</property-type>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.server2.authentication</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.server2.authentication.kerberos.principal</name>
+    <value>hive/_HOST@EXAMPLE.COM</value>
+    <property-type>DONT_ADD_ON_UPGRADE</property-type>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.server2.authentication</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.server2.custom.authentication.class</name>
+    <property-type>DONT_ADD_ON_UPGRADE</property-type>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.server2.authentication</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.server2.authentication.kerberos.keytab</name>
+    <value>/etc/security/keytabs/hive.service.keytab</value>
+    <property-type>DONT_ADD_ON_UPGRADE</property-type>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.server2.authentication</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.server2.authentication.ldap.url</name>
+    <value> </value>
+    <property-type>DONT_ADD_ON_UPGRADE</property-type>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.server2.authentication</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.server2.tez.default.queues</name>
+    <display-name>Default query queues</display-name>
+    <value>default</value>
+    <description>
+      A list of comma separated values corresponding to YARN queues of the same name.
+      When HiveServer2 is launched in Tez mode, this configuration needs to be set
+      for multiple Tez sessions to run in parallel on the cluster.
+    </description>
+    <value-attributes>
+      <type>combo</type>
+      <entries>
+        <entry>
+          <value>default</value>
+          <label>Default</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1+</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>capacity-scheduler</type>
+        <name>yarn.scheduler.capacity.root.queues</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.server2.tez.sessions.per.default.queue</name>
+    <value>1</value>
+    <description>
+      A positive integer that determines the number of Tez sessions that should be
+      launched on each of the queues specified by "hive.server2.tez.default.queues".
+      Determines the parallelism on each queue.
+    </description>
+    <display-name>Session per queue</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>1</minimum>
+      <maximum>10</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.server2.tez.initialize.default.sessions</name>
+    <value>false</value>
+    <description>
+      This flag is used in HiveServer2 to enable a user to use HiveServer2 without
+      turning on Tez for HiveServer2. The user could potentially want to run queries
+      over Tez without the pool of sessions.
+    </description>
+    <display-name>Start Tez session at Initialization</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.txn.manager</name>
+    <value>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager</value>
+    <description/>
+    <display-name>Transaction Manager</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager</value>
+          <label>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager (off)</label>
+        </entry>
+        <entry>
+          <value>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager</value>
+          <label>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager (on)</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_txn_acid</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.txn.timeout</name>
+    <value>300</value>
+    <description>Time after which transactions are declared aborted if the client has not sent a heartbeat, in seconds.</description>
+  </property>
+
+  <property>
+    <name>hive.txn.max.open.batch</name>
+    <value>1000</value>
+    <description>
+      Maximum number of transactions that can be fetched in one call to open_txns().
+      Increasing this will decrease the number of delta files created when
+      streaming data into Hive.  But it will also increase the number of
+      open transactions at any given time, possibly impacting read performance.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.cli.print.header</name>
+    <value>false</value>
+    <description>
+      Whether to print the names of the columns in query output.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.support.concurrency</name>
+    <value>false</value>
+    <description>
+      Support concurrency and use locks, needed for Transactions. Requires Zookeeper.
+    </description>
+    <display-name>Use Locking</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_txn_acid</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.compactor.initiator.on</name>
+    <value>false</value>
+    <description>Whether to run the compactor's initiator thread in this metastore instance or not. If there is more than one instance of the thrift metastore this should be set to true on only one instance. Setting true on only one host can be achieved by creating a config-group containing the metastore host, and overriding the default value to true in it.</description>
+    <display-name>Run Compactor</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_txn_acid</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.compactor.worker.threads</name>
+    <value>0</value>
+    <description>Number of compactor worker threads to run on this metastore instance. Can be different values on different metastore instances.</description>
+    <display-name>Number of threads used by Compactor</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>20</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_txn_acid</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.compactor.worker.timeout</name>
+    <value>86400L</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      Time before a given compaction in working state is declared a failure
+      and returned to the initiated state.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.compactor.check.interval</name>
+    <value>300L</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      Time between checks to see if any partitions need compacted.
+      This should be kept high because each check for compaction requires many calls against the NameNode.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.fetch.task.conversion</name>
+    <value>more</value>
+    <description>
+      Expects one of [none, minimal, more].
+      Some select queries can be converted to single FETCH task minimizing latency.
+      Currently the query should be single sourced not having any subquery and should not have
+      any aggregations or distincts (which incurs RS), lateral views and joins.
+      0. none : disable hive.fetch.task.conversion
+      1. minimal : SELECT STAR, FILTER on partition columns, LIMIT only
+      2. more    : SELECT, FILTER, LIMIT only (support TABLESAMPLE and virtual columns)
+    </description>
+  </property>
+
+  <property>
+    <name>hive.fetch.task.aggr</name>
+    <value>false</value>
+    <description>
+      Aggregation queries with no group-by clause (for example, select count(*) from src) execute
+      final aggregations in single reduce task. If this is set true, Hive delegates final aggregation
+      stage to fetch task, possibly decreasing the query time.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.fetch.task.conversion.threshold</name>
+    <value>1073741824</value>
+    <description>
+      Input threshold for applying hive.fetch.task.conversion. If target table is native, input length
+      is calculated by summation of file lengths. If it's not native, storage handler for the table
+      can optionally implement org.apache.hadoop.hive.ql.metadata.InputEstimator interface.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.compactor.delta.num.threshold</name>
+    <value>10</value>
+    <description>Number of delta files that must exist in a directory before the compactor will attempt a minor compaction.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.delta.pct.threshold</name>
+    <value>0.1f</value>
+    <description>Percentage (by size) of base that deltas can be before major compaction is initiated.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.abortedtxn.threshold</name>
+    <value>1000</value>
+    <description>Number of aborted transactions involving a particular table or partition before major compaction is initiated.</description>
+  </property>
+
+  <property>
+    <name>datanucleus.cache.level2.type</name>
+    <value>none</value>
+    <description>Determines caching mechanism DataNucleus L2 cache will use. It is strongly recommended to use default value of 'none' as other values may cause consistency errors in Hive.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.thrift.port</name>
+    <value>10001</value>
+    <display-name>HiveServer2 Port</display-name>
+    <description>
+      TCP port number to listen on, default 10000.
+    </description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>int</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.server2.allow.user.substitution</name>
+    <value>true</value>
+    <description>Allow alternate user to be specified as part of HiveServer2 open connection request.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.thrift.max.worker.threads</name>
+    <value>500</value>
+    <description>Maximum number of Thrift worker threads</description>
+  </property>
+
+  <property>
+    <name>hive.server2.thrift.sasl.qop</name>
+    <value>auth</value>
+    <description>
+      Expects one of [auth, auth-int, auth-conf].
+      Sasl QOP value; Set it to one of following values to enable higher levels of
+      protection for HiveServer2 communication with clients.
+      "auth" - authentication only (default)
+      "auth-int" - authentication plus integrity protection
+      "auth-conf" - authentication plus integrity and confidentiality protection
+      This is applicable only if HiveServer2 is configured to use Kerberos authentication.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.authentication.spnego.principal</name>
+    <value>/etc/security/keytabs/spnego.service.keytab</value>
+    <description>
+      SPNego service principal, optional,
+      typical value would look like HTTP/_HOST@EXAMPLE.COM
+      SPNego service principal would be used by HiveServer2 when Kerberos security is enabled
+      and HTTP transport mode is used.
+      This needs to be set only if SPNEGO is to be used in authentication.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.authentication.spnego.keytab</name>
+    <value>HTTP/_HOST@EXAMPLE.COM</value>
+    <description>
+      keytab file for SPNego principal, optional,
+      typical value would look like /etc/security/keytabs/spnego.service.keytab,
+      This keytab would be used by HiveServer2 when Kerberos security is enabled and
+      HTTP transport mode is used.
+      This needs to be set only if SPNEGO is to be used in authentication.
+      SPNego authentication would be honored only if valid
+      hive.server2.authentication.spnego.principal
+      and
+      hive.server2.authentication.spnego.keytab
+      are specified.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.authentication</name>
+    <description>Authentication mode, default NONE. Options are NONE, NOSASL, KERBEROS, LDAP, PAM and CUSTOM</description>
+    <value>NONE</value>
+    <display-name>HiveServer2 Authentication</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>NONE</value>
+          <label>None</label>
+        </entry>
+        <entry>
+          <value>LDAP</value>
+          <label>LDAP</label>
+        </entry>
+        <entry>
+          <value>KERBEROS</value>
+          <label>Kerberos</label>
+        </entry>
+        <entry>
+          <value>PAM</value>
+          <label>PAM</label>
+        </entry>
+        <entry>
+          <value>CUSTOM</value>
+          <label>Custom</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.server2.support.dynamic.service.discovery</name>
+    <value>true</value>
+    <description>Whether HiveServer2 supports dynamic service discovery for its clients.
+      To support this, each instance of HiveServer2 currently uses ZooKeeper to register itself,
+      when it is brought up. JDBC/ODBC clients should use the ZooKeeper ensemble: hive.zookeeper.quorum
+      in their connection string.
+    </description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.server2.zookeeper.namespace</name>
+    <value>hiveserver2</value>
+    <description>The parent node in ZooKeeper used by HiveServer2 when supporting dynamic service discovery.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.thrift.http.port</name>
+    <value>10003</value>
+    <description>Port number of HiveServer2 Thrift interface when hive.server2.transport.mode is 'http'.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.transport.mode</name>
+    <value>binary</value>
+    <description>
+      Expects one of [binary, http].
+      Transport mode of HiveServer2.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.default.fileformat</name>
+    <value>TextFile</value>
+    <description>Default file format for CREATE TABLE statement.</description>
+    <display-name>Default File Format</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>ORC</value>
+          <description>The Optimized Row Columnar (ORC) file format provides a highly efficient way to store Hive data. It was designed to overcome limitations of the other Hive file formats. Using ORC files improves performance when Hive is reading, writing, and processing data.</description>
+        </entry>
+        <entry>
+          <value>TextFile</value>
+          <description>Text file format saves Hive data as normal text.</description>
+        </entry>
+      </entries>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>atlas.cluster.name</name>
+    <value>primary</value>
+    <depends-on>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.enableTLS</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>atlas.rest.address</name>
+    <value>http://localhost:21000</value>
+    <depends-on>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.enableTLS</name>
+      </property>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.server.http.port</name>
+      </property>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.server.https.port</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.default.fileformat.managed</name>
+    <value>TextFile</value>
+    <description>
+      Default file format for CREATE TABLE statement applied to managed tables only.
+      External tables will be created with default file format. Leaving this null
+      will result in using the default file format for all tables.
+    </description>
+  </property>
+
+  <property>
+    <name>datanucleus.rdbms.datastoreAdapterClassName</name>
+    <description>Datanucleus Class, This property used only when hive db is SQL Anywhere</description>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_database</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>atlas.hook.hive.maxThreads</name>
+    <value>1</value>
+    <description>
+      Maximum number of threads used by Atlas hook.
+    </description>
+  </property>
+
+  <property>
+    <name>atlas.hook.hive.minThreads</name>
+    <value>1</value>
+    <description>
+      Minimum number of threads maintained by Atlas hook.
+    </description>
+  </property>
+
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b22aa2e4/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/metainfo.xml
index 82bdc92..ee973ed 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/metainfo.xml
@@ -21,6 +21,55 @@
     <service>
       <name>HIVE</name>
       <version>1.2.1.2.4</version>
+        <components>
+          <component>
+            <name>HIVE_SERVER_INTERACTIVE</name>
+            <displayName>HiveServer2 Interactive</displayName>
+            <category>MASTER</category>
+            <cardinality>1</cardinality>
+            <versionAdvertised>true</versionAdvertised>
+            <clientsToUpdateConfigs></clientsToUpdateConfigs>
+            <dependencies>
+              <dependency>
+                <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+                <scope>cluster</scope>
+                <auto-deploy>
+                  <enabled>true</enabled>
+                  <co-locate>HIVE/HIVE_SERVER</co-locate>
+                </auto-deploy>
+              </dependency>
+              <dependency>
+                <name>YARN/YARN_CLIENT</name>
+                <scope>host</scope>
+                <auto-deploy>
+                  <enabled>true</enabled>
+                </auto-deploy>
+              </dependency>
+              <dependency>
+                <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+                <scope>host</scope>
+                <auto-deploy>
+                  <enabled>true</enabled>
+                </auto-deploy>
+              </dependency>
+              <dependency>
+                <name>TEZ/TEZ_CLIENT</name>
+                <scope>host</scope>
+                <auto-deploy>
+                  <enabled>true</enabled>
+                </auto-deploy>
+              </dependency>
+              </dependencies>
+                <commandScript>
+                  <script>scripts/hive_server_interactive.py</script>
+                  <scriptType>PYTHON</scriptType>
+                </commandScript>
+                <configuration-dependencies>
+                  <config-type>hive-site</config-type>
+                  <config-type>hive-interactive-site</config-type>
+                </configuration-dependencies>
+          </component>
+        </components>
     </service>
   </services>
 </metainfo>


[24/50] [abbrv] ambari git commit: AMBARI-14772. Added ability to set rack information in the add host template (Laszlo Puskas via rlevas)

Posted by jo...@apache.org.
AMBARI-14772. Added ability to set rack information in the add host template  (Laszlo Puskas via rlevas)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9d8675ad
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9d8675ad
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9d8675ad

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 9d8675ade510fa9617a48fa761cd7a829c40ef0b
Parents: 0a9a3aa
Author: Laszlo Puskas <lp...@hortonworks.com>
Authored: Fri Feb 12 11:35:32 2016 -0500
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Fri Feb 12 11:35:44 2016 -0500

----------------------------------------------------------------------
 .../internal/HostResourceProvider.java          | 12 ++++-
 .../internal/ScaleClusterRequest.java           | 28 +++++++++--
 .../server/topology/ClusterTopologyImpl.java    | 51 +++++++++++++-------
 3 files changed, 67 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9d8675ad/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
index da73f15..6251f07 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
@@ -145,6 +145,11 @@ public class HostResourceProvider extends AbstractControllerResourceProvider {
   public static final String HOST_PREDICATE_PROPERTY_ID =
       PropertyHelper.getPropertyId(null, "host_predicate");
 
+  //todo use the same json structure for cluster host addition (cluster template and upscale)
+  public static final String HOST_RACK_INFO_NO_CATEGORY_PROPERTY_ID =
+      PropertyHelper.getPropertyId(null, "rack_info");
+
+
   private static Set<String> pkPropertyIds =
       new HashSet<String>(Arrays.asList(new String[]{
           HOST_NAME_PROPERTY_ID}));
@@ -354,6 +359,7 @@ public class HostResourceProvider extends AbstractControllerResourceProvider {
     //todo: constants
     baseUnsupported.remove(HOST_COUNT_PROPERTY_ID);
     baseUnsupported.remove(HOST_PREDICATE_PROPERTY_ID);
+    baseUnsupported.remove(HOST_RACK_INFO_NO_CATEGORY_PROPERTY_ID);
 
     return checkConfigPropertyIds(baseUnsupported, "Hosts");
   }
@@ -406,7 +412,11 @@ public class HostResourceProvider extends AbstractControllerResourceProvider {
         (String) properties.get(HOST_CLUSTER_NAME_PROPERTY_ID),
         null);
     hostRequest.setPublicHostName((String) properties.get(HOST_PUBLIC_NAME_PROPERTY_ID));
-    hostRequest.setRackInfo((String) properties.get(HOST_RACK_INFO_PROPERTY_ID));
+
+    String rackInfo = (String) ((null != properties.get(HOST_RACK_INFO_PROPERTY_ID))? properties.get(HOST_RACK_INFO_PROPERTY_ID):
+            properties.get(HOST_RACK_INFO_NO_CATEGORY_PROPERTY_ID));
+
+    hostRequest.setRackInfo(rackInfo);
     hostRequest.setBlueprintName((String) properties.get(BLUEPRINT_PROPERTY_ID));
     hostRequest.setHostGroupName((String) properties.get(HOSTGROUP_PROPERTY_ID));
     

http://git-wip-us.apache.org/repos/asf/ambari/blob/9d8675ad/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ScaleClusterRequest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ScaleClusterRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ScaleClusterRequest.java
index d784f1d..b5d2f9d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ScaleClusterRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ScaleClusterRequest.java
@@ -19,6 +19,11 @@
 
 package org.apache.ambari.server.controller.internal;
 
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.ambari.server.api.predicate.InvalidQueryException;
 import org.apache.ambari.server.stack.NoSuchStackException;
 import org.apache.ambari.server.topology.Blueprint;
@@ -26,17 +31,16 @@ import org.apache.ambari.server.topology.Configuration;
 import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.ambari.server.topology.InvalidTopologyTemplateException;
 import org.apache.ambari.server.topology.TopologyValidator;
-
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A request for a scaling an existing cluster.
  */
 public class ScaleClusterRequest extends BaseClusterRequest {
 
+  private static final Logger LOGGER = LoggerFactory.getLogger(ScaleClusterRequest.class);
+
   /**
    * cluster name
    */
@@ -175,9 +179,23 @@ public class ScaleClusterRequest extends BaseClusterRequest {
         throw new InvalidTopologyTemplateException("Invalid host group specified in request: " + hgName);
       }
       hostGroupInfo.addHost(hostName);
+      hostGroupInfo.addHostRackInfo(hostName, processRackInfo(properties));
+    }
+  }
+
+  private String processRackInfo(Map<String, Object> properties) {
+    String rackInfo = null;
+    if (properties.containsKey(HostResourceProvider.HOST_RACK_INFO_PROPERTY_ID)) {
+      rackInfo = (String) properties.get(HostResourceProvider.HOST_RACK_INFO_PROPERTY_ID);
+    } else if (properties.containsKey(HostResourceProvider.HOST_RACK_INFO_NO_CATEGORY_PROPERTY_ID)) {
+      rackInfo = (String) properties.get(HostResourceProvider.HOST_RACK_INFO_NO_CATEGORY_PROPERTY_ID);
+    } else {
+      LOGGER.debug("No rack info provided");
     }
+    return rackInfo;
   }
 
+
   /**
    * Parse blueprint.
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/9d8675ad/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
index 05dc504..af716a0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
@@ -19,12 +19,6 @@
 
 package org.apache.ambari.server.topology;
 
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.RequestStatusResponse;
-import org.apache.ambari.server.controller.internal.ProvisionAction;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
@@ -33,6 +27,12 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.RequestStatusResponse;
+import org.apache.ambari.server.controller.internal.ProvisionAction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * Represents a cluster topology.
  * Topology includes the the associated blueprint, cluster configuration and hostgroup -> host mapping.
@@ -280,40 +280,44 @@ public class ClusterTopologyImpl implements ClusterTopology {
     return ambariContext;
   }
 
-  private void registerHostGroupInfo(Map<String, HostGroupInfo> groupInfoMap) throws InvalidTopologyException {
-    checkForDuplicateHosts(groupInfoMap);
-    for (HostGroupInfo hostGroupInfo : groupInfoMap.values() ) {
-      String hostGroupName = hostGroupInfo.getHostGroupName();
+  private void registerHostGroupInfo(Map<String, HostGroupInfo> requestedHostGroupInfoMap) throws InvalidTopologyException {
+    LOG.debug("Registering requested host group information for {} hostgroups", requestedHostGroupInfoMap.size());
+    checkForDuplicateHosts(requestedHostGroupInfoMap);
+
+    for (HostGroupInfo requestedHostGroupInfo : requestedHostGroupInfoMap.values()) {
+      String hostGroupName = requestedHostGroupInfo.getHostGroupName();
+
       //todo: doesn't support using a different blueprint for update (scaling)
       HostGroup baseHostGroup = getBlueprint().getHostGroup(hostGroupName);
+
       if (baseHostGroup == null) {
         throw new IllegalArgumentException("Invalid host_group specified: " + hostGroupName +
             ".  All request host groups must have a corresponding host group in the specified blueprint");
       }
       //todo: split into two methods
-      HostGroupInfo existingHostGroupInfo = hostGroupInfoMap.get(hostGroupName);
-      if (existingHostGroupInfo == null) {
+      HostGroupInfo currentHostGroupInfo = hostGroupInfoMap.get(hostGroupName);
+      if (currentHostGroupInfo == null) {
         // blueprint host group config
         Configuration bpHostGroupConfig = baseHostGroup.getConfiguration();
         // parent config is BP host group config but with parent set to topology cluster scoped config
         Configuration parentConfiguration = new Configuration(bpHostGroupConfig.getProperties(),
             bpHostGroupConfig.getAttributes(), getConfiguration());
 
-        hostGroupInfo.getConfiguration().setParentConfiguration(parentConfiguration);
-        hostGroupInfoMap.put(hostGroupName, hostGroupInfo);
+        requestedHostGroupInfo.getConfiguration().setParentConfiguration(parentConfiguration);
+        hostGroupInfoMap.put(hostGroupName, requestedHostGroupInfo);
       } else {
         // Update.  Either add hosts or increment request count
-        if (! hostGroupInfo.getHostNames().isEmpty()) {
+        if (!requestedHostGroupInfo.getHostNames().isEmpty()) {
           try {
             // this validates that hosts aren't already registered with groups
-            addHostsToTopology(hostGroupInfo);
+            addHostsToTopology(requestedHostGroupInfo);
           } catch (NoSuchHostGroupException e) {
             //todo
             throw new InvalidTopologyException("Attempted to add hosts to unknown host group: " + hostGroupName);
           }
         } else {
-          existingHostGroupInfo.setRequestedCount(
-              existingHostGroupInfo.getRequestedHostCount() + hostGroupInfo.getRequestedHostCount());
+          currentHostGroupInfo.setRequestedCount(
+              currentHostGroupInfo.getRequestedHostCount() + requestedHostGroupInfo.getRequestedHostCount());
         }
         //todo: throw exception in case where request attempts to modify HG configuration in scaling operation
       }
@@ -322,10 +326,21 @@ public class ClusterTopologyImpl implements ClusterTopology {
 
   private void addHostsToTopology(HostGroupInfo hostGroupInfo) throws InvalidTopologyException, NoSuchHostGroupException {
     for (String host: hostGroupInfo.getHostNames()) {
+      registerRackInfo(hostGroupInfo, host);
       addHostToTopology(hostGroupInfo.getHostGroupName(), host);
     }
   }
 
+  private void registerRackInfo(HostGroupInfo hostGroupInfo, String host) {
+    synchronized (hostGroupInfoMap) {
+      HostGroupInfo cachedHGI = hostGroupInfoMap.get(hostGroupInfo.getHostGroupName());
+      if (null != cachedHGI) {
+        cachedHGI.addHostRackInfo(host, hostGroupInfo.getHostRackInfo().get(host));
+      }
+    }
+  }
+
+
   private void checkForDuplicateHosts(Map<String, HostGroupInfo> groupInfoMap) throws InvalidTopologyException {
     Set<String> hosts = new HashSet<String>();
     Set<String> duplicates = new HashSet<String>();


[25/50] [abbrv] ambari git commit: AMBARI-15029. Adding a Service results in deleting Config Group mappings (more than 1 CG present) (akovalenko)

Posted by jo...@apache.org.
AMBARI-15029. Adding a Service results in deleting Config Group mappings (more than 1 CG present) (akovalenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f7055ae7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f7055ae7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f7055ae7

Branch: refs/heads/branch-dev-patch-upgrade
Commit: f7055ae76a64f445d2845ce66f6ec925dc94f99d
Parents: 9d8675a
Author: Aleksandr Kovalenko <ak...@hortonworks.com>
Authored: Fri Feb 12 16:55:05 2016 +0200
Committer: Aleksandr Kovalenko <ak...@hortonworks.com>
Committed: Fri Feb 12 20:01:58 2016 +0200

----------------------------------------------------------------------
 .../controllers/main/service/info/configs.js    |  4 +--
 ambari-web/app/controllers/wizard.js            |  4 ++-
 .../app/controllers/wizard/step7_controller.js  | 31 +++++++++++++-------
 ambari-web/app/routes/add_service_routes.js     |  1 +
 ambari-web/test/controllers/wizard_test.js      |  5 ++--
 5 files changed, 29 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f7055ae7/ambari-web/app/controllers/main/service/info/configs.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/info/configs.js b/ambari-web/app/controllers/main/service/info/configs.js
index ae2939e..b6a434c 100644
--- a/ambari-web/app/controllers/main/service/info/configs.js
+++ b/ambari-web/app/controllers/main/service/info/configs.js
@@ -429,8 +429,8 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ConfigsLoader, A
           for (var prop in config.properties) {
             var fileName = App.config.getOriginalFileName(config.type);
             var serviceConfig = allConfigs.filterProperty('name', prop).findProperty('filename', fileName);
-            var value = App.config.formatPropertyValue(serviceConfig, config.properties[prop]);
             if (serviceConfig) {
+              var value = App.config.formatPropertyValue(serviceConfig, config.properties[prop]);
               var isFinal = !!(config.properties_attributes && config.properties_attributes.final && config.properties_attributes.final[prop]);
               if (self.get('selectedConfigGroup.isDefault') || configGroup.get('name') == self.get('selectedConfigGroup.name')) {
                 var overridePlainObject = {
@@ -444,7 +444,7 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ConfigsLoader, A
               }
             } else {
               var isEditable = self.get('canEdit') && configGroup.get('name') == self.get('selectedConfigGroup.name');
-              allConfigs.push(App.config.createCustomGroupConfig(prop, fileName, value, configGroup, isEditable));
+              allConfigs.push(App.config.createCustomGroupConfig(prop, fileName, config.properties[prop], configGroup, isEditable));
             }
           }
         });

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7055ae7/ambari-web/app/controllers/wizard.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard.js b/ambari-web/app/controllers/wizard.js
index 762149f..05ef68e 100644
--- a/ambari-web/app/controllers/wizard.js
+++ b/ambari-web/app/controllers/wizard.js
@@ -889,11 +889,13 @@ App.WizardController = Em.Controller.extend(App.LocalStorage, App.ThemesMappingM
     var installedServiceNames = stepController.get('installedServiceNames') || [];
     var installedServiceNamesMap = installedServiceNames.toWickMap();
     stepController.get('stepConfigs').forEach(function (_content) {
-
       if (_content.serviceName === 'YARN') {
         _content.set('configs', App.config.textareaIntoFileConfigs(_content.get('configs'), 'capacity-scheduler.xml'));
       }
       _content.get('configs').forEach(function (_configProperties) {
+        if (!Em.isNone(_configProperties.get('group'))) {
+          return false;
+        }
         var configProperty = App.config.createDefaultConfig(
           _configProperties.get('name'),
           _configProperties.get('serviceName'),

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7055ae7/ambari-web/app/controllers/wizard/step7_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step7_controller.js b/ambari-web/app/controllers/wizard/step7_controller.js
index 7e96845..ee37427 100644
--- a/ambari-web/app/controllers/wizard/step7_controller.js
+++ b/ambari-web/app/controllers/wizard/step7_controller.js
@@ -479,14 +479,15 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
 
   loadServiceConfigGroupOverridesSuccess: function (data, opt, params) {
     data.items.forEach(function (config) {
+      var hostOverrideValue, hostOverrideIsFinal;
       var group = params.typeTagToGroupMap[config.type + "///" + config.tag];
       var properties = config.properties;
       for (var prop in properties) {
         var fileName = App.config.getOriginalFileName(config.type);
         var serviceConfig = !!params.configKeyToConfigMap[fileName] ? params.configKeyToConfigMap[fileName][prop] : false;
-        var hostOverrideValue = App.config.formatPropertyValue(serviceConfig, properties[prop]);
-        var hostOverrideIsFinal = !!(config.properties_attributes && config.properties_attributes.final && config.properties_attributes.final[prop]);
         if (serviceConfig) {
+          hostOverrideValue = App.config.formatPropertyValue(serviceConfig, properties[prop]);
+          hostOverrideIsFinal = !!(config.properties_attributes && config.properties_attributes.final && config.properties_attributes.final[prop]);
           // Value of this property is different for this host.
           if (!Em.get(serviceConfig, 'overrides')) Em.set(serviceConfig, 'overrides', []);
           serviceConfig.overrides.pushObject({value: hostOverrideValue, group: group, isFinal: hostOverrideIsFinal});
@@ -595,6 +596,9 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
       if (Em.isNone(serviceConfigProperty.get('isOverridable'))) {
         serviceConfigProperty.set('isOverridable', true);
       }
+      if (!Em.isNone(serviceConfigProperty.get('group'))) {
+        serviceConfigProperty.get('group.properties').pushObject(serviceConfigProperty);
+      }
       this._updateOverridesForConfig(serviceConfigProperty, component);
       this._updateIsEditableFlagForConfig(serviceConfigProperty, defaultGroupSelected);
 
@@ -1218,6 +1222,7 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
         service.set('configGroups', [App.ServiceConfigGroup.find(id)]);
       }
       else {
+        App.store.commit();
         App.store.loadMany(App.ServiceConfigGroup, serviceRawGroups);
         App.store.commit();
         serviceRawGroups.forEach(function(item){
@@ -1225,19 +1230,24 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
           var wrappedProperties = [];
 
           item.properties.forEach(function (propertyData) {
-            var parentSCP = service.configs.filterProperty('filename', propertyData.filename).findProperty('name', propertyData.name);
-            var overriddenSCP = App.ServiceConfigProperty.create(parentSCP);
-            overriddenSCP.set('isOriginalSCP', false);
-            overriddenSCP.set('parentSCP', parentSCP);
-            overriddenSCP.set('group', modelGroup);
-            overriddenSCP.setProperties(propertyData);
+            var overriddenSCP, parentSCP = service.configs.filterProperty('filename', propertyData.filename).findProperty('name', propertyData.name);
+            if (parentSCP) {
+              overriddenSCP = App.ServiceConfigProperty.create(parentSCP);
+              overriddenSCP.set('parentSCP', parentSCP);
+            } else {
+              overriddenSCP = App.config.createCustomGroupConfig(propertyData.name, propertyData.filename, propertyData.value, modelGroup, true, false);
+              this.get('stepConfigs').findProperty('serviceName', service.serviceName).get('configs').pushObject(overriddenSCP);
+            }
+              overriddenSCP.set('isOriginalSCP', false);
+              overriddenSCP.set('group', modelGroup);
+              overriddenSCP.setProperties(propertyData);
             wrappedProperties.pushObject(App.ServiceConfigProperty.create(overriddenSCP));
-          });
+          }, this);
           modelGroup.set('properties', wrappedProperties);
         }, this);
         service.set('configGroups', App.ServiceConfigGroup.find().filterProperty('serviceName', service.get('serviceName')));
       }
-    });
+    }, this);
   },
 
   /**
@@ -1318,6 +1328,7 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
    * @method _setOverrides
    */
   _setOverrides: function (config, overrides) {
+    if (config.get('group')) return config;
     var selectedGroup = this.get('selectedConfigGroup'),
       overrideToAdd = this.get('overrideToAdd'),
       configOverrides = overrides.filterProperty('name', config.get('name'));

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7055ae7/ambari-web/app/routes/add_service_routes.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/routes/add_service_routes.js b/ambari-web/app/routes/add_service_routes.js
index 8658142..d6bab53 100644
--- a/ambari-web/app/routes/add_service_routes.js
+++ b/ambari-web/app/routes/add_service_routes.js
@@ -201,6 +201,7 @@ module.exports = App.WizardRoute.extend({
           });
           router.get('wizardStep7Controller').set('recommendationsConfigs', null);
           router.get('wizardStep7Controller').clearAllRecommendations();
+          addServiceController.setDBProperty('serviceConfigGroups', undefined);
           router.transitionTo('step4');
         });
       });

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7055ae7/ambari-web/test/controllers/wizard_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/wizard_test.js b/ambari-web/test/controllers/wizard_test.js
index d9239200..0264472 100644
--- a/ambari-web/test/controllers/wizard_test.js
+++ b/ambari-web/test/controllers/wizard_test.js
@@ -1143,7 +1143,6 @@ describe('App.WizardController', function () {
             isRequiredByAgent: true,
             hasInitialValue: true,
             isRequired: true,
-            group: {name: 'group'},
             showLabel: true,
             category: 'some_category'
           })
@@ -1177,10 +1176,10 @@ describe('App.WizardController', function () {
       })
     ]});
 
-    it('should save configs to content.serviceConfigProperties', function () {
+    it('should save configs from default config group to content.serviceConfigProperties', function () {
       c.saveServiceConfigProperties(stepController);
       var saved = c.get('content.serviceConfigProperties');
-      expect(saved.length).to.equal(2);
+      expect(saved.length).to.equal(1);
       expect(saved[0].category).to.equal('some_category');
     });
 


[28/50] [abbrv] ambari git commit: AMBARI-15031. Duplicate key violation during ldap sync (Oliver Szabo via rlevas)

Posted by jo...@apache.org.
AMBARI-15031. Duplicate key violation during ldap sync  (Oliver Szabo via rlevas)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e1875539
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e1875539
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e1875539

Branch: refs/heads/branch-dev-patch-upgrade
Commit: e18755393326d10564f7d68c2d025572fed25d63
Parents: ed55354
Author: Oliver Szabo <os...@hortonworks.com>
Authored: Fri Feb 12 16:26:31 2016 -0500
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Fri Feb 12 16:26:35 2016 -0500

----------------------------------------------------------------------
 .../ambari/server/security/ldap/AmbariLdapDataPopulator.java    | 5 ++++-
 .../org/apache/ambari/server/security/ldap/LdapBatchDto.java    | 5 +++++
 .../server/security/ldap/AmbariLdapDataPopulatorTest.java       | 4 ++++
 3 files changed, 13 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e1875539/ambari-server/src/main/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulator.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulator.java b/ambari-server/src/main/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulator.java
index 21492cf..801e43e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulator.java
@@ -485,8 +485,11 @@ public class AmbariLdapDataPopulator {
         batchInfo.getGroupsToBecomeLdap().add(groupName);
       }
       internalGroupsMap.remove(groupName);
+      batchInfo.getGroupsProcessedInternal().add(groupName);
     } else {
-      batchInfo.getGroupsToBeCreated().add(groupName);
+      if (!batchInfo.getGroupsProcessedInternal().contains(groupName)) {
+        batchInfo.getGroupsToBeCreated().add(groupName);
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e1875539/ambari-server/src/main/java/org/apache/ambari/server/security/ldap/LdapBatchDto.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/security/ldap/LdapBatchDto.java b/ambari-server/src/main/java/org/apache/ambari/server/security/ldap/LdapBatchDto.java
index 9247f38..bb9c5ee 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/security/ldap/LdapBatchDto.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/security/ldap/LdapBatchDto.java
@@ -27,6 +27,7 @@ public class LdapBatchDto {
   private final Set<String> groupsToBecomeLdap = new HashSet<String>();
   private final Set<String> groupsToBeCreated = new HashSet<String>();
   private final Set<String> groupsToBeRemoved = new HashSet<String>();
+  private final Set<String> groupsProcessedInternal = new HashSet<>();
   private final Set<String> usersToBecomeLdap = new HashSet<String>();
   private final Set<String> usersToBeCreated = new HashSet<String>();
   private final Set<String> usersToBeRemoved = new HashSet<String>();
@@ -64,4 +65,8 @@ public class LdapBatchDto {
   public Set<String> getUsersToBeRemoved() {
     return usersToBeRemoved;
   }
+
+  public Set<String> getGroupsProcessedInternal() {
+    return groupsProcessedInternal;
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/e1875539/ambari-server/src/test/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulatorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulatorTest.java
index be92871..8ce6c5b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulatorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulatorTest.java
@@ -348,6 +348,10 @@ public class AmbariLdapDataPopulatorTest {
     assertTrue(result.getMembershipToRemove().isEmpty());
     assertTrue(result.getUsersToBecomeLdap().isEmpty());
     assertTrue(result.getUsersToBeRemoved().isEmpty());
+    assertTrue(result.getGroupsProcessedInternal().contains("group1"));
+    assertTrue(result.getGroupsProcessedInternal().contains("group2"));
+    assertTrue(!result.getGroupsProcessedInternal().contains("xgroup1"));
+    assertTrue(!result.getGroupsProcessedInternal().contains("xgroup2"));
     verify(populator.loadLdapTemplate(), populator);
   }
 


[49/50] [abbrv] ambari git commit: AMBARI-15057. Oozie untar and prepare-war should be only done exclusively in preupload.py (aonishuk)

Posted by jo...@apache.org.
AMBARI-15057. Oozie untar and prepare-war should be only done exclusively in preupload.py (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0ce5fea6
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0ce5fea6
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0ce5fea6

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 0ce5fea6dc47085b75ca83de455f059d7ffc6976
Parents: a4f8a95
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Tue Feb 16 19:40:10 2016 +0200
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Tue Feb 16 19:40:10 2016 +0200

----------------------------------------------------------------------
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py    | 11 +---
 .../main/resources/scripts/Ambaripreupload.py   | 59 ++++++++++++++++----
 .../stacks/2.0.6/OOZIE/test_oozie_server.py     | 33 ++---------
 3 files changed, 57 insertions(+), 46 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0ce5fea6/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
index 7591bad..2dd362a 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
@@ -17,7 +17,6 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
-import hashlib
 import os
 
 from resource_management.core.resources.service import ServiceConfig
@@ -228,11 +227,7 @@ def prepare_war():
 
   if run_prepare_war:
     # Time-consuming to run
-    Execute(command,
-            user=params.oozie_user
-    )
-
-    return_code, output = shell.call(command, user=params.oozie_user, logoutput=False, quiet=False)
+    return_code, output = shell.call(command, user=params.oozie_user)
     if output is None:
       output = ""
 
@@ -273,8 +268,7 @@ def oozie_server_specific():
   )
   
   hashcode_file = format("{oozie_home}/.hashcode")
-  hashcode = hashlib.md5(format('{oozie_home}/oozie-sharelib.tar.gz')).hexdigest()
-  skip_recreate_sharelib = format("test -f {hashcode_file} && test -d {oozie_home}/share && [[ `cat {hashcode_file}` == '{hashcode}' ]]")
+  skip_recreate_sharelib = format("test -f {hashcode_file} && test -d {oozie_home}/share")
 
   untar_sharelib = ('tar','-xvf',format('{oozie_home}/oozie-sharelib.tar.gz'),'-C',params.oozie_home)
 
@@ -319,7 +313,6 @@ def oozie_server_specific():
   prepare_war()
 
   File(hashcode_file,
-       content = hashcode,
        mode = 0644,
   )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/0ce5fea6/ambari-server/src/main/resources/scripts/Ambaripreupload.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/scripts/Ambaripreupload.py b/ambari-server/src/main/resources/scripts/Ambaripreupload.py
index 5a20698..61db286 100644
--- a/ambari-server/src/main/resources/scripts/Ambaripreupload.py
+++ b/ambari-server/src/main/resources/scripts/Ambaripreupload.py
@@ -26,7 +26,6 @@ sys.path.append("/usr/lib/python2.6/site-packages")
 import glob
 from logging import thread
 import re
-import hashlib
 import tempfile
 import time
 import functools
@@ -143,6 +142,7 @@ with Environment() as env:
     hdfs_site = ConfigDictionary({'dfs.webhdfs.enabled':False, 
     })
     fs_default = get_fs_root()
+    oozie_secure = ''
     oozie_env_sh_template = \
   '''
   #!/bin/bash
@@ -232,14 +232,13 @@ with Environment() as env:
     source_and_dest_pairs = [(component_tar_source_file, destination_file), ]
     return _copy_files(source_and_dest_pairs, file_owner, group_owner, kinit_if_needed)
 
-
-
   env.set_params(params)
   hadoop_conf_dir = params.hadoop_conf_dir
    
   oozie_libext_dir = format("/usr/hdp/{hdp_version}/oozie/libext")
   oozie_home=format("/usr/hdp/{hdp_version}/oozie")
   oozie_setup_sh=format("/usr/hdp/{hdp_version}/oozie/bin/oozie-setup.sh")
+  oozie_setup_sh_current="/usr/hdp/current/oozie-server/bin/oozie-setup.sh"
   oozie_tmp_dir = "/var/tmp/oozie"
   configure_cmds = []
   configure_cmds.append(('tar','-xvf', oozie_home + '/oozie-sharelib.tar.gz','-C', oozie_home))
@@ -254,22 +253,62 @@ with Environment() as env:
   )
 
   hashcode_file = format("{oozie_home}/.hashcode")
-  hashcode = hashlib.md5(format('{oozie_home}/oozie-sharelib.tar.gz')).hexdigest()
-  skip_recreate_sharelib = format("test -f {hashcode_file} && test -d {oozie_home}/share && [[ `cat {hashcode_file}` == '{hashcode}' ]]")
+  skip_recreate_sharelib = format("test -f {hashcode_file} && test -d {oozie_home}/share")
 
   Execute( configure_cmds,
            not_if  = format("{no_op_test} || {skip_recreate_sharelib}"), 
            sudo = True,
            )
-  Execute(format("cd {oozie_tmp_dir} && {oozie_setup_sh} prepare-war"),
-    user = params.oozie_user,
-    not_if  = format("{no_op_test} || {skip_recreate_sharelib}")
-  )
+  
   File(hashcode_file,
-       content = hashcode,
        mode = 0644,
   )
+  
+  ###############################################
+  # PREPARE-WAR [BEGIN]
+  ###############################################
+  prepare_war_cmd_file = format("{oozie_home}/.prepare_war_cmd")
+
+  # DON'T CHANGE THE VALUE SINCE IT'S USED TO DETERMINE WHETHER TO RUN THE COMMAND OR NOT BY READING THE MARKER FILE.
+  # Oozie tmp dir should be /var/tmp/oozie and is already created by a function above.
+  command = format("cd {oozie_tmp_dir} && {oozie_setup_sh} prepare-war {oozie_secure} ")
+  command_to_file = format("cd {oozie_tmp_dir} && {oozie_setup_sh_current} prepare-war {oozie_secure} ")
+
+  run_prepare_war = False
+  if os.path.exists(prepare_war_cmd_file):
+    cmd = ""
+    with open(prepare_war_cmd_file, "r") as f:
+      cmd = f.readline().strip()
 
+    if command_to_file != cmd:
+      run_prepare_war = True
+      Logger.info(format("Will run prepare war cmd since marker file {prepare_war_cmd_file} has contents which differ.\n" \
+      "Expected: {command_to_file}.\nActual: {cmd}."))
+  else:
+    run_prepare_war = True
+    Logger.info(format("Will run prepare war cmd since marker file {prepare_war_cmd_file} is missing."))
+
+  if run_prepare_war:
+    # Time-consuming to run
+    return_code, output = shell.call(command, user=params.oozie_user)
+    if output is None:
+      output = ""
+
+    if return_code != 0 or "New Oozie WAR file with added".lower() not in output.lower():
+      message = "Unexpected Oozie WAR preparation output {0}".format(output)
+      Logger.error(message)
+      raise Fail(message)
+
+    # Generate marker file
+    File(prepare_war_cmd_file,
+         content=command_to_file,
+         mode=0644,
+    )
+  else:
+    Logger.info(format("No need to run prepare-war since marker file {prepare_war_cmd_file} already exists."))
+  ###############################################
+  # PREPARE-WAR END [BEGIN]
+  ###############################################
   oozie_shared_lib = format("/usr/hdp/{hdp_version}/oozie/share")
   oozie_user = 'oozie'
   oozie_hdfs_user_dir = format("{hdfs_path_prefix}/user/{oozie_user}")

http://git-wip-us.apache.org/repos/asf/ambari/blob/0ce5fea6/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
index b9c0717..ba1b84a 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
@@ -25,14 +25,9 @@ from resource_management.core import shell
 from resource_management.core.exceptions import Fail
 from resource_management.libraries import functions
 from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
-import hashlib
 import tempfile
 
-md5_mock = MagicMock()
-md5_mock.hexdigest.return_value = "abc123hash"
-
 @patch("platform.linux_distribution", new = MagicMock(return_value="Linux"))
-@patch.object(hashlib, "md5", new=MagicMock(return_value=md5_mock))
 @patch.object(WebHDFSUtil, "run_command", new=MagicMock(return_value={}))
 @patch.object(tempfile, "gettempdir", new=MagicMock(return_value="/tmp"))
 class TestOozieServer(RMFTestCase):
@@ -221,7 +216,7 @@ class TestOozieServer(RMFTestCase):
                               create_parents = True,
                               )
     self.assertResourceCalled('Execute', ('tar', '-xvf', '/usr/lib/oozie/oozie-sharelib.tar.gz', '-C', '/usr/lib/oozie'),
-        not_if = "ambari-sudo.sh su oozie -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1' || test -f /usr/lib/oozie/.hashcode && test -d /usr/lib/oozie/share && [[ `cat /usr/lib/oozie/.hashcode` == 'abc123hash' ]]",
+        not_if = "ambari-sudo.sh su oozie -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1' || test -f /usr/lib/oozie/.hashcode && test -d /usr/lib/oozie/share",
         sudo = True,
     )
     self.assertResourceCalled('Execute', ('cp', '/usr/share/HDP-oozie/ext-2.2.zip', '/usr/lib/oozie/libext'),
@@ -259,15 +254,11 @@ class TestOozieServer(RMFTestCase):
         not_if = "ambari-sudo.sh su oozie -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1'",
     )
 
-    self.assertResourceCalled('Execute', 'cd /var/tmp/oozie && /usr/lib/oozie/bin/oozie-setup.sh prepare-war',
-                              user = "oozie"
-    )
     self.assertResourceCalled('File', '/usr/lib/oozie/.prepare_war_cmd',
                               content = 'cd /var/tmp/oozie && /usr/lib/oozie/bin/oozie-setup.sh prepare-war',
                               mode = 0644,
     )
     self.assertResourceCalled('File', '/usr/lib/oozie/.hashcode',
-                              content = 'abc123hash',
                               mode = 0644,
     )
     self.assertResourceCalled('Directory', '/var/lib/oozie/oozie-server',
@@ -443,9 +434,9 @@ class TestOozieServer(RMFTestCase):
                               create_parents = True,
                               )
     self.assertResourceCalled('Execute', ('tar', '-xvf', '/usr/lib/oozie/oozie-sharelib.tar.gz', '-C', '/usr/lib/oozie'),
-                              not_if = "ambari-sudo.sh su oozie -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1' || test -f /usr/lib/oozie/.hashcode && test -d /usr/lib/oozie/share && [[ `cat /usr/lib/oozie/.hashcode` == 'abc123hash' ]]",
-                              sudo = True,
-                              )
+        not_if = "ambari-sudo.sh su oozie -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1' || test -f /usr/lib/oozie/.hashcode && test -d /usr/lib/oozie/share",
+        sudo = True,
+    )
     self.assertResourceCalled('Execute', ('cp', '/usr/share/HDP-oozie/ext-2.2.zip', '/usr/lib/oozie/libext'),
                               not_if = "ambari-sudo.sh su oozie -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1'",
                               sudo = True,
@@ -482,15 +473,11 @@ class TestOozieServer(RMFTestCase):
     self.assertResourceCalled('Execute', 'ambari-sudo.sh chown oozie:hadoop /usr/lib/oozie/libext/falcon-oozie-el-extension-*.jar',
                               not_if = "ambari-sudo.sh su oozie -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1'",
                               )
-    self.assertResourceCalled('Execute', 'cd /var/tmp/oozie && /usr/lib/oozie/bin/oozie-setup.sh prepare-war',
-                              user = "oozie"
-    )
     self.assertResourceCalled('File', '/usr/lib/oozie/.prepare_war_cmd',
                               content = 'cd /var/tmp/oozie && /usr/lib/oozie/bin/oozie-setup.sh prepare-war',
                               mode = 0644,
     )
     self.assertResourceCalled('File', '/usr/lib/oozie/.hashcode',
-                              content = 'abc123hash',
                               mode = 0644,
     )
     self.assertResourceCalled('Directory', '/var/lib/oozie/oozie-server',
@@ -822,7 +809,7 @@ class TestOozieServer(RMFTestCase):
         create_parents = True,
     )
     self.assertResourceCalled('Execute', ('tar', '-xvf', '/usr/lib/oozie/oozie-sharelib.tar.gz', '-C', '/usr/lib/oozie'),
-        not_if = "ambari-sudo.sh su oozie -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1' || test -f /usr/lib/oozie/.hashcode && test -d /usr/lib/oozie/share && [[ `cat /usr/lib/oozie/.hashcode` == 'abc123hash' ]]",
+        not_if = "ambari-sudo.sh su oozie -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1' || test -f /usr/lib/oozie/.hashcode && test -d /usr/lib/oozie/share",
         sudo = True,
     )
     self.assertResourceCalled('Execute', ('cp', '/usr/share/HDP-oozie/ext-2.2.zip', '/usr/lib/oozie/libext'),
@@ -846,15 +833,11 @@ class TestOozieServer(RMFTestCase):
         not_if = "ambari-sudo.sh su oozie -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1'",
     )
 
-    self.assertResourceCalled('Execute', 'cd /var/tmp/oozie && /usr/lib/oozie/bin/oozie-setup.sh prepare-war',
-                              user="oozie")
-
     self.assertResourceCalled('File', '/usr/lib/oozie/.prepare_war_cmd',
                               content = 'cd /var/tmp/oozie && /usr/lib/oozie/bin/oozie-setup.sh prepare-war',
                               mode = 0644,
     )
     self.assertResourceCalled('File', '/usr/lib/oozie/.hashcode',
-                              content = 'abc123hash',
                               mode = 0644,
     )
     self.assertResourceCalled('Directory', '/var/lib/oozie/oozie-server',
@@ -1015,7 +998,7 @@ class TestOozieServer(RMFTestCase):
         create_parents = True,
     )
     self.assertResourceCalled('Execute', ('tar', '-xvf', '/usr/lib/oozie/oozie-sharelib.tar.gz', '-C', '/usr/lib/oozie'),
-        not_if = "ambari-sudo.sh su oozie -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1' || test -f /usr/lib/oozie/.hashcode && test -d /usr/lib/oozie/share && [[ `cat /usr/lib/oozie/.hashcode` == 'abc123hash' ]]",
+        not_if = "ambari-sudo.sh su oozie -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1' || test -f /usr/lib/oozie/.hashcode && test -d /usr/lib/oozie/share",
         sudo = True,
     )
     self.assertResourceCalled('Execute', ('cp', '/usr/share/HDP-oozie/ext-2.2.zip', '/usr/lib/oozie/libext'),
@@ -1039,15 +1022,11 @@ class TestOozieServer(RMFTestCase):
         not_if = "ambari-sudo.sh su oozie -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1'",
     )
 
-    self.assertResourceCalled('Execute', 'cd /var/tmp/oozie && /usr/lib/oozie/bin/oozie-setup.sh prepare-war -secure',
-                              user="oozie")
-
     self.assertResourceCalled('File', '/usr/lib/oozie/.prepare_war_cmd',
                               content = 'cd /var/tmp/oozie && /usr/lib/oozie/bin/oozie-setup.sh prepare-war -secure',
                               mode = 0644,
     )
     self.assertResourceCalled('File', '/usr/lib/oozie/.hashcode',
-                              content = 'abc123hash',
                               mode = 0644,
     )
     self.assertResourceCalled('Directory', '/var/lib/oozie/oozie-server',


[09/50] [abbrv] ambari git commit: AMBARI-15010 YARN metrics not written due to permissions on metrics properties file (dsen)

Posted by jo...@apache.org.
AMBARI-15010 YARN metrics not written due to permissions on metrics properties file (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9419400d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9419400d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9419400d

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 9419400da987d1a09c17ce511c1fdc307ad7ff65
Parents: a638ccb
Author: Dmytro Sen <ds...@apache.org>
Authored: Thu Feb 11 23:29:27 2016 +0200
Committer: Dmytro Sen <ds...@apache.org>
Committed: Thu Feb 11 23:29:27 2016 +0200

----------------------------------------------------------------------
 .../0.8/hooks/before-START/scripts/shared_initialization.py      | 1 +
 .../2.0.6/hooks/before-START/scripts/shared_initialization.py    | 1 +
 .../python/stacks/2.0.6/hooks/before-START/test_before_start.py  | 4 ++++
 3 files changed, 6 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9419400d/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/shared_initialization.py
index 2d2331a..265502f 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/shared_initialization.py
@@ -85,6 +85,7 @@ def setup_hadoop():
 
     File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
          owner=params.hdfs_user,
+         group=params.user_group,
          content=Template("hadoop-metrics2.properties.j2")
     )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/9419400d/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
index 85d8fec..21d3b43 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
@@ -98,6 +98,7 @@ def setup_hadoop():
 
       File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
            owner=params.hdfs_user,
+           group=params.user_group,
            content=Template("hadoop-metrics2.properties.j2")
       )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/9419400d/ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py
index da2b87f..90bd968 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py
@@ -73,6 +73,7 @@ class TestHookBeforeStart(RMFTestCase):
                               )
     self.assertResourceCalled('File', '/etc/hadoop/conf/hadoop-metrics2.properties',
                               content = Template('hadoop-metrics2.properties.j2'),
+                              group='hadoop',
                               owner = 'hdfs',
                               )
     self.assertResourceCalled('File', '/etc/hadoop/conf/task-log4j.properties',
@@ -146,6 +147,7 @@ class TestHookBeforeStart(RMFTestCase):
                               )
     self.assertResourceCalled('File', '/etc/hadoop/conf/hadoop-metrics2.properties',
                               content = Template('hadoop-metrics2.properties.j2'),
+                              group='hadoop',
                               owner = 'hdfs',
                               )
     self.assertResourceCalled('File', '/etc/hadoop/conf/task-log4j.properties',
@@ -224,6 +226,7 @@ class TestHookBeforeStart(RMFTestCase):
     )
     self.assertResourceCalled('File', '/etc/hadoop/conf/hadoop-metrics2.properties',
                               content = Template('hadoop-metrics2.properties.j2'),
+                              group='hadoop',
                               owner = 'hdfs',
                               )
     self.assertResourceCalled('File', '/etc/hadoop/conf/task-log4j.properties',
@@ -304,6 +307,7 @@ class TestHookBeforeStart(RMFTestCase):
     )
     self.assertResourceCalled('File', '/etc/hadoop/conf/hadoop-metrics2.properties',
                               content = Template('hadoop-metrics2.properties.j2'),
+                              group='hadoop',
                               owner = 'hdfs',
                               )
     self.assertResourceCalled('File', '/etc/hadoop/conf/task-log4j.properties',


[13/50] [abbrv] ambari git commit: AMBARI-14966: Stack Advisor incorrectly recommends Slave component on a host which does not have it installed during Add service wizard (bhuvnesh2703 via jaoki)

Posted by jo...@apache.org.
AMBARI-14966: Stack Advisor incorrectly recommends Slave component on a host which does not have it installed during Add service wizard (bhuvnesh2703 via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8aab6327
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8aab6327
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8aab6327

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 8aab63270ebfe34aa75c47a150fbb4a1838d0ec1
Parents: 6c6ec63
Author: Jun Aoki <ja...@apache.org>
Authored: Thu Feb 11 17:05:46 2016 -0800
Committer: Jun Aoki <ja...@apache.org>
Committed: Thu Feb 11 17:05:46 2016 -0800

----------------------------------------------------------------------
 .../src/main/resources/stacks/stack_advisor.py  |  3 +-
 .../stacks/2.0.6/common/test_stack_advisor.py   | 59 ++++++++++++++++++++
 2 files changed, 61 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8aab6327/ambari-server/src/main/resources/stacks/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/stack_advisor.py b/ambari-server/src/main/resources/stacks/stack_advisor.py
index d993feb..539bd25 100644
--- a/ambari-server/src/main/resources/stacks/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/stack_advisor.py
@@ -409,7 +409,8 @@ class DefaultStackAdvisor(StackAdvisor):
                 hostsMin = int(cardinality)
               if hostsMin > len(hostsForComponent):
                 hostsForComponent.extend(freeHosts[0:hostsMin-len(hostsForComponent)])
-            else:
+            # Components which are already installed, keep the recommendation as the existing layout
+            elif not componentIsPopulated:
               hostsForComponent.extend(freeHosts)
               if not hostsForComponent:  # hostsForComponent is empty
                 hostsForComponent = hostsList[-1:]

http://git-wip-us.apache.org/repos/asf/ambari/blob/8aab6327/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
index 22b16bb..4f059ba 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
@@ -1941,3 +1941,62 @@ class TestHDP206StackAdvisor(TestCase):
     siteProperties = stack_advisor.getServicesSiteProperties(services, "ranger-admin-site")
     self.assertEquals(siteProperties, expected)
 
+  def test_createComponentLayoutRecommendations_addService_1freeHost(self):
+    """
+    Test that already installed slaves are not added to any free hosts (not having any component installed)
+    as part of recommendation received during Add service operation.
+    For already installed services, recommendation for installed components should match the existing layout
+    """
+
+    services = {
+                  "services" : [
+                 {
+                    "StackServices" : {
+                      "service_name" : "HDFS"
+                    },
+                    "components" : [ {
+                      "StackServiceComponents" : {
+                        "cardinality" : "1+",
+                        "component_category" : "SLAVE",
+                        "component_name" : "DATANODE",
+                        "hostnames" : [ "c6401.ambari.apache.org" ]
+                      }
+                    } ]
+                 } ]
+              }
+
+    hosts = self.prepareHosts(["c6401.ambari.apache.org", "c6402.ambari.apache.org"])
+    recommendations = self.stackAdvisor.createComponentLayoutRecommendations(services, hosts)
+    """
+    Recommendation received should be as below:
+                               {
+                                  'blueprint': {
+                                          'host_groups': [{
+                                                  'name': 'host-group-1',
+                                                  'components': []
+                                          }, {
+                                                  'name': 'host-group-2',
+                                                  'components': [{
+                                                          'name': 'DATANODE'
+                                                  }]
+                                          }]
+                                  },
+                                  'blueprint_cluster_binding': {
+                                          'host_groups': [{
+                                                  'hosts': [{
+                                                          'fqdn': 'c6402.ambari.apache.org'
+                                                  }],
+                                                  'name': 'host-group-1'
+                                          }, {
+                                                  'hosts': [{
+                                                          'fqdn': 'c6401.ambari.apache.org'
+                                                  }],
+                                                  'name': 'host-group-2'
+                                          }]
+                                  }
+                           }
+    """
+    # Assert that the list is empty for host-group-1
+    self.assertFalse(recommendations['blueprint']['host_groups'][0]['components'])
+    # Assert that DATANODE is placed on host-group-2
+    self.assertEquals(recommendations['blueprint']['host_groups'][1]['components'][0]['name'], 'DATANODE')


[04/50] [abbrv] ambari git commit: AMBARI-14980. Provide explicit ordering for roles (rlevas)

Posted by jo...@apache.org.
AMBARI-14980. Provide explicit ordering for roles (rlevas)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2871d674
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2871d674
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2871d674

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 2871d674f57f46220c2448c6e5dbcb456f04878f
Parents: e049216
Author: Robert Levas <rl...@hortonworks.com>
Authored: Thu Feb 11 12:59:00 2016 -0500
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Thu Feb 11 12:59:04 2016 -0500

----------------------------------------------------------------------
 .../internal/PermissionResourceProvider.java    |  3 ++
 .../server/orm/entities/PermissionEntity.java   | 35 +++++++++++-
 .../server/upgrade/UpgradeCatalog240.java       | 32 ++++++++++-
 .../main/resources/Ambari-DDL-Derby-CREATE.sql  | 17 +++---
 .../main/resources/Ambari-DDL-MySQL-CREATE.sql  | 17 +++---
 .../main/resources/Ambari-DDL-Oracle-CREATE.sql | 19 +++----
 .../resources/Ambari-DDL-Postgres-CREATE.sql    | 17 +++---
 .../Ambari-DDL-Postgres-EMBEDDED-CREATE.sql     | 17 +++---
 .../resources/Ambari-DDL-SQLAnywhere-CREATE.sql | 17 +++---
 .../resources/Ambari-DDL-SQLServer-CREATE.sql   | 17 +++---
 .../PermissionResourceProviderTest.java         |  2 +
 .../server/upgrade/UpgradeCatalog240Test.java   | 57 ++++++++++++++++++--
 12 files changed, 187 insertions(+), 63 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2871d674/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PermissionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PermissionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PermissionResourceProvider.java
index 640123e..86f8321 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PermissionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PermissionResourceProvider.java
@@ -52,6 +52,7 @@ public class PermissionResourceProvider extends AbstractResourceProvider {
   public static final String PERMISSION_NAME_PROPERTY_ID = "PermissionInfo/permission_name";
   public static final String PERMISSION_LABEL_PROPERTY_ID = "PermissionInfo/permission_label";
   public static final String RESOURCE_NAME_PROPERTY_ID   = "PermissionInfo/resource_name";
+  public static final String SORT_ORDER_PROPERTY_ID   = "PermissionInfo/sort_order";
 
 
   /**
@@ -71,6 +72,7 @@ public class PermissionResourceProvider extends AbstractResourceProvider {
     propertyIds.add(PERMISSION_NAME_PROPERTY_ID);
     propertyIds.add(PERMISSION_LABEL_PROPERTY_ID);
     propertyIds.add(RESOURCE_NAME_PROPERTY_ID);
+    propertyIds.add(SORT_ORDER_PROPERTY_ID);
   }
 
 
@@ -156,6 +158,7 @@ public class PermissionResourceProvider extends AbstractResourceProvider {
     setResourceProperty(resource, PERMISSION_NAME_PROPERTY_ID, entity.getPermissionName(), requestedIds);
     setResourceProperty(resource, PERMISSION_LABEL_PROPERTY_ID, entity.getPermissionLabel(), requestedIds);
     setResourceProperty(resource, RESOURCE_NAME_PROPERTY_ID, entity.getResourceType().getName(), requestedIds);
+    setResourceProperty(resource, SORT_ORDER_PROPERTY_ID, entity.getSortOrder(), requestedIds);
 
     return resource;
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/2871d674/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/PermissionEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/PermissionEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/PermissionEntity.java
index a692730..43fd71b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/PermissionEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/PermissionEntity.java
@@ -57,8 +57,11 @@ public class PermissionEntity {
    * Admin permission name constants.
    */
   public static final String AMBARI_ADMINISTRATOR_PERMISSION_NAME = "AMBARI.ADMINISTRATOR";
-  public static final String CLUSTER_USER_PERMISSION_NAME = "CLUSTER.USER";
   public static final String CLUSTER_ADMINISTRATOR_PERMISSION_NAME = "CLUSTER.ADMINISTRATOR";
+  public static final String CLUSTER_OPERATOR_PERMISSION_NAME = "CLUSTER.OPERATOR";
+  public static final String SERVICE_ADMINISTRATOR_PERMISSION_NAME = "SERVICE.ADMINISTRATOR";
+  public static final String SERVICE_OPERATOR_PERMISSION_NAME = "SERVICE.OPERATOR";
+  public static final String CLUSTER_USER_PERMISSION_NAME = "CLUSTER.USER";
   public static final String VIEW_USER_PERMISSION_NAME = "VIEW.USER";
 
   /**
@@ -102,6 +105,11 @@ public class PermissionEntity {
   )
   private Collection<RoleAuthorizationEntity> authorizations;
 
+  /**
+   * The permission's explicit sort order
+   */
+  @Column(name = "sort_order", nullable = false)
+  private Integer sortOrder = 1;
 
   // ----- PermissionEntity ---------------------------------------------------
 
@@ -195,7 +203,28 @@ public class PermissionEntity {
     this.authorizations = authorizations;
   }
 
-// ----- Object overrides --------------------------------------------------
+  /**
+   * Gets the explicit sort order value for this PermissionEntity
+   * <p/>
+   * This value is used to help explicitly order permission entities. For example, order from most
+   * permissive to least permissive.
+   *
+   * @return the explict sorting order value
+   */
+  public Integer getSortOrder() {
+    return sortOrder;
+  }
+
+  /**
+   * Sets the explicit sort order value for this PermissionEntity
+   *
+   * @param sortOrder a sorting order value
+   */
+  public void setSortOrder(Integer sortOrder) {
+    this.sortOrder = sortOrder;
+  }
+
+  // ----- Object overrides --------------------------------------------------
 
   @Override
   public boolean equals(Object o) {
@@ -208,6 +237,7 @@ public class PermissionEntity {
         !(permissionName != null ? !permissionName.equals(that.permissionName) : that.permissionName != null) &&
         !(permissionLabel != null ? !permissionLabel.equals(that.permissionLabel) : that.permissionLabel != null) &&
         !(resourceType != null ? !resourceType.equals(that.resourceType) : that.resourceType != null) &&
+        !(sortOrder != null ? !sortOrder.equals(that.sortOrder) : that.sortOrder != null) &&
         !(authorizations != null ? !authorizations.equals(that.authorizations) : that.authorizations != null);
   }
 
@@ -217,6 +247,7 @@ public class PermissionEntity {
     result = 31 * result + (permissionName != null ? permissionName.hashCode() : 0);
     result = 31 * result + (permissionLabel != null ? permissionLabel.hashCode() : 0);
     result = 31 * result + (resourceType != null ? resourceType.hashCode() : 0);
+    result = 31 * result + (sortOrder != null ? sortOrder.hashCode() : 0);
     result = 31 * result + (authorizations != null ? authorizations.hashCode() : 0);
     return result;
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/2871d674/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
index 3414388..2ea326a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
@@ -27,9 +27,11 @@ import com.google.inject.Inject;
 import com.google.inject.Injector;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
 import org.apache.ambari.server.orm.dao.DaoUtils;
 import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
+import org.apache.ambari.server.orm.entities.PermissionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.slf4j.Logger;
@@ -49,6 +51,10 @@ import java.util.UUID;
  */
 public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
 
+  protected static final String ADMIN_PERMISSION_TABLE = "adminpermission";
+  protected static final String PERMISSION_ID_COL = "permission_name";
+  protected static final String SORT_ORDER_COL = "sort_order";
+
   @Inject
   DaoUtils daoUtils;
 
@@ -96,7 +102,7 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
 
   @Override
   protected void executeDDLUpdates() throws AmbariException, SQLException {
-    //To change body of implemented methods use File | Settings | File Templates.
+    updateAdminPermissionTable();
   }
 
   @Override
@@ -108,6 +114,7 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
   protected void executeDMLUpdates() throws AmbariException, SQLException {
     addNewConfigurationsFromXml();
     updateAlerts();
+    setRoleSortOrder();
 
   }
 
@@ -303,6 +310,29 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
     return sourceJson.toString();
   }
 
+  protected void updateAdminPermissionTable() throws SQLException {
+    // Add the sort_order column to the adminpermission table
+    dbAccessor.addColumn(ADMIN_PERMISSION_TABLE, new DBAccessor.DBColumnInfo(SORT_ORDER_COL, Short.class, null, 1, false));
+  }
 
+  protected void setRoleSortOrder() throws SQLException {
+    String updateStatement = "UPDATE " + ADMIN_PERMISSION_TABLE + " SET " + SORT_ORDER_COL + "=%d WHERE " + PERMISSION_ID_COL + "='%s'";
+
+    LOG.info("Setting permission labels");
+    dbAccessor.executeUpdate(String.format(updateStatement,
+        1, PermissionEntity.AMBARI_ADMINISTRATOR_PERMISSION_NAME));
+    dbAccessor.executeUpdate(String.format(updateStatement,
+        2, PermissionEntity.CLUSTER_ADMINISTRATOR_PERMISSION_NAME));
+    dbAccessor.executeUpdate(String.format(updateStatement,
+        3, PermissionEntity.CLUSTER_OPERATOR_PERMISSION_NAME));
+    dbAccessor.executeUpdate(String.format(updateStatement,
+        4, PermissionEntity.SERVICE_ADMINISTRATOR_PERMISSION_NAME));
+    dbAccessor.executeUpdate(String.format(updateStatement,
+        5, PermissionEntity.SERVICE_OPERATOR_PERMISSION_NAME));
+    dbAccessor.executeUpdate(String.format(updateStatement,
+        6, PermissionEntity.CLUSTER_USER_PERMISSION_NAME));
+    dbAccessor.executeUpdate(String.format(updateStatement,
+        7, PermissionEntity.VIEW_USER_PERMISSION_NAME));
+  }
 
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/2871d674/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
index 0fdfd2b..8aee031 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
@@ -525,6 +525,7 @@ CREATE TABLE adminpermission (
   permission_name VARCHAR(255) NOT NULL,
   resource_type_id INTEGER NOT NULL,
   permission_label VARCHAR(255),
+  sort_order SMALLINT NOT NULL DEFAULT 1,
   PRIMARY KEY(permission_id));
 
 CREATE TABLE roleauthorization (
@@ -1058,20 +1059,20 @@ INSERT INTO adminprincipal (principal_id, principal_type_id)
 INSERT INTO Users (user_id, principal_id, user_name, user_password)
   SELECT 1, 1, 'admin', '538916f8943ec225d97a9a86a2c6ec0818c1cd400e09e03b660fdaaec4af29ddbb6f2b1033b81b00' FROM SYSIBM.SYSDUMMY1;
 
-insert into adminpermission(permission_id, permission_name, resource_type_id, permission_label)
-  SELECT 1, 'AMBARI.ADMINISTRATOR', 1, 'Administrator' FROM SYSIBM.SYSDUMMY1
+insert into adminpermission(permission_id, permission_name, resource_type_id, permission_label, sort_order)
+  SELECT 1, 'AMBARI.ADMINISTRATOR', 1, 'Administrator', 1 FROM SYSIBM.SYSDUMMY1
   UNION ALL
-  SELECT 2, 'CLUSTER.USER', 2, 'Cluster User' FROM SYSIBM.SYSDUMMY1
+  SELECT 2, 'CLUSTER.USER', 2, 'Cluster User', 6 FROM SYSIBM.SYSDUMMY1
   UNION ALL
-  SELECT 3, 'CLUSTER.ADMINISTRATOR', 2, 'Cluster Administrator' FROM SYSIBM.SYSDUMMY1
+  SELECT 3, 'CLUSTER.ADMINISTRATOR', 2, 'Cluster Administrator', 2 FROM SYSIBM.SYSDUMMY1
   UNION ALL
-  SELECT 4, 'VIEW.USER', 3, 'View User' FROM SYSIBM.SYSDUMMY1
+  SELECT 4, 'VIEW.USER', 3, 'View User', 7 FROM SYSIBM.SYSDUMMY1
   UNION ALL
-  SELECT 5, 'CLUSTER.OPERATOR', 2, 'Cluster Operator' FROM SYSIBM.SYSDUMMY1
+  SELECT 5, 'CLUSTER.OPERATOR', 2, 'Cluster Operator', 3 FROM SYSIBM.SYSDUMMY1
   UNION ALL
-  SELECT 6, 'SERVICE.ADMINISTRATOR', 2, 'Service Administrator' FROM SYSIBM.SYSDUMMY1
+  SELECT 6, 'SERVICE.ADMINISTRATOR', 2, 'Service Administrator', 4 FROM SYSIBM.SYSDUMMY1
   UNION ALL
-  SELECT 7, 'SERVICE.OPERATOR', 2, 'Service Operator' FROM SYSIBM.SYSDUMMY1;
+  SELECT 7, 'SERVICE.OPERATOR', 2, 'Service Operator', 5 FROM SYSIBM.SYSDUMMY1;
 
 INSERT INTO roleauthorization(authorization_id, authorization_name)
   SELECT 'VIEW.USE', 'Use View' FROM SYSIBM.SYSDUMMY1 UNION ALL

http://git-wip-us.apache.org/repos/asf/ambari/blob/2871d674/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
index 11e43c2..73b172a 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
@@ -533,6 +533,7 @@ CREATE TABLE adminpermission (
   permission_name VARCHAR(255) NOT NULL,
   resource_type_id INTEGER NOT NULL,
   permission_label VARCHAR(255),
+  sort_order SMALLINT NOT NULL DEFAULT 1,
   PRIMARY KEY(permission_id));
 
 CREATE TABLE roleauthorization (
@@ -1021,20 +1022,20 @@ insert into adminprincipal (principal_id, principal_type_id)
 insert into users(user_id, principal_id, user_name, user_password)
   select 1, 1, 'admin','538916f8943ec225d97a9a86a2c6ec0818c1cd400e09e03b660fdaaec4af29ddbb6f2b1033b81b00';
 
-insert into adminpermission(permission_id, permission_name, resource_type_id, permission_label)
-  select 1, 'AMBARI.ADMINISTRATOR', 1, 'Administrator'
+insert into adminpermission(permission_id, permission_name, resource_type_id, permission_label, sort_order)
+  select 1, 'AMBARI.ADMINISTRATOR', 1, 'Administrator', 1
   union all
-  select 2, 'CLUSTER.USER', 2, 'Cluster User'
+  select 2, 'CLUSTER.USER', 2, 'Cluster User', 6
   union all
-  select 3, 'CLUSTER.ADMINISTRATOR', 2, 'Cluster Administrator'
+  select 3, 'CLUSTER.ADMINISTRATOR', 2, 'Cluster Administrator', 2
   union all
-  select 4, 'VIEW.USER', 3, 'View User'
+  select 4, 'VIEW.USER', 3, 'View User', 7
   union all
-  select 5, 'CLUSTER.OPERATOR', 2, 'Cluster Operator'
+select 5, 'CLUSTER.OPERATOR', 2, 'Cluster Operator', 3
   union all
-  select 6, 'SERVICE.ADMINISTRATOR', 2, 'Service Administrator'
+  select 6, 'SERVICE.ADMINISTRATOR', 2, 'Service Administrator', 4
   union all
-  select 7, 'SERVICE.OPERATOR', 2, 'Service Operator';
+  select 7, 'SERVICE.OPERATOR', 2, 'Service Operator', 5;
 
 INSERT INTO roleauthorization(authorization_id, authorization_name)
   SELECT 'VIEW.USE', 'Use View' UNION ALL

http://git-wip-us.apache.org/repos/asf/ambari/blob/2871d674/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
index 8d4ba28..1df396d 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
@@ -522,6 +522,7 @@ CREATE TABLE adminpermission (
   permission_name VARCHAR(255) NOT NULL,
   resource_type_id NUMBER(10) NOT NULL,
   permission_label VARCHAR(255),
+  sort_order SMALLINT DEFAULT 1 NOT NULL,
   PRIMARY KEY(permission_id));
 
 CREATE TABLE roleauthorization (
@@ -656,7 +657,7 @@ CREATE TABLE setting (
   name VARCHAR(255) NOT NULL UNIQUE,
   setting_type VARCHAR(255) NOT NULL,
   content CLOB NOT NULL,
-  updated_by VARCHAR(255) NOT NULL DEFAULT '_db',
+  updated_by VARCHAR(255) DEFAULT '_db' NOT NULL,
   update_timestamp NUMBER(19) NOT NULL,
   PRIMARY KEY (id)
 );
@@ -1013,20 +1014,20 @@ insert into adminprincipal (principal_id, principal_type_id)
 insert into users(user_id, principal_id, user_name, user_password)
 select 1,1,'admin','538916f8943ec225d97a9a86a2c6ec0818c1cd400e09e03b660fdaaec4af29ddbb6f2b1033b81b00' from dual;
 
-insert into adminpermission(permission_id, permission_name, resource_type_id, permission_label)
-  select 1, 'AMBARI.ADMINISTRATOR', 1, 'Administrator' from dual
+insert into adminpermission(permission_id, permission_name, resource_type_id, permission_label, sort_order)
+  select 1, 'AMBARI.ADMINISTRATOR', 1, 'Administrator', 1 from dual
   union all
-  select 2, 'CLUSTER.USER', 2, 'Cluster User' from dual
+  select 2, 'CLUSTER.USER', 2, 'Cluster User', 6 from dual
   union all
-  select 3, 'CLUSTER.ADMINISTRATOR', 2, 'Cluster Administrator' from dual
+  select 3, 'CLUSTER.ADMINISTRATOR', 2, 'Cluster Administrator', 2 from dual
   union all
-  select 4, 'VIEW.USER', 3, 'View User' from dual
+  select 4, 'VIEW.USER', 3, 'View User', 7 from dual
   union all
-  select 5, 'CLUSTER.OPERATOR', 2, 'Cluster Operator' from dual
+  select 5, 'CLUSTER.OPERATOR', 2, 'Cluster Operator', 3 from dual
   union all
-  select 6, 'SERVICE.ADMINISTRATOR', 2, 'Service Administrator' from dual
+  select 6, 'SERVICE.ADMINISTRATOR', 2, 'Service Administrator', 4 from dual
   union all
-  select 7, 'SERVICE.OPERATOR', 2, 'Service Operator' from dual;
+  select 7, 'SERVICE.OPERATOR', 2, 'Service Operator', 5 from dual;
 
 INSERT INTO roleauthorization(authorization_id, authorization_name)
   SELECT 'VIEW.USE', 'Use View' FROM dual UNION ALL

http://git-wip-us.apache.org/repos/asf/ambari/blob/2871d674/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
index c762ac4..d948b3b 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
@@ -525,6 +525,7 @@ CREATE TABLE adminpermission (
   permission_name VARCHAR(255) NOT NULL,
   resource_type_id INTEGER NOT NULL,
   permission_label VARCHAR(255),
+  sort_order SMALLINT NOT NULL DEFAULT 1,
   PRIMARY KEY(permission_id));
 
 CREATE TABLE roleauthorization (
@@ -1058,20 +1059,20 @@ INSERT INTO adminprincipal (principal_id, principal_type_id)
 INSERT INTO Users (user_id, principal_id, user_name, user_password)
   SELECT 1, 1, 'admin', '538916f8943ec225d97a9a86a2c6ec0818c1cd400e09e03b660fdaaec4af29ddbb6f2b1033b81b00';
 
-insert into adminpermission(permission_id, permission_name, resource_type_id, permission_label)
-  SELECT 1, 'AMBARI.ADMINISTRATOR', 1, 'Administrator'
+insert into adminpermission(permission_id, permission_name, resource_type_id, permission_label, sort_order)
+  SELECT 1, 'AMBARI.ADMINISTRATOR', 1, 'Administrator', 1
   UNION ALL
-  SELECT 2, 'CLUSTER.USER', 2, 'Cluster User'
+  SELECT 2, 'CLUSTER.USER', 2, 'Cluster User', 6
   UNION ALL
-  SELECT 3, 'CLUSTER.ADMINISTRATOR', 2, 'Cluster Administrator'
+  SELECT 3, 'CLUSTER.ADMINISTRATOR', 2, 'Cluster Administrator', 2
   UNION ALL
-  SELECT 4, 'VIEW.USER', 3, 'View User'
+  SELECT 4, 'VIEW.USER', 3, 'View User', 7
   UNION ALL
-  SELECT 5, 'CLUSTER.OPERATOR', 2, 'Cluster Operator'
+  SELECT 5, 'CLUSTER.OPERATOR', 2, 'Cluster Operator', 3
   UNION ALL
-  SELECT 6, 'SERVICE.ADMINISTRATOR', 2, 'Service Administrator'
+  SELECT 6, 'SERVICE.ADMINISTRATOR', 2, 'Service Administrator', 4
   UNION ALL
-  SELECT 7, 'SERVICE.OPERATOR', 2, 'Service Operator';
+  SELECT 7, 'SERVICE.OPERATOR', 2, 'Service Operator', 5;
 
 INSERT INTO roleauthorization(authorization_id, authorization_name)
   SELECT 'VIEW.USE', 'Use View' UNION ALL

http://git-wip-us.apache.org/repos/asf/ambari/blob/2871d674/ambari-server/src/main/resources/Ambari-DDL-Postgres-EMBEDDED-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Postgres-EMBEDDED-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Postgres-EMBEDDED-CREATE.sql
index 81b41fe..5fe742c 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Postgres-EMBEDDED-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Postgres-EMBEDDED-CREATE.sql
@@ -586,6 +586,7 @@ CREATE TABLE ambari.adminpermission (
   permission_name VARCHAR(255) NOT NULL,
   resource_type_id INTEGER NOT NULL,
   permission_label VARCHAR(255),
+  sort_order SMALLINT NOT NULL DEFAULT 1,
   PRIMARY KEY(permission_id));
 
 CREATE TABLE ambari.roleauthorization (
@@ -1157,20 +1158,20 @@ INSERT INTO ambari.adminprincipal (principal_id, principal_type_id)
 INSERT INTO ambari.Users (user_id, principal_id, user_name, user_password)
   SELECT 1, 1, 'admin', '538916f8943ec225d97a9a86a2c6ec0818c1cd400e09e03b660fdaaec4af29ddbb6f2b1033b81b00';
 
-insert into ambari.adminpermission(permission_id, permission_name, resource_type_id, permission_label)
-  SELECT 1, 'AMBARI.ADMINISTRATOR', 1, 'Administrator'
+insert into ambari.adminpermission(permission_id, permission_name, resource_type_id, permission_label, sort_order)
+  SELECT 1, 'AMBARI.ADMINISTRATOR', 1, 'Administrator', 1
   UNION ALL
-  SELECT 2, 'CLUSTER.USER', 2, 'Cluster User'
+  SELECT 2, 'CLUSTER.USER', 2, 'Cluster User', 6
   UNION ALL
-  SELECT 3, 'CLUSTER.ADMINISTRATOR', 2, 'Cluster Administrator'
+  SELECT 3, 'CLUSTER.ADMINISTRATOR', 2, 'Cluster Administrator', 2
   UNION ALL
-  SELECT 4, 'VIEW.USER', 3, 'View User'
+  SELECT 4, 'VIEW.USER', 3, 'View User', 7
   UNION ALL
-  SELECT 5, 'CLUSTER.OPERATOR', 2, 'Cluster Operator'
+  SELECT 5, 'CLUSTER.OPERATOR', 2, 'Cluster Operator', 3
   UNION ALL
-  SELECT 6, 'SERVICE.ADMINISTRATOR', 2, 'Service Administrator'
+  SELECT 6, 'SERVICE.ADMINISTRATOR', 2, 'Service Administrator', 4
   UNION ALL
-  SELECT 7, 'SERVICE.OPERATOR', 2, 'Service Operator';
+  SELECT 7, 'SERVICE.OPERATOR', 2, 'Service Operator', 5;
 
 INSERT INTO ambari.roleauthorization(authorization_id, authorization_name)
   SELECT 'VIEW.USE', 'Use View' UNION ALL

http://git-wip-us.apache.org/repos/asf/ambari/blob/2871d674/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
index f8c9b8d..fe1e505 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
@@ -522,6 +522,7 @@ CREATE TABLE adminpermission (
   permission_name VARCHAR(255) NOT NULL,
   resource_type_id INTEGER NOT NULL,
   permission_label VARCHAR(255),
+  sort_order SMALLINT NOT NULL DEFAULT 1,
   PRIMARY KEY(permission_id));
 
 CREATE TABLE roleauthorization (
@@ -1008,20 +1009,20 @@ insert into adminprincipal (principal_id, principal_type_id)
 insert into users(user_id, principal_id, user_name, user_password)
   select 1, 1, 'admin','538916f8943ec225d97a9a86a2c6ec0818c1cd400e09e03b660fdaaec4af29ddbb6f2b1033b81b00';
 
-insert into adminpermission(permission_id, permission_name, resource_type_id, permission_label)
-  select 1, 'AMBARI.ADMINISTRATOR', 1, 'Administrator'
+insert into adminpermission(permission_id, permission_name, resource_type_id, permission_label, sort_order)
+  select 1, 'AMBARI.ADMINISTRATOR', 1, 'Administrator', 1
   union all
-  select 2, 'CLUSTER.USER', 2, 'Cluster User'
+  select 2, 'CLUSTER.USER', 2, 'Cluster User', 6
   union all
-  select 3, 'CLUSTER.ADMINISTRATOR', 2, 'Cluster Administrator'
+  select 3, 'CLUSTER.ADMINISTRATOR', 2, 'Cluster Administrator', 2
   union all
-  select 4, 'VIEW.USER', 3, 'View User'
+  select 4, 'VIEW.USER', 3, 'View User'. 7
   union all
-  select 5, 'CLUSTER.OPERATOR', 2, 'Cluster Operator'
+  select 5, 'CLUSTER.OPERATOR', 2, 'Cluster Operator', 3
   union all
-  select 6, 'SERVICE.ADMINISTRATOR', 2, 'Service Administrator'
+  select 6, 'SERVICE.ADMINISTRATOR', 2, 'Service Administrator', 4
   union all
-  select 7, 'SERVICE.OPERATOR', 2, 'Service Operator';
+  select 7, 'SERVICE.OPERATOR', 2, 'Service Operator', 5;
 
   INSERT INTO roleauthorization(authorization_id, authorization_name)
     SELECT 'VIEW.USE', 'Use View' UNION ALL

http://git-wip-us.apache.org/repos/asf/ambari/blob/2871d674/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
index 324c24d..41dac77 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
@@ -617,6 +617,7 @@ CREATE TABLE adminpermission (
   permission_name VARCHAR(255) NOT NULL,
   resource_type_id INTEGER NOT NULL,
   permission_label VARCHAR(255),
+  sort_order SMALLINT NOT NULL DEFAULT 1,
   PRIMARY KEY CLUSTERED (permission_id)
   );
 
@@ -1123,15 +1124,15 @@ BEGIN TRANSACTION
   insert into users(user_id, principal_id, user_name, user_password)
     select 1, 1, 'admin','538916f8943ec225d97a9a86a2c6ec0818c1cd400e09e03b660fdaaec4af29ddbb6f2b1033b81b00';
 
-  insert into adminpermission(permission_id, permission_name, resource_type_id, permission_label)
+  insert into adminpermission(permission_id, permission_name, resource_type_id, permission_label, sort_order)
   values
-    (1, 'AMBARI.ADMINISTRATOR', 1, 'Administrator'),
-    (2, 'CLUSTER.USER', 2, 'Cluster User'),
-    (3, 'CLUSTER.ADMINISTRATOR', 2, 'Cluster Administrator'),
-    (4, 'VIEW.USER', 3, 'View User'),
-    (5, 'CLUSTER.OPERATOR', 2, 'Cluster Operator'),
-    (6, 'SERVICE.ADMINISTRATOR', 2, 'Service Administrator'),
-    (7, 'SERVICE.OPERATOR', 2, 'Service Operator');
+    (1, 'AMBARI.ADMINISTRATOR', 1, 'Administrator', 1),
+    (2, 'CLUSTER.USER', 2, 'Cluster User', 6),
+    (3, 'CLUSTER.ADMINISTRATOR', 2, 'Cluster Administrator', 2),
+    (4, 'VIEW.USER', 3, 'View User', 7),
+    (5, 'CLUSTER.OPERATOR', 2, 'Cluster Operator', 3),
+    (6, 'SERVICE.ADMINISTRATOR', 2, 'Service Administrator', 4),
+    (7, 'SERVICE.OPERATOR', 2, 'Service Operator', 5);
 
   INSERT INTO roleauthorization(authorization_id, authorization_name)
     SELECT 'VIEW.USE', 'Use View' UNION ALL

http://git-wip-us.apache.org/repos/asf/ambari/blob/2871d674/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PermissionResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PermissionResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PermissionResourceProviderTest.java
index 7658c0f..fb4454e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PermissionResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PermissionResourceProviderTest.java
@@ -84,6 +84,7 @@ public class PermissionResourceProviderTest {
     expect(permissionEntity.getId()).andReturn(99);
     expect(permissionEntity.getPermissionName()).andReturn("AMBARI.ADMINISTRATOR");
     expect(permissionEntity.getPermissionLabel()).andReturn("Administrator");
+    expect(permissionEntity.getSortOrder()).andReturn(1);
     expect(permissionEntity.getResourceType()).andReturn(resourceTypeEntity);
     expect(resourceTypeEntity.getName()).andReturn("AMBARI");
 
@@ -98,6 +99,7 @@ public class PermissionResourceProviderTest {
     Assert.assertEquals("AMBARI.ADMINISTRATOR", resource.getPropertyValue(PermissionResourceProvider.PERMISSION_NAME_PROPERTY_ID));
     Assert.assertEquals("Administrator", resource.getPropertyValue(PermissionResourceProvider.PERMISSION_LABEL_PROPERTY_ID));
     Assert.assertEquals("AMBARI", resource.getPropertyValue(PermissionResourceProvider.RESOURCE_NAME_PROPERTY_ID));
+    Assert.assertEquals(1, resource.getPropertyValue(PermissionResourceProvider.SORT_ORDER_PROPERTY_ID));
     verify(dao, permissionEntity, resourceTypeEntity);
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/2871d674/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
index d1d68f2..608a348 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
@@ -25,25 +25,32 @@ import com.google.inject.Provider;
 import com.google.inject.persist.PersistService;
 import junit.framework.Assert;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.dao.StackDAO;
 import org.apache.ambari.server.orm.entities.StackEntity;
+import org.easymock.Capture;
+import org.easymock.CaptureType;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
 import javax.persistence.EntityManager;
+import java.lang.reflect.Field;
 import java.lang.reflect.Method;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
+import static org.easymock.EasyMock.capture;
 import static org.easymock.EasyMock.createMockBuilder;
 import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.createStrictMock;
+import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.newCapture;
 import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.reset;
 import static org.easymock.EasyMock.verify;
@@ -79,28 +86,72 @@ public class UpgradeCatalog240Test {
   }
 
   @Test
+  public void testExecuteDDLUpdates() throws Exception {
+    UpgradeCatalog240 upgradeCatalog240 = injector.getInstance(UpgradeCatalog240.class);
+
+    Capture<DBAccessor.DBColumnInfo> capturedColumnInfo = newCapture();
+
+    DBAccessor dbAccessor = createStrictMock(DBAccessor.class);
+    dbAccessor.addColumn(eq("adminpermission"), capture(capturedColumnInfo));
+    expectLastCall().once();
+
+    Field field = AbstractUpgradeCatalog.class.getDeclaredField("dbAccessor");
+    field.set(upgradeCatalog240, dbAccessor);
+
+    replay(dbAccessor);
+
+    upgradeCatalog240.executeDDLUpdates();
+
+    verify(dbAccessor);
+
+    DBAccessor.DBColumnInfo columnInfo = capturedColumnInfo.getValue();
+    Assert.assertNotNull(columnInfo);
+    Assert.assertEquals(UpgradeCatalog240.SORT_ORDER_COL, columnInfo.getName());
+    Assert.assertEquals(null, columnInfo.getLength());
+    Assert.assertEquals(Short.class, columnInfo.getType());
+    Assert.assertEquals(1, columnInfo.getDefaultValue());
+    Assert.assertEquals(false, columnInfo.isNullable());
+  }
+
+  @Test
   public void testExecuteDMLUpdates() throws Exception {
     Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
     Method updateAlerts = UpgradeCatalog240.class.getDeclaredMethod("updateAlerts");
 
+    Capture<String> capturedStatements = newCapture(CaptureType.ALL);
 
+    DBAccessor dbAccessor = createStrictMock(DBAccessor.class);
+    expect(dbAccessor.executeUpdate(capture(capturedStatements))).andReturn(1).times(7);
 
     UpgradeCatalog240 upgradeCatalog240 = createMockBuilder(UpgradeCatalog240.class)
             .addMockedMethod(addNewConfigurationsFromXml)
             .addMockedMethod(updateAlerts)
             .createMock();
 
+    Field field = AbstractUpgradeCatalog.class.getDeclaredField("dbAccessor");
+    field.set(upgradeCatalog240, dbAccessor);
+
     upgradeCatalog240.addNewConfigurationsFromXml();
     expectLastCall().once();
     upgradeCatalog240.updateAlerts();
     expectLastCall().once();
 
-
-    replay(upgradeCatalog240);
+    replay(upgradeCatalog240, dbAccessor);
 
     upgradeCatalog240.executeDMLUpdates();
 
-    verify(upgradeCatalog240);
+    verify(upgradeCatalog240, dbAccessor);
+
+    List<String> statements = capturedStatements.getValues();
+    Assert.assertNotNull(statements);
+    Assert.assertEquals(7, statements.size());
+    Assert.assertTrue(statements.contains("UPDATE adminpermission SET sort_order=1 WHERE permission_name='AMBARI.ADMINISTRATOR'"));
+    Assert.assertTrue(statements.contains("UPDATE adminpermission SET sort_order=2 WHERE permission_name='CLUSTER.ADMINISTRATOR'"));
+    Assert.assertTrue(statements.contains("UPDATE adminpermission SET sort_order=3 WHERE permission_name='CLUSTER.OPERATOR'"));
+    Assert.assertTrue(statements.contains("UPDATE adminpermission SET sort_order=4 WHERE permission_name='SERVICE.ADMINISTRATOR'"));
+    Assert.assertTrue(statements.contains("UPDATE adminpermission SET sort_order=5 WHERE permission_name='SERVICE.OPERATOR'"));
+    Assert.assertTrue(statements.contains("UPDATE adminpermission SET sort_order=6 WHERE permission_name='CLUSTER.USER'"));
+    Assert.assertTrue(statements.contains("UPDATE adminpermission SET sort_order=7 WHERE permission_name='VIEW.USER'"));
   }
 
   @Test


[36/50] [abbrv] ambari git commit: AMBARI-15049 Sometimes background operations have incorrect order. (atkach)

Posted by jo...@apache.org.
AMBARI-15049 Sometimes background operations have incorrect order. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7f3928ba
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7f3928ba
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7f3928ba

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 7f3928bad958f9c57bb21afd43634e3d9ddfedff
Parents: c86964b
Author: Andrii Tkach <at...@hortonworks.com>
Authored: Mon Feb 15 17:24:48 2016 +0200
Committer: Andrii Tkach <at...@hortonworks.com>
Committed: Mon Feb 15 17:48:35 2016 +0200

----------------------------------------------------------------------
 .../global/background_operations_controller.js      | 15 +++++++++++----
 ambari-web/app/utils/host_progress_popup.js         | 10 ++++++----
 .../global/background_operations_test.js            | 16 ++++++++++++++++
 3 files changed, 33 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7f3928ba/ambari-web/app/controllers/global/background_operations_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/global/background_operations_controller.js b/ambari-web/app/controllers/global/background_operations_controller.js
index 76c517c..4156e46 100644
--- a/ambari-web/app/controllers/global/background_operations_controller.js
+++ b/ambari-web/app/controllers/global/background_operations_controller.js
@@ -261,19 +261,26 @@ App.BackgroundOperationsController = Em.Controller.extend({
     this.set('isShowMoreAvailable', countGot >= countIssued);
     this.set('serviceTimestamp', App.dateTimeWithTimeZone());
   },
+
   isShowMoreAvailable: null,
+
   /**
    * remove old requests
    * as API returns 10, or  20 , or 30 ...etc latest request, the requests that absent in response should be removed
    * @param currentRequestIds
    */
   removeOldRequests: function (currentRequestIds) {
-    this.get('services').forEach(function (service, index, services) {
-      if (!currentRequestIds.contains(service.id)) {
-        services.splice(index, 1);
+    var services = this.get('services');
+
+    for (var i = 0, l = services.length; i < l; i++) {
+      if (!currentRequestIds.contains(services[i].id)) {
+        services.splice(i, 1);
+        i--;
+        l--;
       }
-    });
+    }
   },
+
   /**
    * identify whether request is running by task counters
    * @param request

http://git-wip-us.apache.org/repos/asf/ambari/blob/7f3928ba/ambari-web/app/utils/host_progress_popup.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/host_progress_popup.js b/ambari-web/app/utils/host_progress_popup.js
index 726a96a..395afb5 100644
--- a/ambari-web/app/utils/host_progress_popup.js
+++ b/ambari-web/app/utils/host_progress_popup.js
@@ -523,11 +523,13 @@ App.HostPopup = Em.Object.create({
    * @method removeOldServices
    */
   removeOldServices: function (services, currentServicesIds) {
-    services.forEach(function (service, index, services) {
-      if (!currentServicesIds.contains(service.id)) {
-        services.removeAt(index, 1);
+    for (var i = 0, l = services.length; i < l; i++) {
+      if (!currentServicesIds.contains(services[i].id)) {
+        services.splice(i, 1);
+        i--;
+        l--;
       }
-    });
+    }
   },
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/7f3928ba/ambari-web/test/controllers/global/background_operations_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/global/background_operations_test.js b/ambari-web/test/controllers/global/background_operations_test.js
index 445eeb0..b22c105 100644
--- a/ambari-web/test/controllers/global/background_operations_test.js
+++ b/ambari-web/test/controllers/global/background_operations_test.js
@@ -310,6 +310,22 @@ describe('App.BackgroundOperationsController', function () {
         result: [
           {id: 2}
         ]
+      },
+      {
+        title: 'two old request and two current',
+        content: {
+          currentRequestIds: [3, 4],
+          services: [
+            {id: 1},
+            {id: 2},
+            {id: 3},
+            {id: 4}
+          ]
+        },
+        result: [
+          {id: 3},
+          {id: 4}
+        ]
       }
     ];
 


[31/50] [abbrv] ambari git commit: Revert "AMBARI-14933 ranger audit db password is required even when audit to db is off.(ababiichuk)"

Posted by jo...@apache.org.
Revert "AMBARI-14933 ranger audit db password is required even when audit to db is off.(ababiichuk)"

This reverts commit 94932c99bb8ff5ad309c15c6a0ecf382f428c73b.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/24447458
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/24447458
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/24447458

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 2444745889faed0ccf38c3696c8e91f7e32e4c5b
Parents: 1319763
Author: Jaimin Jetly <ja...@hortonworks.com>
Authored: Sat Feb 13 13:04:57 2016 +0530
Committer: Jaimin Jetly <ja...@hortonworks.com>
Committed: Sat Feb 13 13:04:57 2016 +0530

----------------------------------------------------------------------
 .../services/RANGER/themes/theme_version_2.json | 20 +-------------------
 .../configs/widgets/config_widget_view.js       |  3 ---
 2 files changed, 1 insertion(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/24447458/ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/themes/theme_version_2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/themes/theme_version_2.json b/ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/themes/theme_version_2.json
index cbd27e4..59e58a4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/themes/theme_version_2.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/themes/theme_version_2.json
@@ -940,25 +940,7 @@
         },
         {
           "config": "admin-properties/audit_db_password",
-          "subsection-name": "subsection-ranger-audit-db-row2-col2",
-          "depends-on": [
-            {
-              "configs":[
-                "ranger-env/xasecure.audit.destination.db"
-              ],
-              "if": "${ranger-env/xasecure.audit.destination.db}",
-              "then": {
-                "property_value_attributes": {
-                  "visible": true
-                }
-              },
-              "else": {
-                "property_value_attributes": {
-                  "visible": false
-                }
-              }
-            }
-          ]
+          "subsection-name": "subsection-ranger-audit-db-row2-col2"
         },
         {
           "config": "ranger-env/xasecure.audit.destination.solr",

http://git-wip-us.apache.org/repos/asf/ambari/blob/24447458/ambari-web/app/views/common/configs/widgets/config_widget_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/configs/widgets/config_widget_view.js b/ambari-web/app/views/common/configs/widgets/config_widget_view.js
index e9eaed2..9858b75 100644
--- a/ambari-web/app/views/common/configs/widgets/config_widget_view.js
+++ b/ambari-web/app/views/common/configs/widgets/config_widget_view.js
@@ -434,9 +434,6 @@ App.ConfigWidgetView = Em.View.extend(App.SupportsDependentConfigs, App.WidgetPo
         var conditionalConfig = serviceConfigs.filterProperty('filename',conditionalConfigFileName).findProperty('name', conditionalConfigName);
         if (conditionalConfig) {
           conditionalConfig.set(valueAttribute, valueAttributes[key]);
-          if (valueAttribute === 'isVisible') {
-            conditionalConfig.set('hiddenBySection', !valueAttributes[key]);
-          }
         }
       }
     }


[20/50] [abbrv] ambari git commit: AMBARI-15016 Cover hosts views with unit tests. (atkach)

Posted by jo...@apache.org.
AMBARI-15016 Cover hosts views with unit tests. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/30438e90
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/30438e90
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/30438e90

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 30438e9052d5782d8e2b5fe4b28c6adf9d78c5d7
Parents: c47fff3
Author: Andrii Tkach <at...@hortonworks.com>
Authored: Thu Feb 11 21:19:34 2016 +0200
Committer: Andrii Tkach <at...@hortonworks.com>
Committed: Fri Feb 12 12:09:23 2016 +0200

----------------------------------------------------------------------
 ambari-web/app/assets/test/tests.js             |   4 +
 .../app/controllers/wizard/step7_controller.js  |   4 +-
 .../utils/configs/rm_ha_config_initializer.js   |   2 +-
 .../admin/highAvailability/progress_view.js     |   4 +-
 ambari-web/app/views/main/host/add_view.js      |   3 +
 .../app/views/main/host/configs_service.js      |   3 +
 ambari-web/app/views/main/host/menu.js          |   6 +-
 ambari-web/app/views/main/host/summary.js       |   9 +-
 .../highAvailability/progress_view_test.js      |   4 +-
 .../test/views/main/host/add_view_test.js       | 141 ++++++++++
 .../views/main/host/combo_search_box_test.js    |  42 +++
 .../views/main/host/config_service_menu_test.js | 140 ++++++++++
 .../test/views/main/host/config_service_test.js |  46 +++
 .../views/main/host/host_alerts_view_test.js    | 140 +++++++++-
 ambari-web/test/views/main/host/menu_test.js    |  43 ++-
 ambari-web/test/views/main/host/summary_test.js | 277 +++++++++++++------
 16 files changed, 772 insertions(+), 96 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/30438e90/ambari-web/app/assets/test/tests.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/test/tests.js b/ambari-web/app/assets/test/tests.js
index ecf55f7..44fb4f4 100644
--- a/ambari-web/app/assets/test/tests.js
+++ b/ambari-web/app/assets/test/tests.js
@@ -253,6 +253,10 @@ var files = [
   'test/views/main/host/menu_test',
   'test/views/main/host/stack_versions_view_test',
   'test/views/main/host/host_alerts_view_test',
+  'test/views/main/host/combo_search_box_test',
+  'test/views/main/host/config_service_test',
+  'test/views/main/host/add_view_test',
+  'test/views/main/host/config_service_menu_test',
   'test/views/main/host/details/host_component_view_test',
   'test/views/main/host/details/host_component_views/decommissionable_test',
   'test/views/main/host/details/host_component_views/datanode_view_test',

http://git-wip-us.apache.org/repos/asf/ambari/blob/30438e90/ambari-web/app/controllers/wizard/step7_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step7_controller.js b/ambari-web/app/controllers/wizard/step7_controller.js
index c6f4689..7e96845 100644
--- a/ambari-web/app/controllers/wizard/step7_controller.js
+++ b/ambari-web/app/controllers/wizard/step7_controller.js
@@ -867,8 +867,8 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
    * @returns {Object[]} existing configs + additional config parameters in yarn-client.xml
    */
   addHawqConfigsOnRMHa: function(configs) {
-    rmHost1 = configs.findProperty('id', 'yarn.resourcemanager.hostname.rm1__yarn-site').value ;
-    rmHost2 = configs.findProperty('id', 'yarn.resourcemanager.hostname.rm2__yarn-site').value ;
+    var rmHost1 = configs.findProperty('id', 'yarn.resourcemanager.hostname.rm1__yarn-site').value ;
+    var rmHost2 = configs.findProperty('id', 'yarn.resourcemanager.hostname.rm2__yarn-site').value ;
     var yarnConfigToBeAdded = [
       {
         name: 'yarn.resourcemanager.ha',

http://git-wip-us.apache.org/repos/asf/ambari/blob/30438e90/ambari-web/app/utils/configs/rm_ha_config_initializer.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/configs/rm_ha_config_initializer.js b/ambari-web/app/utils/configs/rm_ha_config_initializer.js
index 9c36323..dac68d9 100644
--- a/ambari-web/app/utils/configs/rm_ha_config_initializer.js
+++ b/ambari-web/app/utils/configs/rm_ha_config_initializer.js
@@ -89,7 +89,7 @@ App.RmHaConfigInitializer = App.HaConfigInitializerClass.create(App.HostsBasedIn
    */
   _initRmHaHostsWithPort: function (configProperty, localDB, dependencies, initializer) {
     var rmHosts = localDB.masterComponentHosts.filterProperty('component', 'RESOURCEMANAGER').getEach('hostName');
-    for (rmHost in rmHosts) {
+    for (var rmHost in rmHosts) {
       rmHosts[rmHost] = rmHosts[rmHost] + ":" + initializer.port;
     }
     var value = rmHosts.join(',');

http://git-wip-us.apache.org/repos/asf/ambari/blob/30438e90/ambari-web/app/views/main/admin/highAvailability/progress_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/highAvailability/progress_view.js b/ambari-web/app/views/main/admin/highAvailability/progress_view.js
index abe6685..fb793e2 100644
--- a/ambari-web/app/views/main/admin/highAvailability/progress_view.js
+++ b/ambari-web/app/views/main/admin/highAvailability/progress_view.js
@@ -43,7 +43,7 @@ App.HighAvailabilityProgressPageView = Em.View.extend(App.wizardProgressPageView
     if (currentStep === 1) {
       return  Em.I18n.t('admin.highAvailability.wizard.rollback.header.title');
     } else {
-      return  Em.I18n.t('admin.highAvailability.wizard.step' + currentStep + '.header.title');
+      return  Em.I18n.t('admin.highAvailability.wizard.step' + currentStep + '.header');
     }
   }.property(),
 
@@ -55,7 +55,7 @@ App.HighAvailabilityProgressPageView = Em.View.extend(App.wizardProgressPageView
     if (currentStep === 1) {
       return  Em.I18n.t('admin.highAvailability.rollback.notice.inProgress');
     } else {
-      return  Em.I18n.t('admin.highAvailability.wizard.step' + currentStep + '.notice.inProgress');
+      return  Em.I18n.t('admin.highAvailability.wizard.progressPage.notice.inProgress');
     }
   }.property(),
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/30438e90/ambari-web/app/views/main/host/add_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/host/add_view.js b/ambari-web/app/views/main/host/add_view.js
index 33fd123..2047f47 100644
--- a/ambari-web/app/views/main/host/add_view.js
+++ b/ambari-web/app/views/main/host/add_view.js
@@ -23,6 +23,9 @@ App.AddHostView = Em.View.extend(App.WizardMenuMixin, {
 
   templateName: require('templates/main/host/add'),
 
+  /**
+   * @type {boolean}
+   */
   isLoaded: false,
 
   willInsertElement: function () {

http://git-wip-us.apache.org/repos/asf/ambari/blob/30438e90/ambari-web/app/views/main/host/configs_service.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/host/configs_service.js b/ambari-web/app/views/main/host/configs_service.js
index eb3fc56..5bd1ef5 100644
--- a/ambari-web/app/views/main/host/configs_service.js
+++ b/ambari-web/app/views/main/host/configs_service.js
@@ -25,6 +25,9 @@ App.MainHostServiceConfigsView = Em.View.extend({
     this.get('controller').loadStep();
   },
 
+  /**
+   * @type {boolean}
+   */
   isConfigsEditable: false,
 
   content: Em.computed.alias('App.router.mainHostDetailsController.content')

http://git-wip-us.apache.org/repos/asf/ambari/blob/30438e90/ambari-web/app/views/main/host/menu.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/host/menu.js b/ambari-web/app/views/main/host/menu.js
index a53c21a..5540014 100644
--- a/ambari-web/app/views/main/host/menu.js
+++ b/ambari-web/app/views/main/host/menu.js
@@ -99,9 +99,7 @@ App.MainHostMenuView = Em.CollectionView.extend({
   },
 
   deactivateChildViews: function () {
-    $.each(this._childViews, function () {
-      this.set('active', "");
-    });
+    this.get('_childViews').setEach('active', '');
   },
 
   itemViewClass: Em.View.extend({
@@ -113,4 +111,4 @@ App.MainHostMenuView = Em.CollectionView.extend({
     '{{view.content.badgeText}}' +
     '</span>  {{/if}}</a>{{/unless}}')
   })
-});
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/30438e90/ambari-web/app/views/main/host/summary.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/host/summary.js b/ambari-web/app/views/main/host/summary.js
index 464d66f..aa12581 100644
--- a/ambari-web/app/views/main/host/summary.js
+++ b/ambari-web/app/views/main/host/summary.js
@@ -54,9 +54,7 @@ App.MainHostSummaryView = Em.View.extend(App.TimeRangeMixin, {
   /**
    * Host metrics panel not displayed when Metrics service (ex:Ganglia) is not in stack definition.
    */
-  isNoHostMetricsService: function() {
-    return !App.get('services.hostMetrics').length;
-  }.property('App.services.hostMetrics'),
+  isNoHostMetricsService: Em.computed.equal('App.services.hostMetrics.length', 0),
 
   /**
    * Message for "restart" block
@@ -242,11 +240,10 @@ App.MainHostSummaryView = Em.View.extend(App.TimeRangeMixin, {
     var clientComponents = App.StackServiceComponent.find().filterProperty('isClient');
     var installedServices = this.get('installedServices');
     var installedClients = this.get('clients').mapProperty('componentName');
-    var installableClients = clientComponents.filter(function(component) {
+    return clientComponents.filter(function(component) {
       // service for current client is installed but client isn't installed on current host
       return installedServices.contains(component.get('serviceName')) && !installedClients.contains(component.get('componentName'));
     });
-    return installableClients;
   }.property('content.hostComponents.length', 'installedServices.length'),
 
   notInstalledClientComponents: function () {
@@ -280,7 +277,7 @@ App.MainHostSummaryView = Em.View.extend(App.TimeRangeMixin, {
       });
     }
     return components;
-  }.property('content.hostComponents.length', 'installableClientComponents', 'App.components.addableToHost.@each'),
+  }.property('content.hostComponents.length', 'App.components.addableToHost.@each'),
 
   /**
    * Formatted with <code>$.timeago</code> value of host's last heartbeat

http://git-wip-us.apache.org/repos/asf/ambari/blob/30438e90/ambari-web/test/views/main/admin/highAvailability/progress_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/admin/highAvailability/progress_view_test.js b/ambari-web/test/views/main/admin/highAvailability/progress_view_test.js
index e68c2f1..76902f6 100644
--- a/ambari-web/test/views/main/admin/highAvailability/progress_view_test.js
+++ b/ambari-web/test/views/main/admin/highAvailability/progress_view_test.js
@@ -57,7 +57,7 @@ describe('App.HighAvailabilityProgressPageView', function () {
     it("currentStep is 2", function () {
       this.mock.returns(2);
       view.propertyDidChange('headerTitle');
-      expect(view.get('headerTitle')).to.equal(Em.I18n.t('admin.highAvailability.wizard.step2.header.title'));
+      expect(view.get('headerTitle')).to.equal(Em.I18n.t('admin.highAvailability.wizard.step2.header'));
     });
   });
 
@@ -76,7 +76,7 @@ describe('App.HighAvailabilityProgressPageView', function () {
     it("currentStep is 2", function () {
       this.mock.returns(2);
       view.propertyDidChange('noticeInProgress');
-      expect(view.get('noticeInProgress')).to.equal(Em.I18n.t('admin.highAvailability.wizard.step2.notice.inProgress'));
+      expect(view.get('noticeInProgress')).to.equal(Em.I18n.t('admin.highAvailability.wizard.progressPage.notice.inProgress'));
     });
   });
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/30438e90/ambari-web/test/views/main/host/add_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/host/add_view_test.js b/ambari-web/test/views/main/host/add_view_test.js
new file mode 100644
index 0000000..785eac4
--- /dev/null
+++ b/ambari-web/test/views/main/host/add_view_test.js
@@ -0,0 +1,141 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+var App = require('app');
+var testHelpers = require('test/helpers');
+var view;
+
+describe('App.AddHostView', function () {
+
+  beforeEach(function () {
+    view = App.AddHostView.create({
+      controller: Em.Object.create({
+        getDBProperty: Em.K,
+        setDBProperty: Em.K,
+        content: Em.Object.create()
+      })
+    });
+  });
+
+  describe("#willInsertElement()", function() {
+
+    beforeEach(function() {
+      sinon.stub(view, 'loadHosts');
+      this.mock = sinon.stub(view.get('controller'), 'getDBProperty');
+    });
+    afterEach(function() {
+      view.loadHosts.restore();
+      this.mock.restore();
+    });
+
+    it("hosts saved in DB", function() {
+      this.mock.returns(['host1']);
+      view.willInsertElement();
+      expect(view.get('isLoaded')).to.be.true;
+      expect(view.loadHosts.calledOnce).to.be.false;
+    });
+    it("hosts not saved in DB", function() {
+      this.mock.returns(null);
+      view.willInsertElement();
+      expect(view.get('isLoaded')).to.be.false;
+      expect(view.loadHosts.calledOnce).to.be.true;
+    });
+  });
+
+  describe("#loadHosts()", function() {
+
+    it("App.ajax.send should be called", function() {
+      view.loadHosts();
+      var args = testHelpers.filterAjaxRequests('name', 'hosts.confirmed');
+      expect(args[0][0]).to.eql({
+        name: 'hosts.confirmed',
+        sender: view,
+        data: {},
+        success: 'loadHostsSuccessCallback',
+        error: 'loadHostsErrorCallback'
+      });
+    });
+  });
+
+  describe("#loadHostsSuccessCallback()", function() {
+
+    beforeEach(function() {
+      sinon.stub(view.get('controller'), 'setDBProperty');
+    });
+    afterEach(function() {
+      view.get('controller').setDBProperty.restore();
+    });
+
+    it("should save hosts to DB", function() {
+      var response = {items: [
+        {
+          Hosts: {
+            host_name: 'host1',
+            cpu_count: 1,
+            total_mem: 1024,
+            disk_info: {}
+          },
+          host_components: [
+            {
+              component_name: 'C1'
+            }
+          ]
+        }
+      ]};
+      view.loadHostsSuccessCallback(response);
+      expect(view.get('isLoaded')).to.be.true;
+      expect(view.get('controller').setDBProperty.calledWith('hosts', {
+        host1: {
+          name: 'host1',
+          cpu: 1,
+          memory: 1024,
+          disk_info: {},
+          bootStatus: "REGISTERED",
+          isInstalled: true,
+          hostComponents: [
+            {
+              component_name: 'C1'
+            }
+          ]
+        }
+      })).to.be.true;
+      expect(view.get('controller.content.hosts')).to.eql({
+        host1: {
+          name: 'host1',
+          cpu: 1,
+          memory: 1024,
+          disk_info: {},
+          bootStatus: "REGISTERED",
+          isInstalled: true,
+          hostComponents: [
+            {
+              component_name: 'C1'
+            }
+          ]
+        }
+      });
+    });
+  });
+
+  describe("#loadHostsErrorCallback()", function() {
+    it("isLoaded should be set to true", function() {
+      view.loadHostsErrorCallback();
+      expect(view.get('isLoaded')).to.be.true;
+    });
+  });
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/30438e90/ambari-web/test/views/main/host/combo_search_box_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/host/combo_search_box_test.js b/ambari-web/test/views/main/host/combo_search_box_test.js
new file mode 100644
index 0000000..9c2e7b6
--- /dev/null
+++ b/ambari-web/test/views/main/host/combo_search_box_test.js
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+var App = require('app');
+var view;
+
+describe('App.MainHostComboSearchBoxView', function () {
+
+  beforeEach(function () {
+    view = App.MainHostComboSearchBoxView.create();
+  });
+
+  describe("#didInsertElement()", function() {
+
+    beforeEach(function() {
+      sinon.stub(view, 'initVS');
+    });
+    afterEach(function() {
+      view.initVS.restore();
+    });
+
+    it("initVS should be called", function() {
+      view.didInsertElement();
+      expect(view.initVS.calledOnce).to.be.true;
+    });
+  });
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/30438e90/ambari-web/test/views/main/host/config_service_menu_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/host/config_service_menu_test.js b/ambari-web/test/views/main/host/config_service_menu_test.js
new file mode 100644
index 0000000..6d0481e
--- /dev/null
+++ b/ambari-web/test/views/main/host/config_service_menu_test.js
@@ -0,0 +1,140 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+var App = require('app');
+var misc = require('utils/misc');
+var view;
+
+describe('App.MainHostServiceMenuView', function () {
+
+  beforeEach(function () {
+    view = App.MainHostServiceMenuView.create({
+      host: Em.Object.create(),
+      controller: Em.Object.create({
+        connectOutlet: Em.K
+      })
+    });
+  });
+
+  describe("#content", function() {
+
+    beforeEach(function() {
+      sinon.stub(App, 'get').returns([]);
+      sinon.stub(App.StackService, 'find').returns([]);
+      sinon.stub(misc, 'sortByOrder', function(stackServices, services) {
+        return services;
+      });
+    });
+    afterEach(function() {
+      App.StackService.find.restore();
+      misc.sortByOrder.restore();
+      App.get.restore();
+    });
+
+    it("no hostComponents", function() {
+      view.set('host', Em.Object.create({
+        hostComponents: null
+      }));
+      view.propertyDidChange('content');
+      expect(view.get('content')).to.be.empty;
+    });
+
+    it("hostComponents without service", function() {
+      view.set('host', Em.Object.create({
+        hostComponents: [
+          Em.Object.create({
+            service: null
+          })
+        ]
+      }));
+      view.propertyDidChange('content');
+      expect(view.get('content')).to.be.empty;
+    });
+
+    it("hostComponents with service", function() {
+      view.set('host', Em.Object.create({
+        hostComponents: [
+          Em.Object.create({
+            service: Em.Object.create({
+              serviceName: 'S1'
+            })
+          })
+        ]
+      }));
+      view.propertyDidChange('content');
+      expect(view.get('content').mapProperty('serviceName')).to.eql(['S1']);
+    });
+
+    it("hostComponents with the same services", function() {
+      view.set('host', Em.Object.create({
+        hostComponents: [
+          Em.Object.create({
+            service: Em.Object.create({
+              serviceName: 'S1'
+            })
+          }),
+          Em.Object.create({
+            service: Em.Object.create({
+              serviceName: 'S1'
+            })
+          })
+        ]
+      }));
+      view.propertyDidChange('content');
+      expect(view.get('content').mapProperty('serviceName')).to.eql(['S1']);
+    });
+  });
+
+
+
+  describe("#showHostService()", function() {
+
+    beforeEach(function() {
+      sinon.stub(view.get('controller'), 'connectOutlet');
+    });
+    afterEach(function() {
+      view.get('controller').connectOutlet.restore()
+    });
+
+    it("service is absent", function() {
+      view.showHostService({contexts: []});
+      expect(view.get('controller').connectOutlet.called).to.be.false;
+    });
+
+    it("service is present", function() {
+      view.showHostService({contexts: [{serviceName: 'S1'}]});
+      expect(view.get('controller').connectOutlet.calledWith('service_config_outlet', 'mainHostServiceConfigs', {serviceName: 'S1', host: Em.Object.create()})).to.be.true;
+    });
+  });
+
+  describe("#didInsertElement()", function() {
+
+    beforeEach(function() {
+      sinon.stub(view, 'showHostService');
+    });
+    afterEach(function() {
+      view.showHostService.restore();
+    });
+
+    it("showHostService should be called", function() {
+      view.didInsertElement();
+      expect(view.showHostService.calledWith({contexts: [undefined]})).to.be.true;
+    });
+  });
+
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/30438e90/ambari-web/test/views/main/host/config_service_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/host/config_service_test.js b/ambari-web/test/views/main/host/config_service_test.js
new file mode 100644
index 0000000..bcd0660
--- /dev/null
+++ b/ambari-web/test/views/main/host/config_service_test.js
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+var App = require('app');
+var view;
+
+describe('App.MainHostServiceConfigsView', function () {
+
+  beforeEach(function () {
+    view = App.MainHostServiceConfigsView.create({
+      controller: Em.Object.create({
+        loadStep: Em.K
+      })
+    });
+  });
+
+  describe("#didInsertElement()", function() {
+
+    beforeEach(function() {
+      sinon.stub(view.get('controller'), 'loadStep');
+    });
+    afterEach(function() {
+      view.get('controller').loadStep.restore();
+    });
+
+    it("loadStep should be called", function() {
+      view.didInsertElement();
+      expect(view.get('controller').loadStep.calledOnce).to.be.true;
+    });
+  });
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/30438e90/ambari-web/test/views/main/host/host_alerts_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/host/host_alerts_view_test.js b/ambari-web/test/views/main/host/host_alerts_view_test.js
index 6574309..cd38f49 100644
--- a/ambari-web/test/views/main/host/host_alerts_view_test.js
+++ b/ambari-web/test/views/main/host/host_alerts_view_test.js
@@ -25,7 +25,10 @@ describe('App.MainHostAlertsView', function () {
 
   beforeEach(function () {
     view = App.MainHostAlertsView.create({
-      controller: Em.Object.create()
+      controller: Em.Object.create(),
+      parentView: Em.Object.create({
+        controller: Em.Object.create()
+      })
     });
   });
 
@@ -114,4 +117,139 @@ describe('App.MainHostAlertsView', function () {
 
   });
 
+  describe("#willInsertElement()", function() {
+    var mock = {
+      loadAlertInstancesByHost: Em.K
+    };
+
+    beforeEach(function() {
+      sinon.stub(App.router, 'get').returns(mock);
+      sinon.spy(mock, 'loadAlertInstancesByHost');
+      sinon.stub(App.router, 'set');
+      view.set('parentView.controller.content', Em.Object.create({
+        hostName: 'host1'
+      }));
+    });
+    afterEach(function() {
+      mock.loadAlertInstancesByHost.restore();
+      App.router.get.restore();
+      App.router.set.restore();
+    });
+
+    it("loadAlertInstancesByHost should be called", function() {
+      view.willInsertElement();
+      expect(App.router.set.calledWith('mainAlertInstancesController.isUpdating', true)).to.be.true;
+    });
+
+    it("App.router.set should be called", function() {
+      view.willInsertElement();
+      expect(App.router.set.calledWith('mainAlertInstancesController.isUpdating', true)).to.be.true;
+    });
+  });
+
+  describe("#didInsertElement()", function() {
+
+    beforeEach(function() {
+      sinon.spy(view, 'tooltipsUpdater');
+    });
+    afterEach(function() {
+      view.tooltipsUpdater.restore();
+    });
+
+    it("tooltipsUpdater should be called", function() {
+      view.didInsertElement();
+      expect(view.tooltipsUpdater.calledOnce).to.be.true;
+    });
+  });
+
+
+  describe("#paginationLeftClass", function() {
+
+    it("startIndex is 2", function() {
+      view.set('startIndex', 2);
+      expect(view.get('paginationLeftClass')).to.equal('paginate_previous');
+    });
+
+    it("startIndex is 1", function() {
+      view.set('startIndex', 1);
+      expect(view.get('paginationLeftClass')).to.equal('paginate_disabled_previous');
+    });
+
+    it("startIndex is 0", function() {
+      view.set('startIndex', 0);
+      expect(view.get('paginationLeftClass')).to.equal('paginate_disabled_previous');
+    });
+  });
+
+  describe("#paginationRightClass", function() {
+
+    it("endIndex more than filteredCount", function() {
+      view.reopen({
+        endIndex: 4,
+        filteredCount: 3
+      });
+      expect(view.get('paginationRightClass')).to.equal('paginate_disabled_next');
+    });
+
+    it("endIndex equal to filteredCount", function() {
+      view.reopen({
+        endIndex: 4,
+        filteredCount: 4
+      });
+      expect(view.get('paginationRightClass')).to.equal('paginate_disabled_next');
+    });
+
+    it("endIndex less than filteredCount", function() {
+      view.reopen({
+        endIndex: 3,
+        filteredCount: 4
+      });
+      view.propertyDidChange('paginationRightClass');
+      expect(view.get('paginationRightClass')).to.equal('paginate_next');
+    });
+  });
+
+  describe("#clearFilters()", function() {
+    var mock = {
+      clearFilter: Em.K
+    };
+
+    beforeEach(function() {
+      sinon.spy(mock, 'clearFilter');
+    });
+    afterEach(function() {
+      mock.clearFilter.restore();
+    });
+
+    it("clearFilter should be called", function() {
+      view.reopen({
+        'childViews': [mock]
+      });
+      view.clearFilters();
+      expect(view.get('filterConditions')).to.be.empty;
+      expect(mock.clearFilter.calledOnce).to.be.true;
+    });
+  });
+
+  describe("#willDestroyElement()", function() {
+    var mock = {
+      tooltip: Em.K
+    };
+
+    beforeEach(function() {
+      sinon.stub(view, '$').returns(mock);
+      sinon.spy(mock, 'tooltip');
+    });
+    afterEach(function() {
+      view.$.restore();
+      mock.tooltip.restore();
+    });
+
+    it("tooltip should be called", function() {
+      view.willDestroyElement();
+      expect(view.$.calledWith(".enable-disable-button, .timeago, .alert-text")).to.be.true;
+      expect(mock.tooltip.calledWith('destroy')).to.be.true;
+    });
+  });
+
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/30438e90/ambari-web/test/views/main/host/menu_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/host/menu_test.js b/ambari-web/test/views/main/host/menu_test.js
index 59facf1..520314d 100644
--- a/ambari-web/test/views/main/host/menu_test.js
+++ b/ambari-web/test/views/main/host/menu_test.js
@@ -89,4 +89,45 @@ describe('App.MainHostMenuView', function () {
     });
   });
 
-});
+  describe("#updateAlertCounter()", function() {
+
+    it("CRITICAL alerts", function() {
+      view.setProperties({
+        host: Em.Object.create({
+          criticalWarningAlertsCount: 1,
+          alertsSummary: Em.Object.create({
+            CRITICAL: 1,
+            WARNING: 0
+          })
+        })
+      });
+      view.updateAlertCounter();
+      expect(view.get('content').findProperty('name', 'alerts').get('badgeText')).to.equal('1');
+      expect(view.get('content').findProperty('name', 'alerts').get('badgeClasses')).to.equal('label alerts-crit-count');
+    });
+
+    it("WARNING alerts", function() {
+      view.setProperties({
+        host: Em.Object.create({
+          criticalWarningAlertsCount: 1,
+          alertsSummary: Em.Object.create({
+            CRITICAL: 0,
+            WARNING: 1
+          })
+        })
+      });
+      view.updateAlertCounter();
+      expect(view.get('content').findProperty('name', 'alerts').get('badgeText')).to.equal('1');
+      expect(view.get('content').findProperty('name', 'alerts').get('badgeClasses')).to.equal('label alerts-warn-count');
+    });
+  });
+
+  describe("#deactivateChildViews()", function() {
+    it("active attr should be empty", function() {
+      view.set('_childViews', [Em.Object.create({active: 'active'})]);
+      view.deactivateChildViews();
+      expect(view.get('_childViews').mapProperty('active')).to.eql(['']);
+    });
+  });
+
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/30438e90/ambari-web/test/views/main/host/summary_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/host/summary_test.js b/ambari-web/test/views/main/host/summary_test.js
index 2add4d0..47dbf3d 100644
--- a/ambari-web/test/views/main/host/summary_test.js
+++ b/ambari-web/test/views/main/host/summary_test.js
@@ -24,21 +24,34 @@ require('mappers/server_data_mapper');
 require('views/main/host/summary');
 
 var mainHostSummaryView;
-var extendedMainHostSummaryView = App.MainHostSummaryView.extend({content: {}, addToolTip: function(){}, installedServices: []});
 var modelSetup = require('test/init_model_test');
 
 describe('App.MainHostSummaryView', function() {
 
   beforeEach(function() {
     modelSetup.setupStackServiceComponent();
-    mainHostSummaryView = extendedMainHostSummaryView.create({});
+    mainHostSummaryView = App.MainHostSummaryView.create({content: Em.Object.create()});
   });
 
   afterEach(function(){
     modelSetup.cleanStackServiceComponent();
   });
 
-  describe('#sortedComponents', function() {
+  describe("#installedServices", function() {
+
+    beforeEach(function() {
+      sinon.stub(App.Service, 'find').returns([Em.Object.create({serviceName: 'S1'})]);
+    });
+    afterEach(function() {
+      App.Service.find.restore();
+    });
+
+    it("should return installed services", function() {
+      expect(mainHostSummaryView.get('installedServices')).to.eql(['S1']);
+    });
+  });
+
+  describe('#sortedComponentsFormatter()', function() {
 
     var tests = Em.A([
       {
@@ -274,108 +287,70 @@ describe('App.MainHostSummaryView', function() {
 
   describe('#addableComponents', function() {
 
+    beforeEach(function() {
+      this.mock = sinon.stub(App.StackServiceComponent, 'find');
+    });
+    afterEach(function() {
+      App.StackServiceComponent.find.restore();
+    });
+
     var tests = Em.A([
       {
-        installableClientComponents: [{}, {}],
+        addableToHostComponents: [
+          Em.Object.create({
+            serviceName: 'HDFS',
+            componentName: 'DATANODE',
+            isAddableToHost: true
+          }),
+          Em.Object.create({
+            serviceName: 'HDFS',
+            componentName: 'HDFS_CLIENT',
+            isAddableToHost: true
+          })
+        ],
         content: Em.Object.create({
           hostComponents: Em.A([
             Em.Object.create({
               componentName: 'HDFS_CLIENT'
-            }),
-            Em.Object.create({
-              componentName: 'DATANODE'
             })
           ])
         }),
-        services: ['HDFS', 'YARN', 'MAPREDUCE2'],
-        e: ['NODEMANAGER', 'YARN_CLIENT', 'MAPREDUCE2_CLIENT'],
+        services: ['HDFS'],
+        e: ['DATANODE'],
         m: 'some components are already installed'
       },
       {
-        installableClientComponents: [],
+        addableToHostComponents: [
+          Em.Object.create({
+            serviceName: 'HDFS',
+            componentName: 'HDFS_CLIENT',
+            isAddableToHost: true
+          })
+        ],
         content: Em.Object.create({
           hostComponents: Em.A([
             Em.Object.create({
               componentName: 'HDFS_CLIENT'
-            }),
-            Em.Object.create({
-              componentName: 'YARN_CLIENT'
-            }),
-            Em.Object.create({
-              componentName: 'MAPREDUCE2_CLIENT'
-            }),
-            Em.Object.create({
-              componentName: 'NODEMANAGER'
             })
           ])
         }),
-        services: ['HDFS', 'YARN', 'MAPREDUCE2'],
-        e: ['DATANODE'],
-        m: 'all clients and some other components are already installed'
+        services: ['HDFS'],
+        e: [],
+        m: 'all components are already installed'
       }
     ]);
 
     tests.forEach(function(test) {
       it(test.m, function() {
-        mainHostSummaryView.reopen({installableClientComponents: test.installableClientComponents});
+        this.mock.returns(test.addableToHostComponents);
         mainHostSummaryView.set('content', test.content);
-        mainHostSummaryView.set('installedServices', test.services);
-        expect(mainHostSummaryView.get('addableComponents').mapProperty('componentName')).to.eql(test.e);
-      });
-    });
-
-  });
-
-  describe("#clientsWithCustomCommands", function() {
-    before(function() {
-      sinon.stub(App.StackServiceComponent, 'find', function(component) {
-        var customCommands = [];
-
-        if (component === 'WITH_CUSTOM_COMMANDS') {
-          customCommands = ['CUSTOMCOMMAND'];
-        }
-
-        var obj = Em.Object.create({
-          customCommands: customCommands,
-          filterProperty: function () {
-            return {
-              mapProperty: Em.K
-            };
-          }
+        mainHostSummaryView.reopen({
+          installedServices: test.services
         });
-        return obj;
+        mainHostSummaryView.propertyDidChange('addableComponents');
+        expect(mainHostSummaryView.get('addableComponents').mapProperty('componentName')).to.eql(test.e);
       });
     });
-
-    after(function() {
-      App.StackServiceComponent.find.restore();
-    });
-    var content = Em.Object.create({
-      hostComponents: Em.A([
-        Em.Object.create({
-          componentName: 'WITH_CUSTOM_COMMANDS',
-          displayName: 'WITH_CUSTOM_COMMANDS',
-          hostName: 'c6401',
-          service: Em.Object.create({
-            serviceName: 'TESTSRV'
-          })
-        }),
-        Em.Object.create({
-          componentName: 'WITHOUT_CUSTOM_COMMANDS',
-          displayName: 'WITHOUT_CUSTOM_COMMANDS',
-          hostName: 'c6401',
-          service: Em.Object.create({
-            serviceName: 'TESTSRV'
-          })
-        })
-      ])
-    });
-
-    it("Clients with custom commands only", function() {
-      mainHostSummaryView.set('content', content);
-      expect(mainHostSummaryView.get('clientsWithCustomCommands').length).to.be.equal(1);
-      expect(mainHostSummaryView.get('clientsWithCustomCommands')).to.have.deep.property('[0].commands[0].command', 'CUSTOMCOMMAND');
-    });
   });
 
   describe('#areClientsNotInstalled', function () {
@@ -475,4 +450,152 @@ describe('App.MainHostSummaryView', function() {
     });
 
   });
+
+  describe("#needToRestartMessage", function() {
+
+    it("one component", function() {
+      var expected = Em.I18n.t('hosts.host.details.needToRestart').format(1, Em.I18n.t('common.component').toLowerCase());
+      mainHostSummaryView.set('content', Em.Object.create({
+        componentsWithStaleConfigsCount: 1
+      }));
+      expect(mainHostSummaryView.get('needToRestartMessage')).to.equal(expected);
+    });
+
+    it("multiple components", function() {
+      var expected = Em.I18n.t('hosts.host.details.needToRestart').format(2, Em.I18n.t('common.components').toLowerCase());
+      mainHostSummaryView.set('content', Em.Object.create({
+        componentsWithStaleConfigsCount: 2
+      }));
+      expect(mainHostSummaryView.get('needToRestartMessage')).to.equal(expected);
+    });
+
+  });
+
+  describe("#redrawComponents()", function() {
+
+    beforeEach(function() {
+      this.mock = sinon.stub(App.router, 'get');
+      sinon.stub(mainHostSummaryView, 'sortedComponentsFormatter');
+      sinon.stub(App.router, 'set');
+    });
+    afterEach(function() {
+      this.mock.restore();
+      mainHostSummaryView.sortedComponentsFormatter.restore();
+      App.router.set.restore();
+    });
+
+    it("redrawComponents is false", function() {
+      this.mock.returns(false);
+      mainHostSummaryView.redrawComponents();
+      expect(mainHostSummaryView.sortedComponentsFormatter.called).to.be.false;
+    });
+
+    it("redrawComponents is true", function() {
+      this.mock.returns(true);
+      mainHostSummaryView.redrawComponents();
+      expect(mainHostSummaryView.sortedComponentsFormatter.calledOnce).to.be.true;
+      expect(mainHostSummaryView.get('sorteComponents')).to.be.empty;
+      expect(App.router.set.calledWith('mainHostDetailsController.redrawComponents', false)).to.be.true;
+    });
+
+  });
+
+  describe("#willInsertElement()", function() {
+
+    beforeEach(function() {
+      sinon.stub(mainHostSummaryView, 'sortedComponentsFormatter');
+      sinon.stub(mainHostSummaryView, 'addObserver');
+    });
+    afterEach(function() {
+      mainHostSummaryView.sortedComponentsFormatter.restore();
+      mainHostSummaryView.addObserver.restore();
+    });
+
+    it("sortedComponentsFormatter should be called ", function() {
+      mainHostSummaryView.willInsertElement();
+      expect(mainHostSummaryView.sortedComponentsFormatter.calledOnce).to.be.true;
+      expect(mainHostSummaryView.addObserver.calledWith('content.hostComponents.length', mainHostSummaryView, 'sortedComponentsFormatter')).to.be.true;
+      expect(mainHostSummaryView.get('sortedComponents')).to.be.empty;
+    });
+  });
+
+  describe("#didInsertElement()", function() {
+
+    beforeEach(function() {
+      sinon.stub(mainHostSummaryView, 'addToolTip');
+    });
+    afterEach(function() {
+      mainHostSummaryView.addToolTip.restore();
+    });
+
+    it("addToolTip should be called", function() {
+      mainHostSummaryView.didInsertElement();
+      expect(mainHostSummaryView.addToolTip.calledOnce).to.be.true;
+    });
+  });
+
+  describe("#addToolTip()", function() {
+
+    beforeEach(function() {
+      sinon.stub(App, 'tooltip');
+      mainHostSummaryView.removeObserver('addComponentDisabled', mainHostSummaryView, 'addToolTip');
+    });
+    afterEach(function() {
+      App.tooltip.restore();
+    });
+
+    it("addComponentDisabled is false ", function() {
+      mainHostSummaryView.reopen({
+        addComponentDisabled: false
+      });
+      mainHostSummaryView.addToolTip();
+      expect(App.tooltip.called).to.be.false;
+    });
+
+    it("addComponentDisabled is true ", function() {
+      mainHostSummaryView.reopen({
+        addComponentDisabled: true
+      });
+      mainHostSummaryView.addToolTip();
+      expect(App.tooltip.called).to.be.true;
+    });
+
+  });
+
+  describe("#installableClientComponents", function() {
+
+    beforeEach(function() {
+      sinon.stub(App.StackServiceComponent, 'find').returns([
+        Em.Object.create({
+          isClient: true,
+          serviceName: 'S1',
+          componentName: 'C1'
+        }),
+        Em.Object.create({
+          isClient: true,
+          serviceName: 'S1',
+          componentName: 'C2'
+        }),
+        Em.Object.create({
+          isClient: true,
+          serviceName: 'S2',
+          componentName: 'C1'
+        })
+      ]);
+    });
+    afterEach(function() {
+      App.StackServiceComponent.find.restore();
+    });
+
+    it("should return installable client components", function() {
+      mainHostSummaryView.reopen({
+        installedServices: ['S1'],
+        clients: [
+          Em.Object.create({componentName: 'C2'})
+        ]
+      });
+      mainHostSummaryView.propertyDidChange('installableClientComponents');
+      expect(mainHostSummaryView.get('installableClientComponents').mapProperty('componentName')).to.eql(['C1']);
+    });
+  });
 });


[05/50] [abbrv] ambari git commit: AMBARI-14988: DB consistency - Add consistency check on clusterconfigmapping in ClusterImpl::getDesiredConfigs() for NPE.

Posted by jo...@apache.org.
AMBARI-14988: DB consistency - Add consistency check on clusterconfigmapping in ClusterImpl::getDesiredConfigs() for NPE.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/966f3031
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/966f3031
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/966f3031

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 966f3031d50b0106dfe5d19abbdd1e3b84d1f059
Parents: f247ddc
Author: Nahappan Somasundaram <ns...@hortonworks.com>
Authored: Wed Feb 10 13:10:28 2016 -0800
Committer: Nahappan Somasundaram <ns...@hortonworks.com>
Committed: Thu Feb 11 10:35:00 2016 -0800

----------------------------------------------------------------------
 .../apache/ambari/server/state/cluster/ClusterImpl.java | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/966f3031/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index 50e02a5..57941d0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -2267,7 +2267,17 @@ public class ClusterImpl implements Cluster {
                 " unknown configType=" + e.getType());
             continue;
           }
-          c.setVersion(allConfigs.get(e.getType()).get(e.getTag()).getVersion());
+
+          Map<String, Config> configMap = allConfigs.get(e.getType());
+          if(!configMap.containsKey(e.getTag())) {
+            LOG.debug("Config inconsistency exists for typeName=" +
+                    e.getType() +
+                    ", unknown versionTag=" + e.getTag());
+            continue;
+          }
+
+          Config config = configMap.get(e.getTag());
+          c.setVersion(config.getVersion());
 
           Set<DesiredConfig> configs = map.get(e.getType());
           if (configs == null) {


[03/50] [abbrv] ambari git commit: AMBARI-14800 Alerts: HDFS alerts based on AMS metrics (additional patch) (dsen)

Posted by jo...@apache.org.
AMBARI-14800 Alerts: HDFS alerts based on AMS metrics (additional patch) (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e0492163
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e0492163
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e0492163

Branch: refs/heads/branch-dev-patch-upgrade
Commit: e0492163d060e1840eecc1d463a9e5ddebbf81a5
Parents: 58fe67c
Author: Dmytro Sen <ds...@apache.org>
Authored: Thu Feb 11 19:37:11 2016 +0200
Committer: Dmytro Sen <ds...@apache.org>
Committed: Thu Feb 11 19:37:11 2016 +0200

----------------------------------------------------------------------
 .../common-services/HDFS/2.1.0.2.0/alerts.json      | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e0492163/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/alerts.json b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/alerts.json
index bba6c11..2a6229c 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/alerts.json
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/alerts.json
@@ -523,7 +523,7 @@
       },
       {
         "name": "increase_nn_heap_usage_hourly",
-        "label": "Hourly increase in NN heap usage",
+        "label": "NameNode Heap Usage (Hourly)",
         "description": "This service-level alert is triggered if the NN heap usage deviation has grown beyond the specified threshold within a given time interval.",
         "interval": 5,
         "scope": "ANY",
@@ -581,7 +581,7 @@
       },
       {
         "name": "namenode_service_rpc_latency_hourly",
-        "label": "Hourly Service-RPC latency",
+        "label": "NameNode RPC Latency (Hourly)",
         "description": "This service-level alert is triggered if the Service-RPC latency deviation has grown beyond the specified threshold within a given time interval.",
         "interval": 5,
         "scope": "ANY",
@@ -639,7 +639,7 @@
       },
       {
         "name": "namenode_increase_in_storage_capacity_usage_hourly",
-        "label": "Hourly increase in storage capacity usage",
+        "label": "HDFS Storage Capacity Usage (Hourly)",
         "description": "This service-level alert is triggered if the increase in storage capacity usage deviation has grown beyond the specified threshold within a given time interval.",
         "interval": 5,
         "scope": "ANY",
@@ -697,7 +697,7 @@
       },
       {
         "name": "increase_nn_heap_usage_daily",
-        "label": "Daily increase in NN heap usage",
+        "label": "NameNode Heap Usage (Daily)",
         "description": "This service-level alert is triggered if the NN heap usage deviation has grown beyond the specified threshold within a given time interval.",
         "interval": 480,
         "scope": "ANY",
@@ -755,7 +755,7 @@
       },
       {
         "name": "namenode_service_rpc_latency_daily",
-        "label": "Daily Service-RPC latency",
+        "label": "NameNode RPC Latency (Daily)",
         "description": "This service-level alert is triggered if the Service-RPC latency deviation has grown beyond the specified threshold within a given time interval.",
         "interval": 480,
         "scope": "ANY",
@@ -813,7 +813,7 @@
       },
       {
         "name": "namenode_increase_in_storage_capacity_usage_daily",
-        "label": "Daily increase in storage capacity usage",
+        "label": "HDFS Storage Capacity Usage (Daily)",
         "description": "This service-level alert is triggered if the increase in storage capacity usage deviation has grown beyond the specified threshold within a given time interval.",
         "interval": 480,
         "scope": "ANY",
@@ -871,7 +871,7 @@
       },
       {
         "name": "increase_nn_heap_usage_weekly",
-        "label": "Weekly increase in NN heap usage",
+        "label": "NameNode Heap Usage (Weekly)",
         "description": "This service-level alert is triggered if the NN heap usage deviation has grown beyond the specified threshold within a given time interval.",
         "interval": 1440,
         "scope": "ANY",
@@ -929,7 +929,7 @@
       },
       {
         "name": "namenode_increase_in_storage_capacity_usage_weekly",
-        "label": "Weekly increase in storage capacity usage",
+        "label": "HDFS Storage Capacity Usage (Weekly)",
         "description": "This service-level alert is triggered if the increase in storage capacity usage deviation has grown beyond the specified threshold within a given time interval.",
         "interval": 1440,
         "scope": "ANY",


[33/50] [abbrv] ambari git commit: AMBARI-14023. Agents should not ask for auto-start command details if it has the details (smohanty)

Posted by jo...@apache.org.
AMBARI-14023. Agents should not ask for auto-start command details if it has the details (smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/230c1d66
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/230c1d66
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/230c1d66

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 230c1d662d5322623e5402fee3f5d766bf985e6f
Parents: 604040f
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Sun Feb 14 11:07:20 2016 -0800
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Sun Feb 14 11:07:20 2016 -0800

----------------------------------------------------------------------
 .../src/main/python/ambari_agent/ActionQueue.py |  3 +-
 .../test/python/ambari_agent/TestActionQueue.py | 59 ++++++++++++++++++++
 2 files changed, 61 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/230c1d66/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/ActionQueue.py b/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
index a4c433d..bf7b5d9 100644
--- a/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
+++ b/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
@@ -451,7 +451,8 @@ class ActionQueue(threading.Thread):
         if self.controller.recovery_manager.enabled() \
             and self.controller.recovery_manager.configured_for_recovery(component):
           self.controller.recovery_manager.update_current_status(component, component_status)
-      request_execution_cmd = self.controller.recovery_manager.requires_recovery(component)
+      request_execution_cmd = self.controller.recovery_manager.requires_recovery(component) and \
+                                not self.controller.recovery_manager.command_exists(component, ActionQueue.EXECUTION_COMMAND)
 
       if component_status_result.has_key('structuredOut'):
         component_extra = component_status_result['structuredOut']

http://git-wip-us.apache.org/repos/asf/ambari/blob/230c1d66/ambari-agent/src/test/python/ambari_agent/TestActionQueue.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestActionQueue.py b/ambari-agent/src/test/python/ambari_agent/TestActionQueue.py
index f7e2894..3d7acbc 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestActionQueue.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestActionQueue.py
@@ -682,6 +682,65 @@ class TestActionQueue(TestCase):
     self.assertEqual(report['componentStatus'][0], expected)
     self.assertTrue(requestComponentStatus_mock.called)
 
+  @patch.object(RecoveryManager, "command_exists")
+  @patch.object(RecoveryManager, "requires_recovery")
+  @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
+  @patch.object(ActionQueue, "status_update_callback")
+  @patch.object(StackVersionsFileHandler, "read_stack_version")
+  @patch.object(CustomServiceOrchestrator, "requestComponentStatus")
+  @patch.object(CustomServiceOrchestrator, "requestComponentSecurityState")
+  @patch.object(ActionQueue, "execute_command")
+  @patch.object(LiveStatus, "build")
+  @patch.object(CustomServiceOrchestrator, "__init__")
+  def test_execute_status_command_recovery(self, CustomServiceOrchestrator_mock,
+                                  build_mock, execute_command_mock, requestComponentSecurityState_mock,
+                                  requestComponentStatus_mock, read_stack_version_mock,
+                                  status_update_callback, requires_recovery_mock,
+                                  command_exists_mock):
+    CustomServiceOrchestrator_mock.return_value = None
+    dummy_controller = MagicMock()
+    actionQueue = ActionQueue(AmbariConfig(), dummy_controller)
+
+    build_mock.return_value = {'dummy report': '' }
+    requires_recovery_mock.return_value = True
+    command_exists_mock.return_value = False
+
+    dummy_controller.recovery_manager = RecoveryManager(tempfile.mktemp(), True, False)
+
+    requestComponentStatus_mock.reset_mock()
+    requestComponentStatus_mock.return_value = {'exitcode': 0 }
+
+    requestComponentSecurityState_mock.reset_mock()
+    requestComponentSecurityState_mock.return_value = 'UNKNOWN'
+
+    actionQueue.execute_status_command(self.status_command)
+    report = actionQueue.result()
+    expected = {'dummy report': '',
+                'securityState' : 'UNKNOWN',
+                'sendExecCmdDet': 'True'}
+
+    self.assertEqual(len(report['componentStatus']), 1)
+    self.assertEqual(report['componentStatus'][0], expected)
+    self.assertTrue(requestComponentStatus_mock.called)
+
+    requires_recovery_mock.return_value = True
+    command_exists_mock.return_value = True
+    requestComponentStatus_mock.reset_mock()
+    requestComponentStatus_mock.return_value = {'exitcode': 0 }
+
+    requestComponentSecurityState_mock.reset_mock()
+    requestComponentSecurityState_mock.return_value = 'UNKNOWN'
+
+    actionQueue.execute_status_command(self.status_command)
+    report = actionQueue.result()
+    expected = {'dummy report': '',
+                'securityState' : 'UNKNOWN',
+                'sendExecCmdDet': 'False'}
+
+    self.assertEqual(len(report['componentStatus']), 1)
+    self.assertEqual(report['componentStatus'][0], expected)
+    self.assertTrue(requestComponentStatus_mock.called)
+
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
   @patch.object(ActionQueue, "status_update_callback")
   @patch.object(StackVersionsFileHandler, "read_stack_version")


[08/50] [abbrv] ambari git commit: AMBARI-14976 Add Grafana QuickLinks for AMS Service in Ambari Web. (atkach)

Posted by jo...@apache.org.
AMBARI-14976 Add Grafana QuickLinks for AMS Service in Ambari Web. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a638ccb5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a638ccb5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a638ccb5

Branch: refs/heads/branch-dev-patch-upgrade
Commit: a638ccb5345cb7f34e2c42b075c33cf3814565db
Parents: 7d1ab29
Author: Andrii Tkach <at...@hortonworks.com>
Authored: Wed Feb 10 16:19:33 2016 +0200
Committer: Andrii Tkach <at...@hortonworks.com>
Committed: Thu Feb 11 21:19:13 2016 +0200

----------------------------------------------------------------------
 .../AMBARI_METRICS/0.1.0/metainfo.xml           |  7 ++++
 .../0.1.0/quickLinks/quicklinks.json            | 34 ++++++++++++++++++++
 .../app/mappers/service_metrics_mapper.js       |  3 +-
 ambari-web/app/models/quick_links.js            | 12 +++++++
 .../app/views/common/quick_view_link_view.js    |  3 ++
 .../test/views/common/quick_link_view_test.js   |  5 +++
 6 files changed, 63 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a638ccb5/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
index 3a832eb..67a93a5 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
@@ -161,6 +161,13 @@
         <config-type>storm-site</config-type>
       </excluded-config-types>
 
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a638ccb5/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/quickLinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/quickLinks/quicklinks.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/quickLinks/quicklinks.json
new file mode 100644
index 0000000..09e7de5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/quickLinks/quicklinks.json
@@ -0,0 +1,34 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"https",
+      "checks":[
+        {
+          "property":"protocol",
+          "desired":"https",
+          "site":"ams-grafana-ini"
+        }
+      ]
+    },
+    "links": [
+      {
+        "name": "metrics_ui_server",
+        "label": "Metrics UI Server",
+        "requires_user_name": "false",
+        "url":"%@://%@:%@",
+        "template":"%@://%@:%@",
+        "port":{
+          "http_property": "port",
+          "http_default_port": "3000",
+          "https_property": "port",
+          "https_default_port": "3000",
+          "regex": "^(\\d+)$",
+          "site": "ams-grafana-ini"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/a638ccb5/ambari-web/app/mappers/service_metrics_mapper.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mappers/service_metrics_mapper.js b/ambari-web/app/mappers/service_metrics_mapper.js
index 1ac2b54..80e86c6 100644
--- a/ambari-web/app/mappers/service_metrics_mapper.js
+++ b/ambari-web/app/mappers/service_metrics_mapper.js
@@ -387,7 +387,8 @@ App.serviceMetricsMapper = App.QuickDataMapper.create({
       RANGER: [33],
       SPARK: [34],
       ACCUMULO: [35],
-      ATLAS: [36]
+      ATLAS: [36],
+      AMBARI_METRICS: [37]
     };
     if (quickLinks[item.ServiceInfo.service_name])
       finalJson.quick_links = quickLinks[item.ServiceInfo.service_name];

http://git-wip-us.apache.org/repos/asf/ambari/blob/a638ccb5/ambari-web/app/models/quick_links.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/quick_links.js b/ambari-web/app/models/quick_links.js
index eb9dfd6..f6c1fb5 100644
--- a/ambari-web/app/models/quick_links.js
+++ b/ambari-web/app/models/quick_links.js
@@ -343,6 +343,18 @@ App.QuickLinks.FIXTURES = [
     regex: '^(\\d+)$',
     default_http_port: 21000,
     default_https_port: 21443
+  },
+  {
+    id:37,
+    label:'Metrics UI Server',
+    url:'%@://%@:%@',
+    service_id: 'AMBARI_METRICS',
+    template:'%@://%@:%@',
+    http_config: 'port',
+    site: 'ams-grafana-ini',
+    regex: '^(\\d+)$',
+    default_http_port: 3000,
+    default_https_port: 3000
   }
 
 ];

http://git-wip-us.apache.org/repos/asf/ambari/blob/a638ccb5/ambari-web/app/views/common/quick_view_link_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/quick_view_link_view.js b/ambari-web/app/views/common/quick_view_link_view.js
index 1eef254..2123668 100644
--- a/ambari-web/app/views/common/quick_view_link_view.js
+++ b/ambari-web/app/views/common/quick_view_link_view.js
@@ -549,6 +549,9 @@ App.QuickViewLinks = Em.View.extend({
       case "MAPREDUCE2":
         hosts = this.findHosts('HISTORYSERVER', response);
         break;
+      case "AMBARI_METRICS":
+        hosts = this.findHosts('METRICS_GRAFANA', response);
+        break;
       default:
         if (this.getWithDefault('content.hostComponents', []).someProperty('isMaster')) {
           hosts = this.findHosts(this.get('content.hostComponents').findProperty('isMaster').get('componentName'), response);

http://git-wip-us.apache.org/repos/asf/ambari/blob/a638ccb5/ambari-web/test/views/common/quick_link_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/common/quick_link_view_test.js b/ambari-web/test/views/common/quick_link_view_test.js
index 3a604fe..b9050f8 100644
--- a/ambari-web/test/views/common/quick_link_view_test.js
+++ b/ambari-web/test/views/common/quick_link_view_test.js
@@ -959,6 +959,11 @@ describe('App.QuickViewLinks', function () {
       expect(quickViewLinks.findHosts.calledWith('HISTORYSERVER', {})).to.be.true;
     });
 
+    it("AMBARI_METRICS service", function() {
+      expect(quickViewLinks.getHosts({}, 'AMBARI_METRICS')).to.eql(['host1']);
+      expect(quickViewLinks.findHosts.calledWith('METRICS_GRAFANA', {})).to.be.true;
+    });
+
     it("custom service without master", function() {
       expect(quickViewLinks.getHosts({}, 'S1')).to.be.empty;
     });


[15/50] [abbrv] ambari git commit: AMBARI-15028. Critical alert for Kafka when using custom port(stack 2.3 and higher).(vbrodetskyi)

Posted by jo...@apache.org.
AMBARI-15028. Critical alert for Kafka when using custom port(stack 2.3 and higher).(vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0a9a3aa5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0a9a3aa5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0a9a3aa5

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 0a9a3aa514f4859f2f4560fd3bd6c6036b173de8
Parents: b22aa2e
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Fri Feb 12 03:10:15 2016 +0200
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Fri Feb 12 03:10:15 2016 +0200

----------------------------------------------------------------------
 .../python/ambari_agent/alerts/port_alert.py    |  2 +-
 .../stacks/HDP/2.3/services/KAFKA/alerts.json   | 32 ++++++++++++++++++++
 2 files changed, 33 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0a9a3aa5/ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py b/ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py
index 848da65..92d28ad 100644
--- a/ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py
+++ b/ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py
@@ -102,7 +102,7 @@ class PortAlert(BaseAlert):
 
 
     host = BaseAlert.get_host_from_url(uri_value)
-    if host is None:
+    if host is None or host == "localhost" or host == "0.0.0.0":
       host = self.host_name
 
     try:

http://git-wip-us.apache.org/repos/asf/ambari/blob/0a9a3aa5/ambari-server/src/main/resources/stacks/HDP/2.3/services/KAFKA/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/KAFKA/alerts.json b/ambari-server/src/main/resources/stacks/HDP/2.3/services/KAFKA/alerts.json
new file mode 100644
index 0000000..04fb583
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/KAFKA/alerts.json
@@ -0,0 +1,32 @@
+{
+  "KAFKA": {
+    "service": [],
+    "KAFKA_BROKER": [
+      {
+        "name": "kafka_broker_process",
+        "label": "Kafka Broker Process",
+        "description": "This host-level alert is triggered if the Kafka Broker cannot be determined to be up.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "PORT",
+          "uri": "{{kafka-broker/listeners}}",
+          "default_port": 6667,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}


[46/50] [abbrv] ambari git commit: AMBARI-15050 Https Support for Metrics System (dsen)

Posted by jo...@apache.org.
AMBARI-15050 Https Support for Metrics System (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7e75e52a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7e75e52a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7e75e52a

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 7e75e52a91568d7ed0dd2bdaafbd60b37a3e7dc8
Parents: 0868a0f
Author: Dmytro Sen <ds...@apache.org>
Authored: Tue Feb 16 17:53:11 2016 +0200
Committer: Dmytro Sen <ds...@apache.org>
Committed: Tue Feb 16 17:53:11 2016 +0200

----------------------------------------------------------------------
 .../timeline/AbstractTimelineMetricsSink.java   |   92 +-
 .../src/main/conf/flume-metrics2.properties.j2  |    3 +-
 .../sink/flume/FlumeTimelineMetricsSink.java    |   11 +-
 .../conf/hadoop-metrics2-hbase.properties.j2    |    8 +-
 .../src/main/conf/hadoop-metrics2.properties.j2 |   22 +-
 .../timeline/HadoopTimelineMetricsSink.java     |   13 +-
 .../timeline/HadoopTimelineMetricsSinkTest.java |    6 +-
 .../conf/unix/metric_monitor.ini                |    1 +
 .../src/main/python/core/config_reader.py       |    6 +-
 .../src/main/python/core/emitter.py             |   18 +-
 .../kafka/KafkaTimelineMetricsReporter.java     |   17 +-
 .../kafka/KafkaTimelineMetricsReporterTest.java |    2 +-
 .../storm/StormTimelineMetricsReporter.java     |   24 +-
 .../sink/storm/StormTimelineMetricsSink.java    |    8 +-
 .../ApplicationHistoryServer.java               |   11 +-
 .../loadsimulator/net/RestMetricsSender.java    |    6 +-
 .../timeline/TimelineMetricConfiguration.java   |    6 +-
 .../ComponentSSLConfiguration.java              |   14 +-
 .../server/configuration/Configuration.java     |    6 +-
 .../ganglia/GangliaPropertyProvider.java        |    2 +-
 .../ganglia/GangliaReportPropertyProvider.java  |    2 +-
 .../metrics/timeline/AMSPropertyProvider.java   |    8 +-
 .../timeline/AMSReportPropertyProvider.java     |    2 +-
 .../ACCUMULO/1.6.1.2.2.0/metainfo.xml           |    1 +
 .../1.6.1.2.2.0/package/scripts/params.py       |    7 +
 .../hadoop-metrics2-accumulo.properties.j2      |    7 +-
 .../0.1.0/configuration/ams-site.xml            |   12 +-
 .../0.1.0/configuration/ams-ssl-client.xml      |   37 +
 .../0.1.0/configuration/ams-ssl-server.xml      |   64 +
 .../AMBARI_METRICS/0.1.0/metainfo.xml           |    2 +
 .../AMBARI_METRICS/0.1.0/package/scripts/ams.py |    8 +
 .../package/scripts/metrics_grafana_util.py     |    2 +-
 .../0.1.0/package/scripts/params.py             |   10 +
 .../0.1.0/package/scripts/service_check.py      |   17 +-
 .../hadoop-metrics2-hbase.properties.j2         |    7 +-
 .../package/templates/metric_monitor.ini.j2     |    1 +
 .../FLUME/1.4.0.2.0/metainfo.xml                |    1 +
 .../FLUME/1.4.0.2.0/package/scripts/params.py   |    7 +
 .../templates/flume-metrics2.properties.j2      |    8 +-
 .../common-services/HAWQ/2.0.0/metainfo.xml     |    1 +
 .../HBASE/0.96.0.2.0/metainfo.xml               |    1 +
 .../0.96.0.2.0/package/scripts/params_linux.py  |    8 +
 ...-metrics2-hbase.properties-GANGLIA-MASTER.j2 |    7 +-
 ...doop-metrics2-hbase.properties-GANGLIA-RS.j2 |    7 +-
 .../common-services/HDFS/2.1.0.2.0/metainfo.xml |    1 +
 .../0.8.1.2.2/configuration/kafka-broker.xml    |   21 +
 .../KAFKA/0.8.1.2.2/metainfo.xml                |    1 +
 .../KAFKA/0.8.1.2.2/package/scripts/kafka.py    |    4 +
 .../KAFKA/0.8.1.2.2/package/scripts/params.py   |   10 +-
 .../STORM/0.9.1.2.1/metainfo.xml                |    1 +
 .../0.9.1.2.1/package/scripts/params_linux.py   |   11 +-
 .../0.9.1.2.1/package/templates/config.yaml.j2  |    8 +-
 .../templates/storm-metrics2.properties.j2      |   10 +-
 .../common-services/YARN/2.1.0.2.0/metainfo.xml |    2 +
 .../2.0.6/hooks/before-START/scripts/params.py  |    8 +
 .../templates/hadoop-metrics2.properties.j2     |   25 +-
 .../ComponentSSLConfigurationTest.java          |    6 +-
 .../ganglia/GangliaPropertyProviderTest.java    |   26 +-
 .../GangliaReportPropertyProviderTest.java      |    2 +-
 .../timeline/AMSPropertyProviderTest.java       |   30 +-
 .../timeline/AMSReportPropertyProviderTest.java |    4 +-
 .../AMBARI_METRICS/test_metrics_collector.py    |    8 +
 .../python/stacks/2.0.6/configs/default.json    | 1146 +++++++++---------
 .../2.0.6/configs/default_ams_embedded.json     |    7 +
 64 files changed, 1113 insertions(+), 721 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
index 9173889..2854898 100644
--- a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
@@ -24,21 +24,35 @@ import org.codehaus.jackson.map.ObjectMapper;
 import org.codehaus.jackson.map.annotate.JsonSerialize;
 import org.codehaus.jackson.xc.JaxbAnnotationIntrospector;
 
+import javax.net.ssl.HttpsURLConnection;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.SSLSocketFactory;
+import javax.net.ssl.TrustManagerFactory;
+import java.io.File;
+import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.net.HttpURLConnection;
 import java.net.URL;
+import java.security.KeyStore;
 
 public abstract class AbstractTimelineMetricsSink {
   public static final String TAGS_FOR_PREFIX_PROPERTY_PREFIX = "tagsForPrefix.";
   public static final String MAX_METRIC_ROW_CACHE_SIZE = "maxRowCacheSize";
   public static final String METRICS_SEND_INTERVAL = "sendInterval";
   public static final String METRICS_POST_TIMEOUT_SECONDS = "timeout";
-  public static final String COLLECTOR_HOST_PROPERTY = "collector";
-  public static final String COLLECTOR_PORT_PROPERTY = "port";
+  public static final String COLLECTOR_PROPERTY = "collector";
   public static final int DEFAULT_POST_TIMEOUT_SECONDS = 10;
   public static final String SKIP_COUNTER_TRANSFROMATION = "skipCounterDerivative";
 
+  public static final String WS_V1_TIMELINE_METRICS = "/ws/v1/timeline/metrics";
+
+  public static final String SSL_KEYSTORE_PATH_PROPERTY = "truststore.path";
+  public static final String SSL_KEYSTORE_TYPE_PROPERTY = "truststore.type";
+  public static final String SSL_KEYSTORE_PASSWORD_PROPERTY = "truststore.password";
+
+  private SSLSocketFactory sslSocketFactory;
+
   protected final Log LOG;
 
   protected static ObjectMapper mapper;
@@ -48,7 +62,7 @@ public abstract class AbstractTimelineMetricsSink {
     AnnotationIntrospector introspector = new JaxbAnnotationIntrospector();
     mapper.setAnnotationIntrospector(introspector);
     mapper.getSerializationConfig()
-        .setSerializationInclusion(JsonSerialize.Inclusion.NON_NULL);
+      .withSerializationInclusion(JsonSerialize.Inclusion.NON_NULL);
   }
 
   public AbstractTimelineMetricsSink() {
@@ -59,9 +73,13 @@ public abstract class AbstractTimelineMetricsSink {
     String connectUrl = getCollectorUri();
     int timeout = getTimeoutSeconds() * 1000;
     try {
+      if (connectUrl == null) {
+        throw new IOException("Unknown URL. " +
+          "Unable to connect to metrics collector.");
+      }
       String jsonData = mapper.writeValueAsString(metrics);
-
-      HttpURLConnection connection = (HttpURLConnection) new URL(connectUrl).openConnection();
+      HttpURLConnection connection = connectUrl.startsWith("https") ?
+        getSSLConnection(connectUrl) : getConnection(connectUrl);
 
       connection.setRequestMethod("POST");
       connection.setRequestProperty("Content-Type", "application/json");
@@ -81,13 +99,75 @@ public abstract class AbstractTimelineMetricsSink {
         LOG.info("Unable to POST metrics to collector, " + connectUrl + ", " +
           "statusCode = " + statusCode);
       } else {
-        LOG.debug("Metrics posted to Collector " + connectUrl);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Metrics posted to Collector " + connectUrl);
+        }
       }
     } catch (IOException e) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Unable to connect to collector, " + connectUrl, e);
+      } else {
+        LOG.info("Unable to connect to collector, " + connectUrl);
+      }
       throw new UnableToConnectException(e).setConnectUrl(connectUrl);
     }
   }
 
+  // Get a connection
+  protected HttpURLConnection getConnection(String spec) throws IOException {
+    return (HttpURLConnection) new URL(spec).openConnection();
+  }
+
+  // Get an ssl connection
+  protected HttpsURLConnection getSSLConnection(String spec)
+    throws IOException, IllegalStateException {
+
+    HttpsURLConnection connection = (HttpsURLConnection) (new URL(spec)
+      .openConnection());
+
+    connection.setSSLSocketFactory(sslSocketFactory);
+
+    return connection;
+  }
+
+  protected void loadTruststore(String trustStorePath, String trustStoreType,
+                                String trustStorePassword) {
+    if (sslSocketFactory == null) {
+      if (trustStorePath == null || trustStorePassword == null) {
+
+        String msg =
+          String.format("Can't load TrustStore. " +
+            "Truststore path or password is not set.");
+
+        LOG.error(msg);
+        throw new IllegalStateException(msg);
+      }
+      FileInputStream in = null;
+      try {
+        in = new FileInputStream(new File(trustStorePath));
+        KeyStore store = KeyStore.getInstance(trustStoreType == null ?
+          KeyStore.getDefaultType() : trustStoreType);
+        store.load(in, trustStorePassword.toCharArray());
+        TrustManagerFactory tmf = TrustManagerFactory
+          .getInstance(TrustManagerFactory.getDefaultAlgorithm());
+        tmf.init(store);
+        SSLContext context = SSLContext.getInstance("TLS");
+        context.init(null, tmf.getTrustManagers(), null);
+        sslSocketFactory = context.getSocketFactory();
+      } catch (Exception e) {
+        LOG.error("Unable to load TrustStore", e);
+      } finally {
+        if (in != null) {
+          try {
+            in.close();
+          } catch (IOException e) {
+            LOG.error("Unable to load TrustStore", e);
+          }
+        }
+      }
+    }
+  }
+
   abstract protected String getCollectorUri();
 
   abstract protected int getTimeoutSeconds();

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-flume-sink/src/main/conf/flume-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-flume-sink/src/main/conf/flume-metrics2.properties.j2 b/ambari-metrics/ambari-metrics-flume-sink/src/main/conf/flume-metrics2.properties.j2
index bc18043..f9b303e 100644
--- a/ambari-metrics/ambari-metrics-flume-sink/src/main/conf/flume-metrics2.properties.j2
+++ b/ambari-metrics/ambari-metrics-flume-sink/src/main/conf/flume-metrics2.properties.j2
@@ -16,8 +16,7 @@
 # limitations under the License.
 #}
 
-collector={{metric_collector_host}}
-port={{metric_collector_port}}
+collector=http://localhost:6188
 collectionFrequency=60000
 maxRowCacheSize=10000
 sendInterval=59000

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-flume-sink/src/main/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-flume-sink/src/main/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-flume-sink/src/main/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSink.java
index cf2b4ae..3040c48 100644
--- a/ambari-metrics/ambari-metrics-flume-sink/src/main/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-flume-sink/src/main/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSink.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.metrics2.sink.flume;
 
-import org.apache.commons.lang.ClassUtils;
 import org.apache.commons.lang.math.NumberUtils;
 import org.apache.flume.Context;
 import org.apache.flume.FlumeException;
@@ -96,9 +95,13 @@ public class FlumeTimelineMetricsSink extends AbstractTimelineMetricsSink implem
     metricsSendInterval = Integer.parseInt(configuration.getProperty(METRICS_SEND_INTERVAL,
         String.valueOf(TimelineMetricsCache.MAX_EVICTION_TIME_MILLIS)));
     metricsCaches = new HashMap<String, TimelineMetricsCache>();
-    String collectorHostname = configuration.getProperty(COLLECTOR_HOST_PROPERTY);
-    String port = configuration.getProperty(COLLECTOR_PORT_PROPERTY);
-    collectorUri = "http://" + collectorHostname + ":" + port + "/ws/v1/timeline/metrics";
+    collectorUri = configuration.getProperty(COLLECTOR_PROPERTY) + WS_V1_TIMELINE_METRICS;
+    if (collectorUri.toLowerCase().startsWith("https://")) {
+      String trustStorePath = configuration.getProperty(SSL_KEYSTORE_PATH_PROPERTY).trim();
+      String trustStoreType = configuration.getProperty(SSL_KEYSTORE_TYPE_PROPERTY).trim();
+      String trustStorePwd = configuration.getProperty(SSL_KEYSTORE_PASSWORD_PROPERTY).trim();
+      loadTruststore(trustStorePath, trustStoreType, trustStorePwd);
+    }
     pollFrequency = Long.parseLong(configuration.getProperty("collectionFrequency"));
 
     String[] metrics = configuration.getProperty(COUNTER_METRICS_PROPERTY).trim().split(",");

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/conf/hadoop-metrics2-hbase.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/conf/hadoop-metrics2-hbase.properties.j2 b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/conf/hadoop-metrics2-hbase.properties.j2
index 0d13498..c819301 100644
--- a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/conf/hadoop-metrics2-hbase.properties.j2
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/conf/hadoop-metrics2-hbase.properties.j2
@@ -31,19 +31,19 @@ hbase.extendedperiod = 3600
 # Configuration of the "hbase" context for timeline metrics service
 hbase.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
 hbase.period=10
-hbase.collector={{timeline_server_hosts}}:8188
+hbase.collector={{timeline_server_hosts}}:6188
 
 # Configuration of the "jvm" context for timeline metrics service
 jvm.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
 jvm.period=10
-jvm.collector={{timeline_server_hosts}}:8188
+jvm.collector={{timeline_server_hosts}}:6188
 
 # Configuration of the "rpc" context for timeline metrics service
 rpc.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
 rpc.period=10
-rpc.collector={{timeline_server_hosts}}:8188
+rpc.collector={{timeline_server_hosts}}:6188
 
 # Following hadoop example
 hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
 hbase.sink.timeline.period=10
-hbase.sink.timeline.collector={{timeline_server_hosts}}:8188
\ No newline at end of file
+hbase.sink.timeline.collector=http://{{timeline_server_hosts}}:6188
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/conf/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/conf/hadoop-metrics2.properties.j2 b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/conf/hadoop-metrics2.properties.j2
index 76a00d1..ec1377d 100644
--- a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/conf/hadoop-metrics2.properties.j2
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/conf/hadoop-metrics2.properties.j2
@@ -42,17 +42,17 @@
 
 
 # Hook up to the server
-datanode.sink.timeline.collector={{timeline_server_hosts}}:8188
-namenode.sink.timeline.collector={{timeline_server_hosts}}:8188
-resourcemanager.sink.timeline.collector={{timeline_server_hosts}}:8188
-nodemanager.sink.timeline.collector={{timeline_server_hosts}}:8188
-historyserver.sink.timeline.collector={{timeline_server_hosts}}:8188
-journalnode.sink.timeline.collector={{timeline_server_hosts}}:8188
-nimbus.sink.timeline.collector={{timeline_server_hosts}}:8188
-supervisor.sink.timeline.collector={{timeline_server_hosts}}:8188
-maptask.sink.timeline.collector={{timeline_server_hosts}}:8188
-reducetask.sink.timeline.collector={{timeline_server_hosts}}:8188
+datanode.sink.timeline.collector=http://localhost:6188
+namenode.sink.timeline.collector=http://localhost:6188
+resourcemanager.sink.timeline.collector=http://localhost:6188
+nodemanager.sink.timeline.collector=http://localhost:6188
+historyserver.sink.timeline.collector=http://localhost:6188
+journalnode.sink.timeline.collector=http://localhost:6188
+nimbus.sink.timeline.collector=http://localhost:6188
+supervisor.sink.timeline.collector=http://localhost:6188
+maptask.sink.timeline.collector=http://localhost:6188
+reducetask.sink.timeline.collector=http://localhost:6188
 
 resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
 
-{% endif %}
\ No newline at end of file
+{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
index 000b82e..6da9257 100644
--- a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
@@ -29,7 +29,6 @@ import java.util.Map;
 import java.util.Set;
 
 import org.apache.commons.configuration.SubsetConfiguration;
-import org.apache.commons.lang.ClassUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -79,13 +78,19 @@ public class HadoopTimelineMetricsSink extends AbstractTimelineMetricsSink imple
     LOG.info("Identified hostname = " + hostName + ", serviceName = " + serviceName);
 
     // Load collector configs
-    metricsServers = Servers.parse(conf.getString(COLLECTOR_HOST_PROPERTY), 6188);
+    metricsServers = Servers.parse(conf.getString(COLLECTOR_PROPERTY), 6188);
 
     if (metricsServers == null || metricsServers.isEmpty()) {
       LOG.error("No Metric collector configured.");
     } else {
-      collectorUri = "http://" + conf.getString(COLLECTOR_HOST_PROPERTY).trim()
-          + "/ws/v1/timeline/metrics";
+      collectorUri = conf.getString(COLLECTOR_PROPERTY).trim()
+          + WS_V1_TIMELINE_METRICS;
+      if (collectorUri.toLowerCase().startsWith("https://")) {
+        String trustStorePath = conf.getString(SSL_KEYSTORE_PATH_PROPERTY).trim();
+        String trustStoreType = conf.getString(SSL_KEYSTORE_TYPE_PROPERTY).trim();
+        String trustStorePwd = conf.getString(SSL_KEYSTORE_PASSWORD_PROPERTY).trim();
+        loadTruststore(trustStorePath, trustStoreType, trustStorePwd);
+      }
     }
 
     LOG.info("Collector Uri: " + collectorUri);

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java b/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java
index 6b23f36..528384e 100644
--- a/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.metrics2.sink.timeline;
 
-import static org.apache.hadoop.metrics2.sink.timeline.AbstractTimelineMetricsSink.COLLECTOR_HOST_PROPERTY;
+import static org.apache.hadoop.metrics2.sink.timeline.AbstractTimelineMetricsSink.COLLECTOR_PROPERTY;
 import static org.apache.hadoop.metrics2.sink.timeline.AbstractTimelineMetricsSink.MAX_METRIC_ROW_CACHE_SIZE;
 import static org.apache.hadoop.metrics2.sink.timeline.AbstractTimelineMetricsSink.METRICS_SEND_INTERVAL;
 import static org.easymock.EasyMock.anyInt;
@@ -62,7 +62,7 @@ public class HadoopTimelineMetricsSinkTest {
     expect(conf.getString(eq("slave.host.name"))).andReturn("testhost").anyTimes();
     expect(conf.getParent()).andReturn(null).anyTimes();
     expect(conf.getPrefix()).andReturn("service").anyTimes();
-    expect(conf.getString(eq(COLLECTOR_HOST_PROPERTY))).andReturn("localhost:63188").anyTimes();
+    expect(conf.getString(eq(COLLECTOR_PROPERTY))).andReturn("localhost:63188").anyTimes();
     expect(conf.getString(eq("serviceName-prefix"), eq(""))).andReturn("").anyTimes();
 
     expect(conf.getInt(eq(MAX_METRIC_ROW_CACHE_SIZE), anyInt())).andReturn(10).anyTimes();
@@ -130,7 +130,7 @@ public class HadoopTimelineMetricsSinkTest {
     expect(conf.getString(eq("slave.host.name"))).andReturn("testhost").anyTimes();
     expect(conf.getParent()).andReturn(null).anyTimes();
     expect(conf.getPrefix()).andReturn("service").anyTimes();
-    expect(conf.getString(eq(COLLECTOR_HOST_PROPERTY))).andReturn("localhost:63188").anyTimes();
+    expect(conf.getString(eq(COLLECTOR_PROPERTY))).andReturn("localhost:63188").anyTimes();
     expect(conf.getString(eq("serviceName-prefix"), eq(""))).andReturn("").anyTimes();
 
     expect(conf.getInt(eq(MAX_METRIC_ROW_CACHE_SIZE), anyInt())).andReturn(10).anyTimes();

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/metric_monitor.ini
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/metric_monitor.ini b/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/metric_monitor.ini
index d302518..5952982 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/metric_monitor.ini
+++ b/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/metric_monitor.ini
@@ -22,6 +22,7 @@ metrics_server = localhost:{{ams_collector_port}}
 hostname = {{hostname}}
 enable_time_threshold = false
 enable_value_threshold = false
+https_enabled = false
 
 [emitter]
 send_interval = 60

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
index 48f5af2..d533537 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
@@ -91,6 +91,7 @@ debug_level = INFO
 metrics_server = host:port
 enable_time_threshold = false
 enable_value_threshold = false
+https_enabled = false
 
 [emitter]
 send_interval = 60
@@ -181,7 +182,7 @@ class Configuration:
 
   def get(self, section, key, default=None):
     try:
-      value = self.config.get(section, key)
+      value = str(self.config.get(section, key)).strip()
     except:
       return default
     return value
@@ -209,3 +210,6 @@ class Configuration:
 
   def get_max_queue_size(self):
     return int(self.get("collector", "max_queue_size", 5000))
+
+  def get_server_https_enabled(self):
+    return "true" == str(self.get("default", "https_enabled")).lower()

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py
index 6f66093..4e39ab5 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py
@@ -20,13 +20,12 @@ limitations under the License.
 
 import logging
 import threading
-import time
 import urllib2
 
 logger = logging.getLogger()
 
 class Emitter(threading.Thread):
-  COLLECTOR_URL = "http://{0}/ws/v1/timeline/metrics"
+  COLLECTOR_URL = "{0}://{1}/ws/v1/timeline/metrics"
   RETRY_SLEEP_INTERVAL = 5
   MAX_RETRY_COUNT = 3
   """
@@ -36,10 +35,12 @@ class Emitter(threading.Thread):
     threading.Thread.__init__(self)
     logger.debug('Initializing Emitter thread.')
     self.lock = threading.Lock()
-    self.collector_address = config.get_server_address()
     self.send_interval = config.get_send_interval()
     self._stop_handler = stop_handler
     self.application_metric_map = application_metric_map
+    # TODO verify certificate
+    protocol = 'https' if config.get_server_https_enabled() else 'http'
+    self.collector_url = self.COLLECTOR_URL.format(protocol, config.get_server_address())
 
   def run(self):
     logger.info('Running Emitter thread: %s' % threading.currentThread().getName())
@@ -54,7 +55,7 @@ class Emitter(threading.Thread):
         logger.info('Shutting down Emitter thread')
         return
     pass
-  
+
   def submit_metrics(self):
     retry_count = 0
     # This call will acquire lock on the map and clear contents before returning
@@ -73,7 +74,7 @@ class Emitter(threading.Thread):
       except Exception, e:
         logger.warn('Error sending metrics to server. %s' % str(e))
       pass
-  
+
       if response and response.getcode() == 200:
         retry_count = self.MAX_RETRY_COUNT
       else:
@@ -84,13 +85,12 @@ class Emitter(threading.Thread):
           return
       pass
     pass
-  
+    # TODO verify certificate
   def push_metrics(self, data):
     headers = {"Content-Type" : "application/json", "Accept" : "*/*"}
-    server = self.COLLECTOR_URL.format(self.collector_address.strip())
-    logger.info("server: %s" % server)
+    logger.info("server: %s" % self.collector_url)
     logger.debug("message to sent: %s" % data)
-    req = urllib2.Request(server, data, headers)
+    req = urllib2.Request(self.collector_url, data, headers)
     response = urllib2.urlopen(req, timeout=int(self.send_interval - 10))
     if response:
       logger.debug("POST response from server: retcode = {0}".format(response.getcode()))

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java b/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
index 4915435..d6d251c 100644
--- a/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
+++ b/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
@@ -63,11 +63,13 @@ public class KafkaTimelineMetricsReporter extends AbstractTimelineMetricsSink
   private static final String TIMELINE_METRICS_MAX_ROW_CACHE_SIZE_PROPERTY = "kafka.timeline.metrics.maxRowCacheSize";
   private static final String TIMELINE_HOST_PROPERTY = "kafka.timeline.metrics.host";
   private static final String TIMELINE_PORT_PROPERTY = "kafka.timeline.metrics.port";
+  private static final String TIMELINE_PROTOCOL_PROPERTY = "kafka.timeline.metrics.protocol";
   private static final String TIMELINE_REPORTER_ENABLED_PROPERTY = "kafka.timeline.metrics.reporter.enabled";
   private static final String EXCLUDED_METRICS_PROPERTY = "external.kafka.metrics.exclude.prefix";
   private static final String INCLUDED_METRICS_PROPERTY = "external.kafka.metrics.include.prefix";
   private static final String TIMELINE_DEFAULT_HOST = "localhost";
-  private static final String TIMELINE_DEFAULT_PORT = "8188";
+  private static final String TIMELINE_DEFAULT_PORT = "6188";
+  private static final String TIMELINE_DEFAULT_PROTOCOL = "http";
 
   private boolean initialized = false;
   private boolean running = false;
@@ -117,8 +119,19 @@ public class KafkaTimelineMetricsReporter extends AbstractTimelineMetricsSink
         int maxRowCacheSize = props.getInt(TIMELINE_METRICS_MAX_ROW_CACHE_SIZE_PROPERTY, MAX_RECS_PER_NAME_DEFAULT);
         String metricCollectorHost = props.getString(TIMELINE_HOST_PROPERTY, TIMELINE_DEFAULT_HOST);
         String metricCollectorPort = props.getString(TIMELINE_PORT_PROPERTY, TIMELINE_DEFAULT_PORT);
+        String metricCollectorProtocol = props.getString(TIMELINE_PROTOCOL_PROPERTY, TIMELINE_DEFAULT_PROTOCOL);
         setMetricsCache(new TimelineMetricsCache(maxRowCacheSize, metricsSendInterval));
-        collectorUri = "http://" + metricCollectorHost + ":" + metricCollectorPort + "/ws/v1/timeline/metrics";
+
+        collectorUri = metricCollectorProtocol + "://" + metricCollectorHost +
+                       ":" + metricCollectorPort + WS_V1_TIMELINE_METRICS;
+
+        if (collectorUri.toLowerCase().startsWith("https://")) {
+          String trustStorePath = props.getString(SSL_KEYSTORE_PATH_PROPERTY).trim();
+          String trustStoreType = props.getString(SSL_KEYSTORE_TYPE_PROPERTY).trim();
+          String trustStorePwd = props.getString(SSL_KEYSTORE_PASSWORD_PROPERTY).trim();
+          loadTruststore(trustStorePath, trustStoreType, trustStorePwd);
+        }
+
 
         // Exclusion policy
         String excludedMetricsStr = props.getString(EXCLUDED_METRICS_PROPERTY, "");

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-kafka-sink/src/test/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporterTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-kafka-sink/src/test/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporterTest.java b/ambari-metrics/ambari-metrics-kafka-sink/src/test/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporterTest.java
index 6f2fc27..e0adb4b 100644
--- a/ambari-metrics/ambari-metrics-kafka-sink/src/test/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporterTest.java
+++ b/ambari-metrics/ambari-metrics-kafka-sink/src/test/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporterTest.java
@@ -79,7 +79,7 @@ public class KafkaTimelineMetricsReporterTest {
     properties.setProperty("kafka.timeline.metrics.sendInterval", "5900");
     properties.setProperty("kafka.timeline.metrics.maxRowCacheSize", "10000");
     properties.setProperty("kafka.timeline.metrics.host", "localhost");
-    properties.setProperty("kafka.timeline.metrics.port", "8188");
+    properties.setProperty("kafka.timeline.metrics.port", "6188");
     properties.setProperty("kafka.timeline.metrics.reporter.enabled", "true");
     properties.setProperty("external.kafka.metrics.exclude.prefix", "a.b.c");
     properties.setProperty("external.kafka.metrics.include.prefix", "a.b.c.d");

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
index f054f16..ab5f1e4 100644
--- a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
+++ b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
@@ -24,7 +24,6 @@ import backtype.storm.generated.TopologySummary;
 import backtype.storm.metric.IClusterReporter;
 import backtype.storm.utils.NimbusClient;
 import backtype.storm.utils.Utils;
-import org.apache.commons.lang3.ClassUtils;
 import org.apache.commons.lang3.Validate;
 import org.apache.hadoop.metrics2.sink.timeline.AbstractTimelineMetricsSink;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
@@ -40,9 +39,7 @@ import java.util.Map;
 public class StormTimelineMetricsReporter extends AbstractTimelineMetricsSink
   implements IClusterReporter {
 
-  public static final String COLLECTOR_HOST = "host";
-  public static final String COLLECTOR_PORT = "port";
-  public static final String METRICS_COLLECTOR = "metrics_collector";
+  public static final String METRICS_COLLECTOR_CATEGORY = "metrics_collector";
   public static final String APP_ID = "appId";
 
   private String hostname;
@@ -79,20 +76,25 @@ public class StormTimelineMetricsReporter extends AbstractTimelineMetricsSink
         LOG.error("Could not identify hostname.");
         throw new RuntimeException("Could not identify hostname.", e);
       }
-      Validate.notNull(conf.get(METRICS_COLLECTOR), METRICS_COLLECTOR + " can not be null");
-      Map cf = (Map) conf.get(METRICS_COLLECTOR);
+      Validate.notNull(conf.get(METRICS_COLLECTOR_CATEGORY), METRICS_COLLECTOR_CATEGORY + " can not be null");
+      Map cf = (Map) conf.get(METRICS_COLLECTOR_CATEGORY);
       Map stormConf = Utils.readStormConfig();
       this.nimbusClient = NimbusClient.getConfiguredClient(stormConf);
-      String collectorHostname = cf.get(COLLECTOR_HOST).toString();
-      String port = cf.get(COLLECTOR_PORT).toString();
+      String collector = cf.get(COLLECTOR_PROPERTY).toString();
       timeoutSeconds = cf.get(METRICS_POST_TIMEOUT_SECONDS) != null ?
         Integer.parseInt(cf.get(METRICS_POST_TIMEOUT_SECONDS).toString()) :
         DEFAULT_POST_TIMEOUT_SECONDS;
       applicationId = cf.get(APP_ID).toString();
-      collectorUri = "http://" + collectorHostname + ":" + port + "/ws/v1/timeline/metrics";
+      collectorUri = collector + WS_V1_TIMELINE_METRICS;
+      if (collectorUri.toLowerCase().startsWith("https://")) {
+        String trustStorePath = cf.get(SSL_KEYSTORE_PATH_PROPERTY).toString().trim();
+        String trustStoreType = cf.get(SSL_KEYSTORE_TYPE_PROPERTY).toString().trim();
+        String trustStorePwd = cf.get(SSL_KEYSTORE_PASSWORD_PROPERTY).toString().trim();
+        loadTruststore(trustStorePath, trustStoreType, trustStorePwd);
+      }
     } catch (Exception e) {
-      LOG.warn("Could not initialize metrics collector, please specify host, " +
-        "port under $STORM_HOME/conf/config.yaml ", e);
+      LOG.warn("Could not initialize metrics collector, please specify " +
+        "protocol, host, port under $STORM_HOME/conf/config.yaml ", e);
     }
 
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
index 4208287..91c63b9 100644
--- a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
@@ -77,7 +77,13 @@ public class StormTimelineMetricsSink extends AbstractTimelineMetricsSink implem
     int metricsSendInterval = Integer.parseInt(configuration.getProperty(METRICS_SEND_INTERVAL,
         String.valueOf(MAX_EVICTION_TIME_MILLIS)));
     metricsCache = new TimelineMetricsCache(maxRowCacheSize, metricsSendInterval);
-    collectorUri = "http://" + configuration.getProperty(COLLECTOR_HOST_PROPERTY) + ":" + configuration.getProperty(COLLECTOR_PORT_PROPERTY) + "/ws/v1/timeline/metrics";
+    collectorUri = configuration.getProperty(COLLECTOR_PROPERTY) + WS_V1_TIMELINE_METRICS;
+    if (collectorUri.toLowerCase().startsWith("https://")) {
+      String trustStorePath = configuration.getProperty(SSL_KEYSTORE_PATH_PROPERTY).trim();
+      String trustStoreType = configuration.getProperty(SSL_KEYSTORE_TYPE_PROPERTY).trim();
+      String trustStorePwd = configuration.getProperty(SSL_KEYSTORE_PASSWORD_PROPERTY).trim();
+      loadTruststore(trustStorePath, trustStoreType, trustStorePwd);
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
index 11c162a..62a8cc3 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
@@ -22,6 +22,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.service.CompositeService;
@@ -170,15 +171,15 @@ public class ApplicationHistoryServer extends CompositeService {
     String bindAddress = metricConfiguration.getWebappAddress();
     LOG.info("Instantiating AHSWebApp at " + bindAddress);
     try {
+      Configuration conf = metricConfiguration.getMetricsConf();
+      HttpConfig.Policy policy = HttpConfig.Policy.valueOf(
+        conf.get(TimelineMetricConfiguration.TIMELINE_SERVICE_HTTP_POLICY,
+          HttpConfig.Policy.HTTP_ONLY.name()));
       webApp =
           WebApps
             .$for("applicationhistory", ApplicationHistoryClientService.class,
               ahsClientService, "ws")
-            .with(getConfig())
-            .withHttpSpnegoPrincipalKey(
-              YarnConfiguration.TIMELINE_SERVICE_PRINCIPAL)
-            .withHttpSpnegoKeytabKey(
-              YarnConfiguration.TIMELINE_SERVICE_KEYTAB)
+            .withHttpPolicy(conf, policy)
             .at(bindAddress)
             .start(new AHSWebApp(timelineStore, timelineMetricStore,
               ahsClientService));

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/net/RestMetricsSender.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/net/RestMetricsSender.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/net/RestMetricsSender.java
index c6d84c2..0a9a513 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/net/RestMetricsSender.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/net/RestMetricsSender.java
@@ -37,7 +37,7 @@ public class RestMetricsSender implements MetricsSender {
 
   /**
    * Creates unconnected RestMetricsSender with endpoint configured as
-   * http://${metricsHost}:8188/ws/v1/timeline/metrics,
+   * http://${metricsHost}:6188/ws/v1/timeline/metrics,
    * where ${metricsHost} is specified by metricHost param.
    *
    * @param metricsHost the hostname that will be used to access application metrics history service.
@@ -70,10 +70,6 @@ public class RestMetricsSender implements MetricsSender {
       if (responseString.length() > 0) {
         LOG.debug("POST response from server: " + responseString);
       }
-    } catch (MalformedURLException e) {
-      LOG.error("", e);
-    } catch (ProtocolException e) {
-      LOG.error("", e);
     } catch (IOException e) {
       LOG.error("", e);
     } finally {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
index cc9a2f3..3431ac8 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
 
-import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -205,6 +204,9 @@ public class TimelineMetricConfiguration {
   public static final String AGGREGATORS_SKIP_BLOCK_CACHE =
     "timeline.metrics.aggregators.skip.blockcache.enabled";
 
+  public static final String TIMELINE_SERVICE_HTTP_POLICY =
+    "timeline.metrics.service.http.policy";
+
   public static final String DISABLE_METRIC_METADATA_MGMT =
     "timeline.metrics.service.metadata.management.disabled";
 
@@ -262,7 +264,7 @@ public class TimelineMetricConfiguration {
   }
 
   public String getWebappAddress() {
-    String defaultHttpAddress = "0.0.0.0:8188";
+    String defaultHttpAddress = "0.0.0.0:6188";
     if (metricsConf != null) {
       return metricsConf.get(WEBAPP_HTTP_ADDRESS, defaultHttpAddress);
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/java/org/apache/ambari/server/configuration/ComponentSSLConfiguration.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/configuration/ComponentSSLConfiguration.java b/ambari-server/src/main/java/org/apache/ambari/server/configuration/ComponentSSLConfiguration.java
index a51b2ee..cb9651e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/configuration/ComponentSSLConfiguration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/configuration/ComponentSSLConfiguration.java
@@ -21,7 +21,7 @@ package org.apache.ambari.server.configuration;
  * Configuration for SSL communication between Ambari and 3rd party services.
  * Currently, the following services are supported with SSL communication:
  * <ul>
- * <li>Ganglia</li>
+ * <li>Ambari metrics</li>
  * </ul>
  */
 public class ComponentSSLConfiguration {
@@ -32,7 +32,7 @@ public class ComponentSSLConfiguration {
   private String truststorePath;
   private String truststorePassword;
   private String truststoreType;
-  private boolean gangliaSSL;
+  private boolean httpsEnabled;
 
   /**
    * The singleton.
@@ -60,7 +60,7 @@ public class ComponentSSLConfiguration {
     truststorePath     = configuration.getProperty(Configuration.SSL_TRUSTSTORE_PATH_KEY);
     truststorePassword = getPassword(configuration);
     truststoreType     = configuration.getProperty(Configuration.SSL_TRUSTSTORE_TYPE_KEY);
-    gangliaSSL         = Boolean.parseBoolean(configuration.getProperty(Configuration.GANGLIA_HTTPS_KEY));
+    httpsEnabled = Boolean.parseBoolean(configuration.getProperty(Configuration.AMRABI_METRICS_HTTPS_ENABLED_KEY));
   }
 
 
@@ -94,12 +94,12 @@ public class ComponentSSLConfiguration {
   }
 
   /**
-   * Indicates whether or not Ganglia is setup for SSL.
+   * Indicates whether or not Ambari Metrics is setup for SSL.
    *
-   * @return true if Ganglia is setup for SSL
+   * @return true if AMS is setup for SSL
    */
-  public boolean isGangliaSSL() {
-    return gangliaSSL;
+  public boolean isHttpsEnabled() {
+    return httpsEnabled;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
index 35e9b6f..cfc2f51 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
@@ -242,7 +242,6 @@ public class Configuration {
   public static final String JAVAX_SSL_TRUSTSTORE = "javax.net.ssl.trustStore";
   public static final String JAVAX_SSL_TRUSTSTORE_PASSWORD = "javax.net.ssl.trustStorePassword";
   public static final String JAVAX_SSL_TRUSTSTORE_TYPE = "javax.net.ssl.trustStoreType";
-  public static final String GANGLIA_HTTPS_KEY = "ganglia.https";
   public static final String SRVR_TWO_WAY_SSL_PORT_DEFAULT = "8441";
   public static final String SRVR_ONE_WAY_SSL_PORT_DEFAULT = "8440";
   public static final String SRVR_CRT_NAME_DEFAULT = "ca.crt";
@@ -483,6 +482,9 @@ public class Configuration {
   private static final String DEFAULT_TIMELINE_METRICS_CACHE_HEAP_PERCENT = "15%";
   private static final String TIMELINE_METRICS_CACHE_USE_CUSTOM_SIZING_ENGINE = "server.timeline.metrics.cache.use.custom.sizing.engine";
 
+  // Timeline Metrics SSL settings
+  public static final String AMRABI_METRICS_HTTPS_ENABLED_KEY = "server.timeline.metrics.https.enabled";
+
   /**
    * Governs the use of {@link Parallel} to process {@link StageEntity}
    * instances into {@link Stage}.
@@ -921,8 +923,6 @@ public class Configuration {
       jsonObject = jsonElement.getAsJsonObject();
     } catch (FileNotFoundException e) {
       throw new IllegalArgumentException("No file " + file, e);
-    } catch (IOException ioe){
-      throw new IllegalArgumentException("Can't read file " + file, ioe);
     }
 
     return jsonObject;

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProvider.java
index c62b00c..16e2cd4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProvider.java
@@ -253,7 +253,7 @@ public abstract class GangliaPropertyProvider extends MetricsPropertyProvider {
     
     URIBuilder uriBuilder = new URIBuilder();
 
-    if (configuration.isGangliaSSL()) {
+    if (configuration.isHttpsEnabled()) {
       uriBuilder.setScheme("https");
     } else {
       uriBuilder.setScheme("http");

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaReportPropertyProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaReportPropertyProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaReportPropertyProvider.java
index e8dc7d1..80ecae6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaReportPropertyProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaReportPropertyProvider.java
@@ -209,7 +209,7 @@ public class GangliaReportPropertyProvider extends MetricsReportPropertyProvider
 
     StringBuilder sb = new StringBuilder();
 
-    if (configuration.isGangliaSSL()) {
+    if (configuration.isHttpsEnabled()) {
       sb.append("https://");
     } else {
       sb.append("http://");

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProvider.java
index 3ecb520..a674371 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProvider.java
@@ -34,7 +34,6 @@ import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.SystemException;
 import org.apache.ambari.server.controller.spi.TemporalInfo;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.apache.ambari.server.controller.utilities.StreamProvider;
 import org.apache.ambari.server.state.StackId;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
@@ -625,7 +624,8 @@ public abstract class AMSPropertyProvider extends MetricsPropertyProvider {
             if (metricsRequest == null) {
               metricsRequest = new MetricsRequest(temporalInfo,
                 getAMSUriBuilder(collectorHostName,
-                  collectorPort != null ? Integer.parseInt(collectorPort) : COLLECTOR_DEFAULT_PORT),
+                  collectorPort != null ? Integer.parseInt(collectorPort) : COLLECTOR_DEFAULT_PORT,
+                  configuration.isHttpsEnabled()),
                   (String) resource.getPropertyValue(clusterNamePropertyId));
               requests.put(temporalInfo, metricsRequest);
             }
@@ -643,9 +643,9 @@ public abstract class AMSPropertyProvider extends MetricsPropertyProvider {
     return requestMap;
   }
 
-  static URIBuilder getAMSUriBuilder(String hostname, int port) {
+  static URIBuilder getAMSUriBuilder(String hostname, int port, boolean httpsEnabled) {
     URIBuilder uriBuilder = new URIBuilder();
-    uriBuilder.setScheme("http");
+    uriBuilder.setScheme(httpsEnabled ? "https" : "http");
     uriBuilder.setHost(hostname);
     uriBuilder.setPort(port);
     uriBuilder.setPath("/ws/v1/timeline/metrics");

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSReportPropertyProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSReportPropertyProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSReportPropertyProvider.java
index e40a5a5..306390c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSReportPropertyProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSReportPropertyProvider.java
@@ -174,7 +174,7 @@ public class AMSReportPropertyProvider extends MetricsReportPropertyProvider {
     String host = hostProvider.getCollectorHostName(clusterName, TIMELINE_METRICS);
     String port = hostProvider.getCollectorPort(clusterName, TIMELINE_METRICS);
     URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder(host,
-      port != null ? Integer.parseInt(port) : 8188);
+      port != null ? Integer.parseInt(port) : 6188, configuration.isHttpsEnabled());
 
     for (Map.Entry<String, MetricReportRequest> entry : reportRequestMap.entrySet()) {
       MetricReportRequest reportRequest = entry.getValue();

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/metainfo.xml
index f40da62..bd9f357 100644
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/metainfo.xml
@@ -172,6 +172,7 @@
       <configuration-dependencies>
         <config-type>accumulo-env</config-type>
         <config-type>accumulo-site</config-type>
+        <config-type>ams-ssl-client</config-type>
       </configuration-dependencies>
 
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
index a9626b6..2bd2626 100644
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
@@ -131,6 +131,13 @@ if has_metric_collector:
       metric_collector_port = metric_collector_web_address.split(':')[1]
     else:
       metric_collector_port = '6188'
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
+  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
   pass
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2 b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
index 399e0c9..c2c12a5 100644
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
+++ b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
@@ -35,7 +35,12 @@ rpc.collector={{metric_collector_host}}:{{metric_collector_port}}
 accumulo.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
 accumulo.sink.timeline.period={{metrics_collection_period}}
 accumulo.sink.timeline.sendInterval={{metrics_report_interval}}000
-accumulo.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+accumulo.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+
+# HTTPS properties
+accumulo.sink.timeline.truststore.path = {{metric_truststore_path}}
+accumulo.sink.timeline.truststore.type = {{metric_truststore_type}}
+accumulo.sink.timeline.truststore.password = {{metric_truststore_password}}
 
 {% else %}
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
index ab9593d..08bf47c 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
@@ -484,7 +484,6 @@
       </property>
     </depends-on>
   </property>
-
   <property>
     <name>timeline.metrics.sink.report.interval</name>
     <value>60</value>
@@ -553,5 +552,16 @@
       utilization only for user queries.
     </description>
   </property>
+  <property>
+    <name>timeline.metrics.service.http.policy</name>
+    <value>HTTP_ONLY</value>
+    <description>
+      This configures the HTTP endpoint for Yarn Application History Server for
+      Ambari Metrics System.
+      The following values are supported:
+      - HTTP_ONLY : Service is provided only on http
+      - HTTPS_ONLY : Service is provided only on https
+    </description>
+  </property>
 
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-ssl-client.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-ssl-client.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-ssl-client.xml
new file mode 100644
index 0000000..31586ee
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-ssl-client.xml
@@ -0,0 +1,37 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>ssl.client.truststore.location</name>
+        <value>/etc/security/clientKeys/all.jks</value>
+        <description>Location of the trust store file.</description>
+    </property>
+    <property>
+        <name>ssl.client.truststore.type</name>
+        <value>jks</value>
+        <description>Optional. Default value is "jks".</description>
+    </property>
+    <property>
+        <name>ssl.client.truststore.password</name>
+        <value>bigdata</value>
+        <property-type>PASSWORD</property-type>
+        <description>Password to open the trust store file.</description>
+    </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-ssl-server.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-ssl-server.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-ssl-server.xml
new file mode 100644
index 0000000..f95793e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-ssl-server.xml
@@ -0,0 +1,64 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>ssl.server.truststore.location</name>
+        <value>/etc/security/serverKeys/all.jks</value>
+        <description>Location of the trust store file.</description>
+    </property>
+    <property>
+        <name>ssl.server.truststore.type</name>
+        <value>jks</value>
+        <description>Optional. Default value is "jks".</description>
+    </property>
+    <property>
+        <name>ssl.server.truststore.password</name>
+        <value>bigdata</value>
+        <property-type>PASSWORD</property-type>
+        <description>Password to open the trust store file.</description>
+    </property>
+    <property>
+        <name>ssl.server.truststore.reload.interval</name>
+        <value>10000</value>
+        <description>Truststore reload interval, in milliseconds.</description>
+    </property>
+    <property>
+        <name>ssl.server.keystore.type</name>
+        <value>jks</value>
+        <description>Optional. Default value is "jks".</description>
+    </property>
+    <property>
+        <name>ssl.server.keystore.location</name>
+        <value>/etc/security/serverKeys/keystore.jks</value>
+        <description>Location of the keystore file.</description>
+    </property>
+    <property>
+        <name>ssl.server.keystore.password</name>
+        <value>bigdata</value>
+        <property-type>PASSWORD</property-type>
+        <description>Password to open the keystore file.</description>
+    </property>
+    <property>
+        <name>ssl.server.keystore.keypassword</name>
+        <value>bigdata</value>
+        <property-type>PASSWORD</property-type>
+        <description>Password for private key in keystore file.</description>
+    </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
index 67a93a5..2f2f116 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
@@ -155,6 +155,8 @@
         <config-type>ams-hbase-log4j</config-type>
         <config-type>ams-grafana-env</config-type>
         <config-type>ams-grafana-ini</config-type>
+        <config-type>ams-ssl-server</config-type>
+        <config-type>ams-ssl-client</config-type>
       </configuration-dependencies>
 
       <excluded-config-types>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
index 40188c2..417574b 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
@@ -204,6 +204,14 @@ def ams(name=None):
               group=params.user_group
     )
 
+    XmlConfig("ssl-server.xml",
+              conf_dir=params.ams_collector_conf_dir,
+              configurations=params.config['configurations']['ams-ssl-server'],
+              configuration_attributes=params.config['configuration_attributes']['ams-ssl-server'],
+              owner=params.ams_user,
+              group=params.user_group
+    )
+
     merged_ams_hbase_site = {}
     merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-site'])
     if params.security_enabled:

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
index 5ab40b0..02caa11 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
@@ -40,7 +40,7 @@ def create_ams_datasource():
   Logger.info("Connecting (GET) to %s:%s%s" % (params.hostname,
                                                params.ams_grafana_port,
                                                GRAFANA_URL))
-
+# TODO add https support
   conn = httplib.HTTPConnection(params.hostname,
                                 int(params.ams_grafana_port))
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
index 89a60f7..f3a97fc 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
@@ -50,6 +50,16 @@ ams_pid_dir = status_params.ams_collector_pid_dir
 ams_collector_script = "/usr/sbin/ambari-metrics-collector"
 ams_collector_pid_dir = status_params.ams_collector_pid_dir
 ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+  metric_collector_https_enabled = True
+  metric_collector_protocol = 'https'
+else:
+  metric_collector_https_enabled = False
+  metric_collector_protocol = 'http'
+metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+
 if 'cluster-env' in config['configurations'] and \
     'metrics_collector_vip_host' in config['configurations']['cluster-env']:
   metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/service_check.py
index f19c823..4346f0f 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/service_check.py
@@ -79,8 +79,9 @@ class AMSServiceCheck(Script):
         Logger.info("Connecting (POST) to %s:%s%s" % (params.metric_collector_host,
                                                       params.metric_collector_port,
                                                       self.AMS_METRICS_POST_URL))
-        conn = httplib.HTTPConnection(params.metric_collector_host,
-                                        int(params.metric_collector_port))
+        conn = self.get_http_connection(params.metric_collector_host,
+                                        int(params.metric_collector_port),
+                                        params.metric_collector_https_enabled)
         conn.request("POST", self.AMS_METRICS_POST_URL, metric_json, headers)
 
         response = conn.getresponse()
@@ -127,8 +128,9 @@ class AMSServiceCheck(Script):
                                                  params.metric_collector_port,
                                               self.AMS_METRICS_GET_URL % encoded_get_metrics_parameters))
 
-    conn = httplib.HTTPConnection(params.metric_collector_host,
-                                  int(params.metric_collector_port))
+    conn = self.get_http_connection(params.metric_collector_host,
+                                    int(params.metric_collector_port),
+                                    params.metric_collector_https_enabled)
     conn.request("GET", self.AMS_METRICS_GET_URL % encoded_get_metrics_parameters)
     response = conn.getresponse()
     Logger.info("Http response: %s %s" % (response.status, response.reason))
@@ -161,6 +163,13 @@ class AMSServiceCheck(Script):
 
     Logger.info("Ambari Metrics service check is finished.")
 
+  def get_http_connection(self, host, port, https_enabled=False):
+    if https_enabled:
+      # TODO verify certificate
+      return httplib.HTTPSConnection(host, port)
+    else:
+      return httplib.HTTPConnection(host, port)
+
 if __name__ == "__main__":
   AMSServiceCheck().execute()
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2 b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2
index a65ea88..1591d82 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2
@@ -55,9 +55,14 @@ rpc.collector={{metric_collector_host}}:{{metric_collector_port}}
 hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
 hbase.sink.timeline.period={{metrics_collection_period}}
 hbase.sink.timeline.sendInterval={{metrics_report_interval}}000
-hbase.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+hbase.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
 hbase.sink.timeline.serviceName-prefix=ams
 
+# HTTPS properties
+hbase.sink.timeline.truststore.path = {{metric_truststore_path}}
+hbase.sink.timeline.truststore.type = {{metric_truststore_type}}
+hbase.sink.timeline.truststore.password = {{metric_truststore_password}}
+
 # Switch off metrics generation on a per region basis
 *.source.filter.class=org.apache.hadoop.metrics2.filter.GlobFilter
 hbase.*.source.filter.exclude=*Regions*

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2 b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
index fc86a58..4e2d0f5 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
@@ -22,6 +22,7 @@ metrics_server = {{metric_collector_host}}:{{metric_collector_port}}
 hostname = {{hostname}}
 enable_time_threshold = false
 enable_value_threshold = false
+https_enabled = {{metric_collector_https_enabled}}
 
 [emitter]
 send_interval = {{metrics_report_interval}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/metainfo.xml
index 5e5e4a7..0396c28 100644
--- a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/metainfo.xml
@@ -58,6 +58,7 @@
       <configuration-dependencies>
         <config-type>flume-env</config-type>
         <config-type>flume-conf</config-type>
+        <config-type>ams-ssl-client</config-type>
       </configuration-dependencies>
 
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
index 28ee36b..5ec879c 100644
--- a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
@@ -101,6 +101,13 @@ if has_metric_collector:
       metric_collector_port = metric_collector_web_address.split(':')[1]
     else:
       metric_collector_port = '6188'
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
+  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
   pass
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2 b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
index 753aa3f..df68242 100644
--- a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
@@ -16,11 +16,15 @@
 # limitations under the License.
 #}
 
-collector={{metric_collector_host}}
-port={{metric_collector_port}}
+collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
 collectionFrequency={{metrics_collection_period}}000
 maxRowCacheSize=10000
 sendInterval={{metrics_report_interval}}000
 
+# HTTPS properties
+truststore.path = {{metric_truststore_path}}
+truststore.type = {{metric_truststore_type}}
+truststore.password = {{metric_truststore_password}}
+
 # Metric names having type COUNTER
 counters=EventTakeSuccessCount,EventPutSuccessCount,EventTakeAttemptCount,EventPutAttemptCount

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/metainfo.xml
index ce625eb..7a71604 100644
--- a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/metainfo.xml
@@ -149,6 +149,7 @@
         <config-type>yarn-client</config-type>
         <config-type>hawq-limits-env</config-type>
         <config-type>hawq-sysctl-env</config-type>
+        <config-type>ams-ssl-client</config-type>
       </configuration-dependencies>
     </service>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml
index 78aca3d..057e126 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml
@@ -147,6 +147,7 @@
         <config-type>ranger-hbase-audit</config-type>
         <config-type>ranger-hbase-policymgr-ssl</config-type>
         <config-type>ranger-hbase-security</config-type>
+        <config-type>ams-ssl-client</config-type>
       </configuration-dependencies>
 
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index 6bbf379..6837bf1 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -149,6 +149,14 @@ if has_metric_collector:
       metric_collector_port = metric_collector_web_address.split(':')[1]
     else:
       metric_collector_port = '6188'
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
+  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+
   pass
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2 b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
index f74ea4c..3202605 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
@@ -66,7 +66,12 @@ rpc.collector={{metric_collector_host}}:{{metric_collector_port}}
 hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
 hbase.sink.timeline.period={{metrics_collection_period}}
 hbase.sink.timeline.sendInterval={{metrics_report_interval}}000
-hbase.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+hbase.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+
+# HTTPS properties
+hbase.sink.timeline.truststore.path = {{metric_truststore_path}}
+hbase.sink.timeline.truststore.type = {{metric_truststore_type}}
+hbase.sink.timeline.truststore.password = {{metric_truststore_password}}
 
 {% else %}
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2 b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
index 2df141f..5a91866 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
@@ -65,7 +65,12 @@ rpc.collector={{metric_collector_host}}:{{metric_collector_port}}
 hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
 hbase.sink.timeline.period={{metrics_collection_period}}
 hbase.sink.timeline.sendInterval={{metrics_report_interval}}000
-hbase.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+hbase.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+
+# HTTPS properties
+hbase.sink.timeline.truststore.path = {{metric_truststore_path}}
+hbase.sink.timeline.truststore.type = {{metric_truststore_type}}
+hbase.sink.timeline.truststore.password = {{metric_truststore_password}}
 
 {% else %}
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
index 73616ae..75d3bea 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
@@ -245,6 +245,7 @@
         <config-type>ranger-hdfs-audit</config-type>
         <config-type>ranger-hdfs-policymgr-ssl</config-type>
         <config-type>ranger-hdfs-security</config-type>
+        <config-type>ams-ssl-client</config-type>
       </configuration-dependencies>
       <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/configuration/kafka-broker.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/configuration/kafka-broker.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/configuration/kafka-broker.xml
index 6d0c8c6..704d73f 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/configuration/kafka-broker.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/configuration/kafka-broker.xml
@@ -325,6 +325,27 @@
     <description>Timeline port</description>
   </property>
   <property>
+  <property>
+    <name>kafka.timeline.metrics.protocol</name>
+    <value>{{metric_collector_protocol}}</value>
+    <description>Timeline protocol(http or https)</description>
+  </property>
+  <property>
+    <name>kafka.timeline.metrics.truststore.path</name>
+    <value>{{metric_truststore_path}}</value>
+    <description>Location of the trust store file.</description>
+  </property>
+  <property>
+    <name>kafka.timeline.metrics.truststore.type</name>
+    <value>{{metric_truststore_type}}</value>
+    <description>Optional. Default value is "jks".</description>
+  </property>
+  <property>
+    <name>kafka.timeline.metrics.truststore.password</name>
+    <value>{{metric_truststore_password}}</value>
+    <description>Password to open the trust store file.</description>
+  </property>
+  <property>
     <name>kafka.timeline.metrics.reporter.sendInterval</name>
     <value>5900</value>
     <description>Timeline metrics reporter send interval</description>


[34/50] [abbrv] ambari git commit: AMBARI-15043. ambari server upstart support (aonishuk)

Posted by jo...@apache.org.
AMBARI-15043. ambari server upstart support (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0ff86b1c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0ff86b1c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0ff86b1c

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 0ff86b1c869ea1a3e13087e37225dd99a8e37926
Parents: 230c1d6
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Mon Feb 15 14:18:45 2016 +0200
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Feb 15 14:18:45 2016 +0200

----------------------------------------------------------------------
 ambari-server/etc/init/ambari-server.conf       | 33 ++++++++++++++++++++
 ambari-server/src/main/assemblies/server.xml    |  4 +++
 .../src/main/python/ambari_server/utils.py      |  4 +--
 .../src/main/python/ambari_server_main.py       | 19 +++++++++--
 4 files changed, 55 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0ff86b1c/ambari-server/etc/init/ambari-server.conf
----------------------------------------------------------------------
diff --git a/ambari-server/etc/init/ambari-server.conf b/ambari-server/etc/init/ambari-server.conf
new file mode 100644
index 0000000..ba92a43
--- /dev/null
+++ b/ambari-server/etc/init/ambari-server.conf
@@ -0,0 +1,33 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific
+
+#ambari-server
+description     "ambari server"
+
+stop on runlevel [06]
+
+env PIDFILE=/var/run/ambari-server/ambari-server.pid
+
+respawn
+
+script
+  . /etc/environment
+
+  export AMBARI_SERVER_RUN_IN_FOREGROUND=true
+  exec /etc/init.d/ambari-server start
+end script
+
+post-stop script
+  rm -f $PIDFILE
+end script
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/0ff86b1c/ambari-server/src/main/assemblies/server.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/assemblies/server.xml b/ambari-server/src/main/assemblies/server.xml
index ca74185..e1a4919 100644
--- a/ambari-server/src/main/assemblies/server.xml
+++ b/ambari-server/src/main/assemblies/server.xml
@@ -170,6 +170,10 @@
       <directory>src/main/resources/host_scripts</directory>
       <outputDirectory>/var/lib/ambari-server/resources/host_scripts</outputDirectory>
     </fileSet>
+    <fileSet>
+      <directory>etc/init</directory>
+      <outputDirectory>/etc/init</outputDirectory>
+    </fileSet>
   </fileSets>
   <!-- Single files. Syntax:
 	  <files>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0ff86b1c/ambari-server/src/main/python/ambari_server/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/utils.py b/ambari-server/src/main/python/ambari_server/utils.py
index 49af869..c4990f5 100644
--- a/ambari-server/src/main/python/ambari_server/utils.py
+++ b/ambari-server/src/main/python/ambari_server/utils.py
@@ -96,7 +96,7 @@ def save_pid(pid, pidfile):
       pass
 
 
-def save_main_pid_ex(pids, pidfile, exclude_list=[], kill_exclude_list=False):
+def save_main_pid_ex(pids, pidfile, exclude_list=[], kill_exclude_list=False, skip_daemonize=False):
   """
     Save pid which is not included to exclude_list to pidfile.
     If kill_exclude_list is set to true,  all processes in that
@@ -109,7 +109,7 @@ def save_main_pid_ex(pids, pidfile, exclude_list=[], kill_exclude_list=False):
     for item in pids:
       if pid_exists(item["pid"]) and (item["exe"] not in exclude_list):
         pfile.write("%s\n" % item["pid"])
-      if pid_exists(item["pid"]) and (item["exe"] in exclude_list):
+      if pid_exists(item["pid"]) and (item["exe"] in exclude_list) and not skip_daemonize:
         try:
           os.kill(int(item["pid"]), signal.SIGKILL)
         except:

http://git-wip-us.apache.org/repos/asf/ambari/blob/0ff86b1c/ambari-server/src/main/python/ambari_server_main.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server_main.py b/ambari-server/src/main/python/ambari_server_main.py
index 5c98bf4..8be3239 100644
--- a/ambari-server/src/main/python/ambari_server_main.py
+++ b/ambari-server/src/main/python/ambari_server_main.py
@@ -55,6 +55,9 @@ if ambari_provider_module is not None:
 
 jvm_args = os.getenv('AMBARI_JVM_ARGS', '-Xms512m -Xmx2048m')
 
+ENV_FOREGROUND_KEY = "AMBARI_SERVER_RUN_IN_FOREGROUND"
+IS_FOREGROUND = ENV_FOREGROUND_KEY in os.environ and os.environ[ENV_FOREGROUND_KEY].lower() == "true"
+
 SERVER_START_CMD = "{0} " \
     "-server -XX:NewRatio=3 " \
     "-XX:+UseConcMarkSweepGC " + \
@@ -63,7 +66,7 @@ SERVER_START_CMD = "{0} " \
     "{1} {2} " \
     "-cp {3} "\
     "org.apache.ambari.server.controller.AmbariServer " \
-    "> {4} 2>&1 || echo $? > {5} &"
+    "> {4} 2>&1 || echo $? > {5}"
 SERVER_START_CMD_DEBUG = "{0} " \
     "-server -XX:NewRatio=2 " \
     "-XX:+UseConcMarkSweepGC " + \
@@ -72,7 +75,11 @@ SERVER_START_CMD_DEBUG = "{0} " \
     "server=y,suspend={6} " \
     "-cp {3} " + \
     "org.apache.ambari.server.controller.AmbariServer " \
-    "> {4} 2>&1 || echo $? > {5} &"
+    "> {4} 2>&1 || echo $? > {5}"
+    
+if not IS_FOREGROUND:
+  SERVER_START_CMD += " &"
+  SERVER_START_CMD_DEBUG += " &"
 
 SERVER_START_CMD_WINDOWS = "{0} " \
     "-server -XX:NewRatio=3 " \
@@ -199,7 +206,7 @@ def wait_for_server_start(pidFile, scmStatus):
   else:
     save_main_pid_ex(pids, pidFile, [locate_file('sh', '/bin'),
                                      locate_file('bash', '/bin'),
-                                     locate_file('dash', '/bin')], True)
+                                     locate_file('dash', '/bin')], True, IS_FOREGROUND)
 
 
 def server_process_main(options, scmStatus=None):
@@ -289,6 +296,9 @@ def server_process_main(options, scmStatus=None):
   # The launched shell process and sub-processes should have a group id that
   # is different from the parent.
   def make_process_independent():
+    if IS_FOREGROUND: # upstart script is not able to track process from different pgid.
+      return
+    
     processId = os.getpid()
     if processId > 0:
       try:
@@ -322,5 +332,8 @@ def server_process_main(options, scmStatus=None):
 
   if scmStatus is not None:
     scmStatus.reportStarted()
+    
+  if IS_FOREGROUND:
+    procJava.communicate()
 
   return procJava


[10/50] [abbrv] ambari git commit: AMBARI-15017 RU/EU has to calculate ranger-env config ranger_privelege_user_jdbc_url (dsen)

Posted by jo...@apache.org.
AMBARI-15017 RU/EU has to calculate ranger-env config ranger_privelege_user_jdbc_url (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a98adb7e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a98adb7e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a98adb7e

Branch: refs/heads/branch-dev-patch-upgrade
Commit: a98adb7eb9c61b40a2724f61cc45fc9e70504c2a
Parents: 9419400
Author: Dmytro Sen <ds...@apache.org>
Authored: Fri Feb 12 00:21:51 2016 +0200
Committer: Dmytro Sen <ds...@apache.org>
Committed: Fri Feb 12 00:21:51 2016 +0200

----------------------------------------------------------------------
 .../upgrades/RangerConfigCalculation.java       | 14 +++++++++-
 .../upgrades/RangerConfigCalculationTest.java   | 27 ++++++++++++++++++++
 2 files changed, 40 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a98adb7e/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java
index ebec177..ff4a20e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java
@@ -37,6 +37,8 @@ import com.google.inject.Inject;
  */
 public class RangerConfigCalculation extends AbstractServerAction {
   private static final String SOURCE_CONFIG_TYPE = "admin-properties";
+  private static final String RANGER_ENV_CONFIG_TYPE = "ranger-env";
+  private static final String RANGER_ADMIN_SITE_CONFIG_TYPE = "ranger-admin-site";
 
   @Inject
   private Clusters m_clusters;
@@ -101,6 +103,7 @@ public class RangerConfigCalculation extends AbstractServerAction {
     String url = null;
     String dialect = null;
     String auditUrl = null;
+    String userJDBCUrl = null;
 
     if ("mysql".equals(db)) {
       if (null == dbName) {
@@ -112,19 +115,22 @@ public class RangerConfigCalculation extends AbstractServerAction {
       url = MessageFormat.format("jdbc:mysql://{0}/{1}", dbHost, dbName);
       auditUrl = MessageFormat.format("jdbc:mysql://{0}/{1}", dbHost, auditDbName);
       dialect = "org.eclipse.persistence.platform.database.MySQLPlatform";
+      userJDBCUrl = MessageFormat.format("jdbc:mysql://{0}", dbHost);
     } else if ("oracle".equals(db)) {
       driver = "oracle.jdbc.OracleDriver";
       url = MessageFormat.format("jdbc:oracle:thin:@//{0}", dbHost);
       auditUrl = MessageFormat.format("jdbc:oracle:thin:@//{0}", dbHost);
       dialect = "org.eclipse.persistence.platform.database.OraclePlatform";
+      userJDBCUrl = MessageFormat.format("jdbc:oracle:thin:@//{0}", dbHost);
     }
 
     stdout.append(MessageFormat.format("Database driver: {0}\n", driver));
     stdout.append(MessageFormat.format("Database url: {0}\n", url));
     stdout.append(MessageFormat.format("Database audit url: {0}\n", auditUrl));
     stdout.append(MessageFormat.format("Database dialect: {0}", dialect));
+    stdout.append(MessageFormat.format("Database user jdbc url: {0}", userJDBCUrl));
 
-    Config config = cluster.getDesiredConfigByType("ranger-admin-site");
+    Config config = cluster.getDesiredConfigByType(RANGER_ADMIN_SITE_CONFIG_TYPE);
     Map<String, String> targetValues = config.getProperties();
     targetValues.put("ranger.jpa.jdbc.driver", driver);
     targetValues.put("ranger.jpa.jdbc.url", url);
@@ -137,6 +143,12 @@ public class RangerConfigCalculation extends AbstractServerAction {
     config.setProperties(targetValues);
     config.persist(false);
 
+    config = cluster.getDesiredConfigByType(RANGER_ENV_CONFIG_TYPE);
+    targetValues = config.getProperties();
+    targetValues.put("ranger_privelege_user_jdbc_url", userJDBCUrl);
+    config.setProperties(targetValues);
+    config.persist(false);
+
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", stdout.toString(), "");
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a98adb7e/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculationTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculationTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculationTest.java
index 1bd0de4..e673714 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculationTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculationTest.java
@@ -88,8 +88,27 @@ public class RangerConfigCalculationTest {
       }
     };
 
+    Config rangerEnv = new ConfigImpl("ranger-env") {
+      Map<String, String> mockProperties = new HashMap<String, String>();
+      @Override
+      public Map<String, String> getProperties() {
+        return mockProperties;
+      }
+
+      @Override
+      public void setProperties(Map<String, String> properties) {
+        mockProperties.putAll(properties);
+      }
+
+      @Override
+      public void persist(boolean newConfig) {
+        // no-op
+      }
+    };
+
     expect(cluster.getDesiredConfigByType("admin-properties")).andReturn(adminConfig).atLeastOnce();
     expect(cluster.getDesiredConfigByType("ranger-admin-site")).andReturn(adminSiteConfig).atLeastOnce();
+    expect(cluster.getDesiredConfigByType("ranger-env")).andReturn(rangerEnv).atLeastOnce();
 
     expect(m_clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
     expect(m_injector.getInstance(Clusters.class)).andReturn(m_clusters).atLeastOnce();
@@ -144,6 +163,10 @@ public class RangerConfigCalculationTest {
     assertEquals("jdbc:mysql://host1/ranger_audit", map.get("ranger.jpa.audit.jdbc.url"));
     assertEquals("org.eclipse.persistence.platform.database.MySQLPlatform", map.get("ranger.jpa.audit.jdbc.dialect"));
 
+    config = c.getDesiredConfigByType("ranger-env");
+    map = config.getProperties();
+    assertEquals("jdbc:mysql://host1", map.get("ranger_privelege_user_jdbc_url"));
+
     config = c.getDesiredConfigByType("admin-properties");
     config.getProperties().put("DB_FLAVOR", "oracle");
 
@@ -161,6 +184,10 @@ public class RangerConfigCalculationTest {
     assertEquals("jdbc:oracle:thin:@//host1", map.get("ranger.jpa.audit.jdbc.url"));
     assertEquals("org.eclipse.persistence.platform.database.OraclePlatform", map.get("ranger.jpa.audit.jdbc.dialect"));
 
+    config = c.getDesiredConfigByType("ranger-env");
+    map = config.getProperties();
+    assertEquals("jdbc:oracle:thin:@//host1", map.get("ranger_privelege_user_jdbc_url"));
+
   }