Posted to commits@ambari.apache.org by nc...@apache.org on 2016/02/23 22:21:46 UTC

[01/11] ambari git commit: AMBARI-15142 Small refactor for configs. (ababiichuk)

Repository: ambari
Updated Branches:
  refs/heads/branch-dev-patch-upgrade cbef0c146 -> e06d95d1b


AMBARI-15142 Small refactor for configs. (ababiichuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ea6a7a61
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ea6a7a61
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ea6a7a61

Branch: refs/heads/branch-dev-patch-upgrade
Commit: ea6a7a614190917abaa22dbec5895cb9b66b13b4
Parents: 9310ab7
Author: ababiichuk <ab...@hortonworks.com>
Authored: Tue Feb 23 17:02:00 2016 +0200
Committer: ababiichuk <ab...@hortonworks.com>
Committed: Tue Feb 23 17:02:00 2016 +0200

----------------------------------------------------------------------
 .../resourceManager/step3_controller.js         |   2 +-
 .../main/admin/kerberos/step4_controller.js     |   3 +-
 .../main/admin/serviceAccounts_controller.js    |   5 +-
 .../controllers/main/service/info/configs.js    |  32 +++-
 ambari-web/app/controllers/wizard.js            |   1 -
 .../app/controllers/wizard/step7_controller.js  |  66 +------
 .../configs/stack_config_properties_mapper.js   |   4 +-
 .../common/kdc_credentials_controller_mixin.js  |   2 +-
 ambari-web/app/models/stack_service.js          |  15 +-
 ambari-web/app/utils.js                         |   1 +
 ambari-web/app/utils/config.js                  | 187 ++++++-------------
 ambari-web/app/utils/configs/theme/theme.js     | 103 ++++++++++
 .../configs/widgets/config_widget_view.js       |   2 +-
 .../admin/kerberos/step4_controller_test.js     |   9 +-
 .../test/controllers/wizard/step7_test.js       |   7 +-
 ambari-web/test/utils/config_test.js            | 126 +------------
 16 files changed, 227 insertions(+), 338 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ea6a7a61/ambari-web/app/controllers/main/admin/highAvailability/resourceManager/step3_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/highAvailability/resourceManager/step3_controller.js b/ambari-web/app/controllers/main/admin/highAvailability/resourceManager/step3_controller.js
index d2b7a86..0ec632b 100644
--- a/ambari-web/app/controllers/main/admin/highAvailability/resourceManager/step3_controller.js
+++ b/ambari-web/app/controllers/main/admin/highAvailability/resourceManager/step3_controller.js
@@ -142,7 +142,7 @@ App.RMHighAvailabilityWizardStep3Controller = Em.Controller.extend({
     var dependencies = this._prepareDependencies(data);
     /** add dynamic property 'hadoop.proxyuser.' + yarnUser + '.hosts' **/
     var proxyUserConfig = App.ServiceConfigProperty.create(App.config.createDefaultConfig('hadoop.proxyuser.' + yarnUser + '.hosts',
-      'MISC', 'core-site', false,  {category : "HDFS", isUserProperty: false, isEditable: false, isOverridable: false}));
+      'core-site', false,  {category : "HDFS", isUserProperty: false, isEditable: false, isOverridable: false, serviceName: 'MISC'}));
     configs.configs.pushObject(proxyUserConfig);
 
     configs.configs.forEach(function (config) {
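
A note on the API change shown in this hunk (and repeated throughout the commit): App.config.createDefaultConfig no longer takes serviceName as its second positional argument. Callers now pass name, fileName, definedInStack and an optional coreObject, and an explicit service override such as 'MISC' moves into coreObject. A hedged before/after sketch of the call shape, with the dynamic yarn user replaced by a literal value for readability:

  // Before: createDefaultConfig(name, serviceName, fileName, definedInStack, coreObject)
  App.config.createDefaultConfig('hadoop.proxyuser.yarn.hosts', 'MISC', 'core-site', false,
    {category: 'HDFS', isUserProperty: false, isEditable: false, isOverridable: false});

  // After: serviceName is resolved from the file name, or overridden through coreObject
  App.config.createDefaultConfig('hadoop.proxyuser.yarn.hosts', 'core-site', false,
    {category: 'HDFS', isUserProperty: false, isEditable: false, isOverridable: false,
     serviceName: 'MISC'});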

http://git-wip-us.apache.org/repos/asf/ambari/blob/ea6a7a61/ambari-web/app/controllers/main/admin/kerberos/step4_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/kerberos/step4_controller.js b/ambari-web/app/controllers/main/admin/kerberos/step4_controller.js
index e83fe80..1eb52f1 100644
--- a/ambari-web/app/controllers/main/admin/kerberos/step4_controller.js
+++ b/ambari-web/app/controllers/main/admin/kerberos/step4_controller.js
@@ -290,6 +290,7 @@ App.KerberosWizardStep4Controller = App.WizardStep7Controller.extend(App.AddSecu
     var credentialProperties = this.get('adminPropertyNames').map(function(prop, index) {
       var existingProperty = krbProperties.findProperty('name', prop.name);
       var coreObject = {
+        serviceName: 'Cluster',
         displayName: prop.displayName,
         isRequired: false,
         isRequiredByAgent: false,
@@ -301,7 +302,7 @@ App.KerberosWizardStep4Controller = App.WizardStep7Controller.extend(App.AddSecu
         isVisible: true,
         isUserProperty: false
       };
-      var propTpl = App.config.createDefaultConfig(prop.name, 'Cluster', fileName, false, coreObject);
+      var propTpl = App.config.createDefaultConfig(prop.name, fileName, false, coreObject);
       var siteProperty = siteProperties.filterProperty('filename', fileName).findProperty('name', prop.name);
       if (!Em.isNone(siteProperty)) {
         propTpl = $.extend(true, {}, siteProperty, propTpl);

http://git-wip-us.apache.org/repos/asf/ambari/blob/ea6a7a61/ambari-web/app/controllers/main/admin/serviceAccounts_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/serviceAccounts_controller.js b/ambari-web/app/controllers/main/admin/serviceAccounts_controller.js
index 45cfbcf..5b98bf3 100644
--- a/ambari-web/app/controllers/main/admin/serviceAccounts_controller.js
+++ b/ambari-web/app/controllers/main/admin/serviceAccounts_controller.js
@@ -103,7 +103,10 @@ App.MainAdminServiceAccountsController = App.MainServiceInfoConfigsController.ex
    * @param {Object[]} serverConfigs
    */
   createConfigObject: function(serverConfigs) {
-    var configs = App.config.mergePredefinedWithSaved(serverConfigs, this.get('selectedService'));
+    var configs = [];
+    serverConfigs.forEach(function(configObject) {
+      configs = configs.concat(App.config.getConfigsFromJSON(configObject, true));
+    });
     var miscConfigs = configs.filterProperty('displayType', 'user').filterProperty('category', 'Users and Groups');
     miscConfigs.setEach('isVisible', true);
     this.set('users', miscConfigs);
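
createConfigObject now converts each site object individually through App.config.getConfigsFromJSON (added to utils/config.js later in this commit) instead of merging the whole array with mergePredefinedWithSaved. A rough sketch of the expected input shape and result, with made-up property values:

  // Illustrative input: one site object as returned by the configurations API
  var configJSON = {
    type: 'core-site',
    properties: { 'fs.defaultFS': 'hdfs://example-host:8020' },
    properties_attributes: { final: { 'fs.defaultFS': 'true' } }
  };

  // Returns one config per key in `properties`; the second argument requests
  // App.ServiceConfigProperty instances instead of plain objects.
  var configs = App.config.getConfigsFromJSON(configJSON, true);
  // configs[0].get('value')        -> 'hdfs://example-host:8020'
  // configs[0].get('savedIsFinal') -> true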

http://git-wip-us.apache.org/repos/asf/ambari/blob/ea6a7a61/ambari-web/app/controllers/main/service/info/configs.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/info/configs.js b/ambari-web/app/controllers/main/service/info/configs.js
index 3cd652a..a22bb48 100644
--- a/ambari-web/app/controllers/main/service/info/configs.js
+++ b/ambari-web/app/controllers/main/service/info/configs.js
@@ -343,15 +343,15 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ConfigsLoader, A
   prepareConfigObjects: function(data, serviceName) {
     this.get('stepConfigs').clear();
 
-    var configGroups = [];
-    data.items.forEach(function (v) {
-      if (v.group_name == 'default') {
-        v.configurations.forEach(function (c) {
-          configGroups.pushObject(c);
+    var configs = [];
+    data.items.forEach(function (version) {
+      if (version.group_name == 'default') {
+        version.configurations.forEach(function (configObject) {
+          configs = configs.concat(App.config.getConfigsFromJSON(configObject, true));
         });
       }
     });
-    var configs = App.config.mergePredefinedWithSaved(configGroups, serviceName, this.get('selectedConfigGroup'), this.get('canEdit'));
+
     configs = App.config.sortConfigs(configs);
     /**
      * if property defined in stack but somehow it missed from cluster properties (can be after stack upgrade)
@@ -367,19 +367,33 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ConfigsLoader, A
     if (this.get('content.serviceName') === 'KERBEROS') {
       var kdc_type = configs.findProperty('name', 'kdc_type');
       if (kdc_type.get('value') === 'none') {
-        configs.findProperty('name', 'kdc_host').set('isRequired', false).set('isVisible', false);
-        configs.findProperty('name', 'admin_server_host').set('isRequired', false).set('isVisible', false);
-        configs.findProperty('name', 'domains').set('isRequired', false).set('isVisible', false);
+        configs.findProperty('name', 'kdc_host').set('isVisible', false);
+        configs.findProperty('name', 'admin_server_host').set('isVisible', false);
+        configs.findProperty('name', 'domains').set('isVisible', false);
       } else if (kdc_type.get('value') === 'active-directory') {
         configs.findProperty('name', 'container_dn').set('isVisible', true);
         configs.findProperty('name', 'ldap_url').set('isVisible', true);
       }
     }
 
+    this.setPropertyIsEditable(configs);
     this.set('allConfigs', configs);
   },
 
   /**
+   * Set <code>isEditable</code> property based on selected group, security
+   * and controller restriction
+   * @param configs
+   */
+  setPropertyIsEditable: function(configs) {
+    if (!this.get('selectedConfigGroup.isDefault') || !this.get('canEdit')) {
+      configs.setEach('isEditable', false);
+    } else if (App.get('isKerberosEnabled')) {
+      configs.filterProperty('isSecureConfig').setEach('isEditable', false);
+    }
+  },
+
+  /**
   * adds properties from the stack that don't belong to the cluster
   * to step configs
   * also sets the recommended value if it doesn't exist
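
setPropertyIsEditable replaces the per-property getIsEditable helper that this commit removes from utils/config.js: group and controller restrictions are applied to the whole collection at once, and an enabled Kerberos only locks the secure configs. A self-contained plain-JS sketch of the same rule, assuming config objects with isEditable/isSecureConfig flags:

  function setPropertyIsEditable(configs, isDefaultGroup, canEdit, isKerberosEnabled) {
    if (!isDefaultGroup || !canEdit) {
      // non-default config group, or a read-only controller: lock everything
      configs.forEach(function (config) { config.isEditable = false; });
    } else if (isKerberosEnabled) {
      // secured cluster: lock only the secure configs
      configs.filter(function (config) { return config.isSecureConfig; })
             .forEach(function (config) { config.isEditable = false; });
    }
  }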

http://git-wip-us.apache.org/repos/asf/ambari/blob/ea6a7a61/ambari-web/app/controllers/wizard.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard.js b/ambari-web/app/controllers/wizard.js
index 329d246..654bea5 100644
--- a/ambari-web/app/controllers/wizard.js
+++ b/ambari-web/app/controllers/wizard.js
@@ -895,7 +895,6 @@ App.WizardController = Em.Controller.extend(App.LocalStorage, App.ThemesMappingM
         }
         var configProperty = App.config.createDefaultConfig(
           _configProperties.get('name'),
-          _configProperties.get('serviceName'),
           _configProperties.get('filename'),
           _configProperties.get('isUserProperty'),
           {value: _configProperties.get('value')}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ea6a7a61/ambari-web/app/controllers/wizard/step7_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step7_controller.js b/ambari-web/app/controllers/wizard/step7_controller.js
index 0b29fe5..fc6b5ab 100644
--- a/ambari-web/app/controllers/wizard/step7_controller.js
+++ b/ambari-web/app/controllers/wizard/step7_controller.js
@@ -652,49 +652,6 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
   },
 
   /**
-   * Resolve config theme conditions
-   * in order to correctly calculate config errors number of service
-   * @param {Array} configs
-   */
-  resolveConfigThemeConditions: function (configs) {
-    App.ThemeCondition.find().forEach(function (configCondition) {
-      var _configs = Em.A(configCondition.get('configs'));
-      if (configCondition.get("resource") === 'config' && _configs.length > 0) {
-        var isConditionTrue = App.config.calculateConfigCondition(configCondition.get("if"), configs);
-        var action = isConditionTrue ? configCondition.get("then") : configCondition.get("else");
-        if (configCondition.get('id')) {
-          var valueAttributes = action.property_value_attributes;
-          if (valueAttributes && !Em.none(valueAttributes['visible'])) {
-            var themeResource;
-            if (configCondition.get('type') === 'subsection') {
-              themeResource = App.SubSection.find().findProperty('name', configCondition.get('name'));
-            } else if (configCondition.get('type') === 'subsectionTab') {
-              themeResource = App.SubSectionTab.find().findProperty('name', configCondition.get('name'));
-            } else if (configCondition.get('type') === 'config') {
-              //simulate section wrapper for condition type "config"
-              themeResource = Em.Object.create({
-                configProperties: [App.config.configId(configCondition.get('configName'), configCondition.get('fileName'))]
-              });
-            }
-            if (themeResource) {
-              themeResource.get('configProperties').forEach(function (_configId) {
-                configs.forEach(function (item) {
-                  if (App.config.configId(item.name, item.filename) === _configId) {
-                    // if config has already been hidden by condition with "subsection" or "subsectionTab" type
-                    // then ignore condition of "config" type
-                    if (configCondition.get('type') === 'config' && item.hiddenBySection) return false;
-                    item.hiddenBySection = !valueAttributes['visible'];
-                  }
-                });
-              }, this);
-            }
-          }
-        }
-      }
-    });
-  },
-
-  /**
    * Update hawq configuration depending on the state of the cluster
    * @param {Array} configs
    */
@@ -724,7 +681,7 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
     if (App.get('isKerberosEnabled') && this.get('wizardController.name') == 'addServiceController') {
       this.addKerberosDescriptorConfigs(configs, this.get('wizardController.kerberosDescriptorConfigs') || []);
     }
-    this.resolveConfigThemeConditions(configs);
+    App.configTheme.resolveConfigThemeConditions(configs);
     var stepConfigs = this.createStepConfigs();
     var serviceConfigs = this.renderConfigs(stepConfigs, configs);
     // if HA is enabled -> Make some reconfigurations
@@ -845,16 +802,16 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
     ];
 
     propertyNames.forEach(function(propertyName, propertyIndex) {
-      var propertyFromHdfs = configs.findProperty('id', propertyName + '__hdfs-site');
-      var newProperty = App.config.createDefaultConfig(propertyName, 'HAWQ', 'hdfs-client.xml', true);
+      var propertyFromHdfs = configs.findProperty('id', App.config.configId(propertyName, 'hdfs-site'));
+      var newProperty = App.config.createDefaultConfig(propertyName, 'hdfs-client.xml', true);
       Em.setProperties(newProperty, {
+        serviceName: 'HAWQ',
         description: propertyFromHdfs.description,
         displayName: propertyFromHdfs.displayName,
         displayType: 'string',
         index: propertyIndex,
         isOverridable: false,
         isReconfigurable: false,
-        name: propertyFromHdfs.name,
         value: propertyFromHdfs.value,
         recommendedValue: propertyFromHdfs.recommendedValue
       });
@@ -871,8 +828,8 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
    * @returns {Object[]} existing configs + additional config parameters in yarn-client.xml
    */
   addHawqConfigsOnRMHa: function(configs) {
-    var rmHost1 = configs.findProperty('id', 'yarn.resourcemanager.hostname.rm1__yarn-site').value ;
-    var rmHost2 = configs.findProperty('id', 'yarn.resourcemanager.hostname.rm2__yarn-site').value ;
+    var rmHost1 = configs.findProperty('id', App.config.configId('yarn.resourcemanager.hostname.rm1', 'yarn-site')).value ;
+    var rmHost2 = configs.findProperty('id', App.config.configId('yarn.resourcemanager.hostname.rm2', 'yarn-site')).value ;
     var yarnConfigToBeAdded = [
       {
         name: 'yarn.resourcemanager.ha',
@@ -889,10 +846,10 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
     ];
 
     yarnConfigToBeAdded.forEach(function(propertyDetails) {
-      var newProperty = App.config.createDefaultConfig(propertyDetails.name, 'HAWQ', 'yarn-client.xml', true);
+      var newProperty = App.config.createDefaultConfig(propertyDetails.name, 'yarn-client.xml', true);
       var value = rmHost1 + ':' + propertyDetails.port + ',' + rmHost2 + ':' + propertyDetails.port;
       Em.setProperties(newProperty, {
-        name: propertyDetails.name,
+        serviceName: 'HAWQ',
         description: propertyDetails.description,
         displayName: propertyDetails.displayName,
         isOverridable: false,
@@ -1143,10 +1100,7 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
     //add user properties
     Em.keys(configsMap).forEach(function (filename) {
       Em.keys(configsMap[filename]).forEach(function (propertyName) {
-        allConfigs.push(App.config.createDefaultConfig(propertyName,
-          App.config.getServiceByConfigType(filename) ? App.config.getServiceByConfigType(filename).get('serviceName') : 'MISC',
-          App.config.getOriginalFileName(filename),
-          false, {
+        allConfigs.push(App.config.createDefaultConfig(propertyName, App.config.getOriginalFileName(filename), false, {
             value: configsMap[filename][propertyName],
             savedValue: configsMap[filename][propertyName],
             hasInitialValue: true
@@ -1376,7 +1330,7 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
         return true;
       } else if (stackProperty.propertyDependsOn.length) {
         return !!stackProperty.propertyDependsOn.filter(function (p) {
-          var service = App.config.getServiceByConfigType(p.type);
+          var service = App.config.get('serviceByConfigTypeMap')[p.type];
           return service && !this.get('installedServices')[service.get('serviceName')];
         }, this).length;
       } else {
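
Several call sites in this commit swap getServiceByConfigType, which scanned App.StackService.find() on every call, for the cached serviceByConfigTypeMap computed property, turning the lookup into a single key access. A self-contained sketch of how such a map is built and used, mirroring the utils/config.js change below (sample data is made up):

  // Build a configType -> service map once, then look services up by config type.
  function buildServiceByConfigTypeMap(stackServices) {
    var map = {};
    stackServices.forEach(function (service) {
      service.configTypeList.forEach(function (configType) {
        map[configType] = service;
      });
    });
    return map;
  }

  var map = buildServiceByConfigTypeMap([
    { serviceName: 'HDFS', configTypeList: ['hdfs-site', 'core-site'] },
    { serviceName: 'YARN', configTypeList: ['yarn-site'] }
  ]);
  map['yarn-site'].serviceName; // 'YARN'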

http://git-wip-us.apache.org/repos/asf/ambari/blob/ea6a7a61/ambari-web/app/mappers/configs/stack_config_properties_mapper.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mappers/configs/stack_config_properties_mapper.js b/ambari-web/app/mappers/configs/stack_config_properties_mapper.js
index 2c7328e..32a45a5 100644
--- a/ambari-web/app/mappers/configs/stack_config_properties_mapper.js
+++ b/ambari-web/app/mappers/configs/stack_config_properties_mapper.js
@@ -114,7 +114,7 @@ App.stackConfigPropertiesMapper = App.QuickDataMapper.create({
                 name : dep.StackConfigurationDependency.dependency_name
               });
               var service = App.StackService.find(config.StackConfigurations.service_name);
-              var dependentService = App.config.getServiceByConfigType(dep.StackConfigurationDependency.dependency_type);
+              var dependentService = App.config.get('serviceByConfigTypeMap')[dep.StackConfigurationDependency.dependency_type];
               if (dependentService && service && dependentService.get('serviceName') != service.get('serviceName') && !service.get('dependentServiceNames').contains(dependentService.get('serviceName'))) {
                 service.set('dependentServiceNames', service.get('dependentServiceNames').concat(dependentService.get('serviceName')));
               }
@@ -123,7 +123,7 @@ App.stackConfigPropertiesMapper = App.QuickDataMapper.create({
           if (Em.get(config, 'StackConfigurations.property_depends_on.length') > 0) {
             config.StackConfigurations.property_depends_on.forEach(function(dep) {
               var service = App.StackService.find(config.StackConfigurations.service_name);
-              var dependentService = App.config.getServiceByConfigType(dep.type);
+              var dependentService = App.config.get('serviceByConfigTypeMap')[dep.type];
               if (dependentService && service && dependentService.get('serviceName') != service.get('serviceName') && !service.get('dependentServiceNames').contains(dependentService.get('serviceName'))) {
                 service.set('dependentServiceNames', service.get('dependentServiceNames').concat(dependentService.get('serviceName')));
               }

http://git-wip-us.apache.org/repos/asf/ambari/blob/ea6a7a61/ambari-web/app/mixins/common/kdc_credentials_controller_mixin.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/common/kdc_credentials_controller_mixin.js b/ambari-web/app/mixins/common/kdc_credentials_controller_mixin.js
index 45bc551..86ae83c 100644
--- a/ambari-web/app/mixins/common/kdc_credentials_controller_mixin.js
+++ b/ambari-web/app/mixins/common/kdc_credentials_controller_mixin.js
@@ -111,7 +111,7 @@ App.KDCCredentialsControllerMixin = Em.Mixin.create({
   initilizeKDCStoreProperties: function(configs) {
     var self = this;
     this.get('credentialsStoreConfigs').forEach(function(item) {
-      var configObject = App.config.createDefaultConfig(item.name, 'KERBEROS', 'krb5-conf.xml', false, false);
+      var configObject = App.config.createDefaultConfig(item.name, 'krb5-conf.xml', false);
       $.extend(configObject, item);
       if (item.name === 'persist_credentials') {
         if (self.get('isStorePersisted')) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/ea6a7a61/ambari-web/app/models/stack_service.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/stack_service.js b/ambari-web/app/models/stack_service.js
index 281e0d8..14af448 100644
--- a/ambari-web/app/models/stack_service.js
+++ b/ambari-web/app/models/stack_service.js
@@ -48,8 +48,14 @@ App.StackService = DS.Model.extend({
    * @type {String[]}
    */
   configTypeList: function() {
-    return Object.keys(this.get('configTypes') || {});
+    var configTypes = Object.keys(this.get('configTypes') || {});
+    //Falcon has dependency on oozie-site but oozie-site advanced/custom section should not be shown on Falcon page
+    if (this.get('serviceName') === 'FALCON') {
+      configTypes = configTypes.without('oozie-site');
+    }
+    return configTypes;
   }.property('configTypes'),
+
   /**
    * contains array of serviceNames that have configs that
    * depends on configs from current service
@@ -367,12 +373,7 @@ App.StackService.configCategories = function () {
   }
   serviceConfigCategories.pushObject(App.ServiceConfigCategory.create({ name: 'Advanced', displayName: 'Advanced'}));
 
-  var configTypes = Object.keys(this.get('configTypes'));
-
-  //Falcon has dependency on oozie-site but oozie-site advanced/custom section should not be shown on Falcon page
-  if (this.get('serviceName') !== 'OOZIE') {
-    configTypes = configTypes.without('oozie-site');
-  }
+  var configTypes = this.get('configTypeList');
 
   // Add Advanced section for every configType to all the services
   configTypes.forEach(function (type) {
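
With this change configTypeList becomes the single place that hides oozie-site from Falcon's config pages: the old inline check removed from configCategories above excluded oozie-site for every service except OOZIE, while the new computed property scopes the exclusion to FALCON, and configCategories simply reuses it. A plain-JS sketch of the resulting behavior (inputs are illustrative):

  function configTypeList(serviceName, configTypes) {
    var types = Object.keys(configTypes || {});
    if (serviceName === 'FALCON') {
      // Falcon depends on oozie-site, but should not show its Advanced/Custom section
      types = types.filter(function (type) { return type !== 'oozie-site'; });
    }
    return types;
  }

  configTypeList('FALCON', { 'falcon-env': {}, 'oozie-site': {} }); // ['falcon-env']
  configTypeList('OOZIE',  { 'oozie-env': {},  'oozie-site': {} }); // ['oozie-env', 'oozie-site']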

http://git-wip-us.apache.org/repos/asf/ambari/blob/ea6a7a61/ambari-web/app/utils.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils.js b/ambari-web/app/utils.js
index 920aa55..1041a9c 100644
--- a/ambari-web/app/utils.js
+++ b/ambari-web/app/utils.js
@@ -24,6 +24,7 @@ require('utils/base64');
 require('utils/db');
 require('utils/helper');
 require('utils/config');
+require('utils/configs/theme/theme');
 require('utils/configs/config_initializer');
 require('utils/configs/nn_ha_config_initializer');
 require('utils/configs/rm_ha_config_initializer');

http://git-wip-us.apache.org/repos/asf/ambari/blob/ea6a7a61/ambari-web/app/utils/config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/config.js b/ambari-web/app/utils/config.js
index 40d4699..ca1855a 100644
--- a/ambari-web/app/utils/config.js
+++ b/ambari-web/app/utils/config.js
@@ -206,21 +206,10 @@ App.config = Em.Object.create({
     }
   },
 
-  /**
-   * get service for current config type
-   * @param {String} configType - config fileName without xml
-   * @return App.StackService
-   */
-  getServiceByConfigType: function(configType) {
-    return App.StackService.find().find(function(s) {
-      return s.get('configTypeList').contains(configType);
-    });
-  },
-
   serviceByConfigTypeMap: function () {
     var ret = {};
     App.StackService.find().forEach(function(s) {
-      Object.keys(s.get('configTypes')).forEach(function (ct) {
+      s.get('configTypeList').forEach(function (ct) {
         ret[ct] = s;
       });
     });
@@ -228,52 +217,37 @@ App.config = Em.Object.create({
   }.property(),
 
   /**
-   * generates config objects
-   * @param configGroups
-   * @param serviceName
-   * @param selectedConfigGroup
-   * @param canEdit
+   * Generate configs collection with Ember or plain config objects
+   * from config JSON
+   *
+   * @param configJSON
+   * @param useEmberObject
    * @returns {Array}
    */
-  mergePredefinedWithSaved: function (configGroups, serviceName, selectedConfigGroup, canEdit) {
-    var configs = [];
-
-    configGroups.forEach(function (siteConfig) {
-      var filename = App.config.getOriginalFileName(siteConfig.type);
-      var attributes = siteConfig['properties_attributes'] || {};
-      var finalAttributes = attributes.final || {};
-      var properties = siteConfig.properties || {};
-
-      for (var index in properties) {
-        var serviceConfigObj = this.getDefaultConfig(index, serviceName, filename);
-        this.restrictSecureProperties(serviceConfigObj);
-
-        if (serviceConfigObj.isRequiredByAgent !== false) {
-          var formattedValue = this.formatPropertyValue(serviceConfigObj, properties[index]);
-          serviceConfigObj.value = serviceConfigObj.savedValue = formattedValue;
-          serviceConfigObj.isFinal = serviceConfigObj.savedIsFinal = finalAttributes[index] === "true";
-          serviceConfigObj.isEditable = this.getIsEditable(serviceConfigObj, selectedConfigGroup, canEdit);
-          serviceConfigObj.isVisible = serviceConfigObj.isVisible !== false || serviceName === 'MISC';
-        }
+  getConfigsFromJSON: function(configJSON, useEmberObject) {
+    var configs = [],
+      filename = App.config.getOriginalFileName(configJSON.type),
+      properties = configJSON.properties,
+      finalAttributes = Em.get(configJSON, 'properties_attributes.final') || {};
 
+    for (var index in properties) {
+      var serviceConfigObj = this.getDefaultConfig(index, filename);
+
+      if (serviceConfigObj.isRequiredByAgent !== false) {
+        serviceConfigObj.value = serviceConfigObj.savedValue = this.formatPropertyValue(serviceConfigObj, properties[index]);
+        serviceConfigObj.isFinal = serviceConfigObj.savedIsFinal = finalAttributes[index] === "true";
+        serviceConfigObj.isEditable = serviceConfigObj.isReconfigurable;
+      }
+
+      if (useEmberObject) {
         var serviceConfigProperty = App.ServiceConfigProperty.create(serviceConfigObj);
         serviceConfigProperty.validate();
         configs.push(serviceConfigProperty);
+      } else {
+        configs.push(serviceConfigObj);
       }
-    }, this);
-    return configs;
-  },
-
-  /**
-   * put secure properties in read-only mode
-   * @param {object} config
-   */
-  restrictSecureProperties: function (config) {
-    if (config.isSecureConfig) {
-      var isReadOnly = App.get('isKerberosEnabled');
-      config.isReconfigurable = config.isReconfigurable && !isReadOnly;
-      config.isOverridable = config.isOverridable && !isReadOnly;
     }
+    return configs;
   },
 
   /**
@@ -282,14 +256,13 @@ App.config = Em.Object.create({
    * such config
    *
    * @param name
-   * @param serviceName
    * @param fileName
    * @param coreObject
    * @returns {*|Object}
    */
-  getDefaultConfig: function(name, serviceName, fileName, coreObject) {
+  getDefaultConfig: function(name, fileName, coreObject) {
     var cfg = App.configsCollection.getConfigByName(name, fileName) ||
-      App.config.createDefaultConfig(name, serviceName, fileName, false, coreObject);
+      App.config.createDefaultConfig(name, fileName, false);
     if (Em.typeOf(coreObject) === 'object') {
       Em.setProperties(cfg, coreObject);
     }
@@ -301,13 +274,13 @@ App.config = Em.Object.create({
   * These property values have the lowest priority and can be overridden by stack/UI
   * config properties, but are used when such properties are absent in stack/UI configs
    * @param {string} name
-   * @param {string} serviceName
    * @param {string} fileName
    * @param {boolean} definedInStack
    * @param {Object} [coreObject]
    * @returns {Object}
    */
-  createDefaultConfig: function(name, serviceName, fileName, definedInStack, coreObject) {
+  createDefaultConfig: function(name, fileName, definedInStack, coreObject) {
+    var serviceName = this.get('serviceByConfigTypeMap')[fileName] || 'MISC';
     var tpl = {
       /** core properties **/
       id: this.configId(name, fileName),
@@ -457,17 +430,6 @@ App.config = Em.Object.create({
   },
 
   /**
-   * Calculate isEditable rely on controller state selected group and config restriction
-   * @param {Object} serviceConfigProperty
-   * @param {Object} selectedConfigGroup
-   * @param {boolean} canEdit
-   * @returns {boolean}
-   */
-  getIsEditable: function(serviceConfigProperty, selectedConfigGroup, canEdit) {
-    return canEdit && Em.get(selectedConfigGroup, 'isDefault') && Em.get(serviceConfigProperty, 'isReconfigurable')
-  },
-
-  /**
    * format property value depending on displayType
    * and one exception for 'kdc_type'
    * @param serviceConfigProperty
@@ -476,29 +438,14 @@ App.config = Em.Object.create({
    */
   formatPropertyValue: function(serviceConfigProperty, originalValue) {
     var value = Em.isNone(originalValue) ? Em.get(serviceConfigProperty, 'value') : originalValue,
-        displayType = Em.get(serviceConfigProperty, 'displayType') || Em.get(serviceConfigProperty, 'valueAttributes.type'),
-        category = Em.get(serviceConfigProperty, 'category');
+        displayType = Em.get(serviceConfigProperty, 'displayType') || Em.get(serviceConfigProperty, 'valueAttributes.type');
+    if (Em.get(serviceConfigProperty, 'name') === 'kdc_type') {
+      return App.router.get('mainAdminKerberosController.kdcTypesValues')[value];
+    }
+    if ( /^\s+$/.test("" + value)) {
+      return " ";
+    }
     switch (displayType) {
-      case 'content':
-      case 'string':
-      case 'multiLine':
-        return this.trimProperty({ displayType: displayType, value: value });
-        break;
-      case 'directories':
-        if (['DataNode', 'NameNode'].contains(category)) {
-          return value.split(',').sort().join(',');//TODO check if this code is used
-        }
-        break;
-      case 'directory':
-        if (['SNameNode'].contains(category)) {
-          return value.split(',').sort()[0];//TODO check if this code is used
-        }
-        break;
-      case 'componentHosts':
-        if (typeof(value) == 'string') {
-          return value.replace(/\[|]|'|&apos;/g, "").split(',');
-        }
-        break;
       case 'int':
         if (/\d+m$/.test(value) ) {
           return value.slice(0, value.length - 1);
@@ -506,18 +453,24 @@ App.config = Em.Object.create({
           var int = parseInt(value);
           return isNaN(int) ? "" : int.toString();
         }
-        break;
       case 'float':
         var float = parseFloat(value);
         return isNaN(float) ? "" : float.toString();
+      case 'componentHosts':
+        if (typeof(value) == 'string') {
+          return value.replace(/\[|]|'|&apos;/g, "").split(',');
+        }
+        return value;
+      case 'content':
+      case 'string':
+      case 'multiLine':
+      case 'directories':
+      case 'directory':
+        return this.trimProperty({ displayType: displayType, value: value });
+      default:
+        return value;
     }
-    if (Em.get(serviceConfigProperty, 'name') === 'kdc_type') {
-      return App.router.get('mainAdminKerberosController.kdcTypesValues')[value];
-    }
-    if ( /^\s+$/.test("" + value)) {
-      value = " ";
-    }
-    return value;
+
   },
 
   /**
@@ -559,43 +512,6 @@ App.config = Em.Object.create({
   },
 
   /**
-   *
-   * @param {string} ifStatement
-   * @param {Array} serviceConfigs
-   * @returns {boolean}
-   */
-  calculateConfigCondition: function(ifStatement, serviceConfigs) {
-    // Split `if` statement if it has logical operators
-    var ifStatementRegex = /(&&|\|\|)/;
-    var IfConditions = ifStatement.split(ifStatementRegex);
-    var allConditionResult = [];
-    IfConditions.forEach(function(_condition){
-      var condition = _condition.trim();
-      if (condition === '&&' || condition === '||') {
-        allConditionResult.push(_condition);
-      }  else {
-        var splitIfCondition = condition.split('===');
-        var ifCondition = splitIfCondition[0];
-        var result = splitIfCondition[1] || "true";
-        var parseIfConditionVal = ifCondition;
-        var regex = /\$\{.*?\}/g;
-        var configStrings = ifCondition.match(regex);
-        configStrings.forEach(function (_configString) {
-          var configObject = _configString.substring(2, _configString.length - 1).split("/");
-          var config = serviceConfigs.filterProperty('filename', configObject[0] + '.xml').findProperty('name', configObject[1]);
-          if (config) {
-            var configValue = Em.get(config, 'value');
-            parseIfConditionVal = parseIfConditionVal.replace(_configString, configValue);
-          }
-        }, this);
-        var conditionResult = window.eval(JSON.stringify(parseIfConditionVal.trim())) === result.trim();
-        allConditionResult.push(conditionResult);
-      }
-    }, this);
-    return Boolean(window.eval(allConditionResult.join('')));
-  },
-
-  /**
    * create new ServiceConfig object by service name
    * @param {string} serviceName
    * @param {App.ServiceConfigGroup[]} [configGroups]
@@ -687,7 +603,7 @@ App.config = Em.Object.create({
    * @return {Object}
    **/
   createCustomGroupConfig: function (propertyName, fileName, value, group, isEditable, isInstaller) {
-    var propertyObject = this.createDefaultConfig(propertyName, group.get('service.serviceName'), this.getOriginalFileName(fileName), false, {
+    var propertyObject = this.createDefaultConfig(propertyName, this.getOriginalFileName(fileName), false, {
       savedValue: isInstaller ? null : value,
       value: value,
       group: group,
@@ -726,8 +642,9 @@ App.config = Em.Object.create({
     var savedIsFinal = connectedConfigs.someProperty('savedIsFinal', true);
     var recommendedIsFinal = connectedConfigs.someProperty('recommendedIsFinal', true);
 
-    var cs = App.config.createDefaultConfig('capacity-scheduler', 'YARN', 'capacity-scheduler.xml', true, {
+    var cs = App.config.createDefaultConfig('capacity-scheduler', 'capacity-scheduler.xml', true, {
       'value': value,
+      'serviceName': 'YARN',
       'savedValue': savedValue || null,
       'recommendedValue': recommendedValue || null,
       'isFinal': isFinal,
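
formatPropertyValue is restructured above: the kdc_type and whitespace-only special cases now run before the displayType switch, every branch returns directly instead of falling through to shared trailing code, the string-like types (content, string, multiLine, directories, directory) all funnel into trimProperty, and a default branch passes the value through unchanged. A rough standalone sketch of the new control flow, with trimProperty reduced to a plain trim and the KDC label mapping omitted:

  function formatPropertyValue(name, displayType, value) {
    if (name === 'kdc_type') {
      return value; // the real code maps the stored value to its display label here
    }
    if (/^\s+$/.test('' + value)) {
      return ' ';
    }
    switch (displayType) {
      case 'int':
        if (/\d+m$/.test(value)) return value.slice(0, -1); // strip a trailing "m"
        var int = parseInt(value, 10);
        return isNaN(int) ? '' : int.toString();
      case 'float':
        var float = parseFloat(value);
        return isNaN(float) ? '' : float.toString();
      case 'componentHosts':
        // turn "[host1,host2]" strings into arrays
        return typeof value === 'string' ? value.replace(/\[|]|'|&apos;/g, '').split(',') : value;
      case 'content':
      case 'string':
      case 'multiLine':
      case 'directories':
      case 'directory':
        return ('' + value).trim(); // the real code delegates to App.config.trimProperty
      default:
        return value;
    }
  }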

http://git-wip-us.apache.org/repos/asf/ambari/blob/ea6a7a61/ambari-web/app/utils/configs/theme/theme.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/configs/theme/theme.js b/ambari-web/app/utils/configs/theme/theme.js
new file mode 100644
index 0000000..236faa8
--- /dev/null
+++ b/ambari-web/app/utils/configs/theme/theme.js
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+var App = require('app');
+
+App.configTheme = Em.Object.create({
+
+  /**
+   * Resolve config theme conditions
+   * in order to correctly calculate the number of config errors for a service
+   * @param {Array} configs
+   */
+  resolveConfigThemeConditions: function (configs) {
+    App.ThemeCondition.find().forEach(function (configCondition) {
+      var _configs = Em.A(configCondition.get('configs'));
+      if (configCondition.get("resource") === 'config' && _configs.length > 0) {
+        var isConditionTrue = this.calculateConfigCondition(configCondition.get("if"), configs);
+        var action = isConditionTrue ? configCondition.get("then") : configCondition.get("else");
+        if (configCondition.get('id')) {
+          var valueAttributes = action.property_value_attributes;
+          if (valueAttributes && !Em.none(valueAttributes['visible'])) {
+            var themeResource;
+            if (configCondition.get('type') === 'subsection') {
+              themeResource = App.SubSection.find().findProperty('name', configCondition.get('name'));
+            } else if (configCondition.get('type') === 'subsectionTab') {
+              themeResource = App.SubSectionTab.find().findProperty('name', configCondition.get('name'));
+            } else if (configCondition.get('type') === 'config') {
+              //simulate section wrapper for condition type "config"
+              themeResource = Em.Object.create({
+                configProperties: [App.config.configId(configCondition.get('configName'), configCondition.get('fileName'))]
+              });
+            }
+            if (themeResource) {
+              themeResource.get('configProperties').forEach(function (_configId) {
+                configs.forEach(function (item) {
+                  if (App.config.configId(item.name, item.filename) === _configId) {
+                    // if config has already been hidden by condition with "subsection" or "subsectionTab" type
+                    // then ignore condition of "config" type
+                    if (configCondition.get('type') === 'config' && item.hiddenBySection) return false;
+                    item.hiddenBySection = !valueAttributes['visible'];
+                  }
+                });
+              }, this);
+            }
+          }
+        }
+      }
+    });
+  },
+
+  /**
+   *
+   * @param {string} ifStatement
+   * @param {Array} serviceConfigs
+   * @returns {boolean}
+   */
+  calculateConfigCondition: function(ifStatement, serviceConfigs) {
+    // Split `if` statement if it has logical operators
+    var ifStatementRegex = /(&&|\|\|)/;
+    var IfConditions = ifStatement.split(ifStatementRegex);
+    var allConditionResult = [];
+    IfConditions.forEach(function(_condition){
+      var condition = _condition.trim();
+      if (condition === '&&' || condition === '||') {
+        allConditionResult.push(_condition);
+      }  else {
+        var splitIfCondition = condition.split('===');
+        var ifCondition = splitIfCondition[0];
+        var result = splitIfCondition[1] || "true";
+        var parseIfConditionVal = ifCondition;
+        var regex = /\$\{.*?\}/g;
+        var configStrings = ifCondition.match(regex);
+        configStrings.forEach(function (_configString) {
+          var configObject = _configString.substring(2, _configString.length - 1).split("/");
+          var config = serviceConfigs.filterProperty('filename', configObject[0] + '.xml').findProperty('name', configObject[1]);
+          if (config) {
+            var configValue = Em.get(config, 'value');
+            parseIfConditionVal = parseIfConditionVal.replace(_configString, configValue);
+          }
+        }, this);
+        var conditionResult = window.eval(JSON.stringify(parseIfConditionVal.trim())) === result.trim();
+        allConditionResult.push(conditionResult);
+      }
+    }, this);
+    return Boolean(window.eval(allConditionResult.join('')));
+  }
+
+});
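
calculateConfigCondition moves here unchanged from utils/config.js. Its `if` statements reference configs as ${file-name/property-name} and may chain comparisons with && or ||. A rough self-contained sketch of the substitution step for a single comparison (the real code also splits on the logical operators and evaluates the combined expression); the condition and config values are made up:

  function calculateConfigCondition(ifStatement, serviceConfigs) {
    // replace every ${file/property} reference with the matching config value
    var resolved = ifStatement.replace(/\$\{(.*?)\}/g, function (match, ref) {
      var parts = ref.split('/'); // [fileName, propertyName]
      var config = serviceConfigs.filter(function (c) {
        return c.filename === parts[0] + '.xml' && c.name === parts[1];
      })[0];
      return config ? config.value : match;
    });
    // compare the left side against the right side ("true" when no "===" is present)
    var sides = resolved.split('===');
    var expected = (sides[1] || 'true').trim();
    return sides[0].trim() === expected;
  }

  calculateConfigCondition(
    '${hive-env/hive_security_authorization}===ranger',
    [{filename: 'hive-env.xml', name: 'hive_security_authorization', value: 'ranger'}]
  ); // true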

http://git-wip-us.apache.org/repos/asf/ambari/blob/ea6a7a61/ambari-web/app/views/common/configs/widgets/config_widget_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/configs/widgets/config_widget_view.js b/ambari-web/app/views/common/configs/widgets/config_widget_view.js
index 9858b75..3ad9e3b 100644
--- a/ambari-web/app/views/common/configs/widgets/config_widget_view.js
+++ b/ambari-web/app/views/common/configs/widgets/config_widget_view.js
@@ -394,7 +394,7 @@ App.ConfigWidgetView = Em.View.extend(App.SupportsDependentConfigs, App.WidgetPo
     configConditions.forEach(function(configCondition){
       var ifStatement =  configCondition.get("if");
       if (configCondition.get("resource") === 'config') {
-        isConditionTrue = App.config.calculateConfigCondition(ifStatement, serviceConfigs);
+        isConditionTrue = App.configTheme.calculateConfigCondition(ifStatement, serviceConfigs);
         if (configCondition.get("type") === 'subsection' || configCondition.get("type") === 'subsectionTab') {
           this.changeSubsectionAttribute(configCondition, isConditionTrue);
         } else {

http://git-wip-us.apache.org/repos/asf/ambari/blob/ea6a7a61/ambari-web/test/controllers/main/admin/kerberos/step4_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/admin/kerberos/step4_controller_test.js b/ambari-web/test/controllers/main/admin/kerberos/step4_controller_test.js
index fa78916..1b706ea 100644
--- a/ambari-web/test/controllers/main/admin/kerberos/step4_controller_test.js
+++ b/ambari-web/test/controllers/main/admin/kerberos/step4_controller_test.js
@@ -185,14 +185,17 @@ describe('App.KerberosWizardStep4Controller', function() {
         sinon.stub(App.StackService, 'find').returns([
           Em.Object.create({
             serviceName: 'KERBEROS',
-            configCategories: []
+            configCategories: [],
+            configTypeList: []
           }),
           Em.Object.create({
             serviceName: 'HDFS',
-            configCategories: []
+            configCategories: [],
+            configTypeList: []
           }),
           Em.Object.create({
-            serviceName: 'MAPREDUCE2'
+            serviceName: 'MAPREDUCE2',
+            configTypeList: []
           })
         ]);
         sinon.stub(App.Service, 'find').returns([

http://git-wip-us.apache.org/repos/asf/ambari/blob/ea6a7a61/ambari-web/test/controllers/wizard/step7_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/wizard/step7_test.js b/ambari-web/test/controllers/wizard/step7_test.js
index 0ceec97..1291ed9 100644
--- a/ambari-web/test/controllers/wizard/step7_test.js
+++ b/ambari-web/test/controllers/wizard/step7_test.js
@@ -2323,14 +2323,15 @@ describe('App.InstallerStep7Controller', function () {
       beforeEach(function () {
         installerStep7Controller.set('wizardController', {name: 'addServiceController'});
         this.stub = sinon.stub(App.configsCollection, 'getConfigByName');
-        sinon.stub(App.config, 'getServiceByConfigType', function (type) {
-          return Em.Object.create({serviceName: type === 't1' ? 's1' : 's2'});
+        sinon.stub(App.config, 'get').withArgs('serviceByConfigTypeMap').returns({
+          't1': Em.Object.create({serviceName: 's1'}),
+          't2': Em.Object.create({serviceName: 's2'})
         })
       });
 
       afterEach(function () {
         App.configsCollection.getConfigByName.restore();
-        App.config.getServiceByConfigType.restore();
+        App.config.get.restore();
       });
 
       it('stackProperty does not exist', function () {
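
For reference, the stubbing pattern used in the test above: sinon's withArgs scopes the canned return value to calls made with that exact argument, and calls with any other argument return undefined rather than falling back to the real getter (which is why the utils/config_test stub further down wraps App.config.get in a delegating function instead). A minimal sketch with a stand-in object:

  var sinon = require('sinon');

  var obj = { get: function (key) { return 'real:' + key; } };
  sinon.stub(obj, 'get')
    .withArgs('serviceByConfigTypeMap')
    .returns({ 't1': { serviceName: 's1' } });

  obj.get('serviceByConfigTypeMap'); // { t1: { serviceName: 's1' } }
  obj.get('anything-else');          // undefined -- not delegated to the original
  obj.get.restore();                 // restore the real implementation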

http://git-wip-us.apache.org/repos/asf/ambari/blob/ea6a7a61/ambari-web/test/utils/config_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/utils/config_test.js b/ambari-web/test/utils/config_test.js
index 8dda9f6..e556230 100644
--- a/ambari-web/test/utils/config_test.js
+++ b/ambari-web/test/utils/config_test.js
@@ -510,44 +510,6 @@ describe('App.config', function () {
     });
   });
 
-  describe('#getIsEditable', function() {
-    [{
-        isDefaultGroup: true,
-        isReconfigurable: true,
-        canEdit: true,
-        res: true,
-        m: "isEditable is true"
-      },
-      {
-        isDefaultGroup: false,
-        isReconfigurable: true,
-        canEdit: true,
-        res: false,
-        m: "isEditable is false; config group is not default"
-      },
-      {
-        isDefaultGroup: true,
-        isReconfigurable: false,
-        canEdit: true,
-        res: false,
-        m: "isEditable is true; config is not reconfigurable"
-      },
-      {
-        isDefaultGroup: true,
-        isReconfigurable: true,
-        canEdit: false,
-        res: false,
-        m: "isEditable is true; edition restricted by controller state"
-    }].forEach(function(t) {
-        it(t.m, function() {
-          var configProperty = Ember.Object.create({isReconfigurable: t.isReconfigurable});
-          var configGroup = Ember.Object.create({isDefault: t.isDefaultGroup});
-          var isEditable = App.config.getIsEditable(configProperty, configGroup, t.canEdit);
-          expect(isEditable).to.equal(t.res);
-        })
-      });
-  });
-
   describe('#getIsSecure', function() {
     var secureConfigs = App.config.get('secureConfigs');
     before(function() {
@@ -692,6 +654,12 @@ describe('App.config', function () {
       sinon.stub(App.config, 'shouldSupportFinal', function() {
         return true;
       });
+      sinon.stub(App.config, 'get', function(param) {
+        if (param === 'serviceByConfigTypeMap') {
+          return { 'pFileName': 'pServiceName' };
+        }
+        return Em.get(App.config, param);
+      });
     });
 
     after(function() {
@@ -699,11 +667,12 @@ describe('App.config', function () {
       App.config.getDefaultCategory.restore();
       App.config.getIsSecure.restore();
       App.config.shouldSupportFinal.restore();
+      App.config.get.restore();
     });
 
     var res = {
       /** core properties **/
-      id: "pName__pFileName",
+      id: 'pName__pFileName',
       name: 'pName',
       filename: 'pFileName.xml',
       value: '',
@@ -738,7 +707,7 @@ describe('App.config', function () {
       widgetType: null
     };
     it('create default config object', function () {
-      expect(App.config.createDefaultConfig('pName', 'pServiceName', 'pFileName', true)).to.eql(res);
+      expect(App.config.createDefaultConfig('pName', 'pFileName', true)).to.eql(res);
     });
     it('getDefaultDisplayType is called', function() {
       expect(App.config.getDefaultDisplayType.called).to.be.true;
@@ -890,83 +859,6 @@ describe('App.config', function () {
     });
   });
 
-  describe("#restrictSecureProperties()", function() {
-    var testCases = [
-      {
-        input: {
-          isSecureConfig: true,
-          isKerberosEnabled: true,
-          isReconfigurable: false,
-          isOverridable: false
-        },
-        expected: {
-          isReconfigurable: false,
-          isOverridable: false
-        }
-      },
-      {
-        input: {
-          isSecureConfig: true,
-          isKerberosEnabled: true,
-          isReconfigurable: true,
-          isOverridable: true
-        },
-        expected: {
-          isReconfigurable: false,
-          isOverridable: false
-        }
-      },
-      {
-        input: {
-          isSecureConfig: true,
-          isKerberosEnabled: false,
-          isReconfigurable: true,
-          isOverridable: true
-        },
-        expected: {
-          isReconfigurable: true,
-          isOverridable: true
-        }
-      },
-      {
-        input: {
-          isSecureConfig: false,
-          isReconfigurable: false,
-          isOverridable: false
-        },
-        expected: {
-          isReconfigurable: false,
-          isOverridable: false
-        }
-      },
-      {
-        input: {
-          isSecureConfig: false,
-          isReconfigurable: true,
-          isOverridable: true
-        },
-        expected: {
-          isReconfigurable: true,
-          isOverridable: true
-        }
-      }
-    ];
-
-    testCases.forEach(function(test) {
-      it("isSecureConfig = " + test.input.isSecureConfig + "; isKerberosEnabled = " + test.input.isKerberosEnabled, function() {
-        var config = {
-          isSecureConfig: test.input.isSecureConfig,
-          isReconfigurable: test.input.isReconfigurable,
-          isOverridable: test.input.isOverridable
-        };
-        App.set('isKerberosEnabled', !!test.input.isKerberosEnabled);
-        App.config.restrictSecureProperties(config);
-        expect(config.isReconfigurable).to.equal(test.expected.isReconfigurable);
-        expect(config.isOverridable).to.equal(test.expected.isOverridable);
-      });
-    });
-  });
-
   describe("#truncateGroupName()", function() {
 
     it("name is empty", function() {


[08/11] ambari git commit: AMBARI-15141. Start all services request aborts in the middle and hosts go into heartbeat-lost state. (mpapirkovskyy)

Posted by nc...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/083ac6da/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
index 2a4cec8..c62352a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
@@ -54,10 +54,8 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Date;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 
 import javax.xml.bind.JAXBException;
 
@@ -65,47 +63,28 @@ import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.actionmanager.ActionDBAccessor;
-import org.apache.ambari.server.actionmanager.ActionDBAccessorImpl;
 import org.apache.ambari.server.actionmanager.ActionManager;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.actionmanager.HostRoleCommandFactory;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.actionmanager.Request;
-import org.apache.ambari.server.actionmanager.RequestFactory;
 import org.apache.ambari.server.actionmanager.Stage;
 import org.apache.ambari.server.actionmanager.StageFactory;
 import org.apache.ambari.server.agent.HostStatus.Status;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.HostsMap;
-import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.OrmTestHelper;
-import org.apache.ambari.server.orm.dao.ClusterDAO;
-import org.apache.ambari.server.orm.dao.HostDAO;
-import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
-import org.apache.ambari.server.orm.dao.ResourceTypeDAO;
-import org.apache.ambari.server.orm.dao.StackDAO;
-import org.apache.ambari.server.orm.entities.ClusterEntity;
-import org.apache.ambari.server.orm.entities.HostEntity;
-import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
-import org.apache.ambari.server.orm.entities.ResourceEntity;
-import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
-import org.apache.ambari.server.orm.entities.StackEntity;
-import org.apache.ambari.server.security.authorization.ResourceType;
 import org.apache.ambari.server.serveraction.kerberos.KerberosIdentityDataFileWriter;
 import org.apache.ambari.server.serveraction.kerberos.KerberosIdentityDataFileWriterFactory;
 import org.apache.ambari.server.serveraction.kerberos.KerberosServerAction;
 import org.apache.ambari.server.state.Alert;
-import org.apache.ambari.server.state.AlertState;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostHealthStatus;
 import org.apache.ambari.server.state.HostState;
 import org.apache.ambari.server.state.MaintenanceState;
-import org.apache.ambari.server.state.RepositoryVersionState;
 import org.apache.ambari.server.state.SecurityState;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponentHost;
@@ -113,9 +92,6 @@ import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.State;
 import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostInstallEvent;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartEvent;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostUpgradeEvent;
-import org.apache.ambari.server.utils.EventBusSynchronizer;
 import org.apache.ambari.server.utils.StageUtils;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.codec.digest.DigestUtils;
@@ -128,7 +104,6 @@ import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.gson.JsonObject;
 import com.google.inject.Guice;
 import com.google.inject.Inject;
 import com.google.inject.Injector;
@@ -145,8 +120,6 @@ public class TestHeartbeatHandler {
   long requestId = 23;
   long stageId = 31;
 
-  private final static StackId HDP_22_STACK = new StackId("HDP", "2.2.0");
-
   @Inject
   AmbariMetaInfo metaInfo;
 
@@ -157,26 +130,14 @@ public class TestHeartbeatHandler {
   ActionDBAccessor actionDBAccessor;
 
   @Inject
-  OrmTestHelper helper;
-
-  @Inject
-  ResourceTypeDAO resourceTypeDAO;
-
-  @Inject
-  StackDAO stackDAO;
-
-  @Inject
-  ClusterDAO clusterDAO;
-
-  @Inject
-  HostDAO hostDAO;
-
-  @Inject
   StageFactory stageFactory;
 
   @Inject
   HostRoleCommandFactory hostRoleCommandFactory;
 
+  @Inject
+  HeartbeatTestHelper heartbeatTestHelper;
+
   private UnitOfWork unitOfWork;
 
   @Rule
@@ -187,18 +148,7 @@ public class TestHeartbeatHandler {
 
   @Before
   public void setup() throws Exception {
-    module = new InMemoryDefaultTestModule(){
-
-      @Override
-      protected void configure() {
-        getProperties().put("recovery.type", "FULL");
-        getProperties().put("recovery.lifetime_max_count", "10");
-        getProperties().put("recovery.max_count", "4");
-        getProperties().put("recovery.window_in_minutes", "23");
-        getProperties().put("recovery.retry_interval", "2");
-        super.configure();
-      }
-    };
+    module = HeartbeatTestHelper.getTestModule();
     injector = Guice.createInjector(module);
     injector.getInstance(GuiceJpaInitializer.class);
     clusters = injector.getInstance(Clusters.class);
@@ -215,7 +165,7 @@ public class TestHeartbeatHandler {
   @Test
   @SuppressWarnings("unchecked")
   public void testHeartbeat() throws Exception {
-    ActionManager am = getMockActionManager();
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
     expect(am.getTasks(anyObject(List.class))).andReturn(new ArrayList<HostRoleCommand>());
     replay(am);
     Clusters fsm = clusters;
@@ -257,387 +207,16 @@ public class TestHeartbeatHandler {
     assertEquals(0, aq.dequeueAll(DummyHostname1).size());
   }
 
-  @Test
-  @SuppressWarnings("unchecked")
-  public void testHeartbeatWithConfigs() throws Exception {
-    Cluster cluster = getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
-    hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-
-    ActionQueue aq = new ActionQueue();
-
-    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
-            getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
-    ServiceComponentHost serviceComponentHost2 = clusters.getCluster(DummyCluster).getService(HDFS).
-            getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1);
-    serviceComponentHost1.setState(State.INSTALLED);
-    serviceComponentHost2.setState(State.INSTALLED);
-
-    HeartBeat hb = new HeartBeat();
-    hb.setResponseId(0);
-    hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
-    hb.setHostname(DummyHostname1);
-
-    List<CommandReport> reports = new ArrayList<CommandReport>();
-    CommandReport cr = new CommandReport();
-    cr.setActionId(StageUtils.getActionId(requestId, stageId));
-    cr.setServiceName(HDFS);
-    cr.setTaskId(1);
-    cr.setRole(DATANODE);
-    cr.setStatus("COMPLETED");
-    cr.setStdErr("");
-    cr.setStdOut("");
-    cr.setExitCode(215);
-    cr.setRoleCommand("START");
-    cr.setClusterName(DummyCluster);
-
-    cr.setConfigurationTags(new HashMap<String, Map<String, String>>() {{
-      put("global", new HashMap<String, String>() {{
-        put("tag", "version1");
-      }});
-    }});
-
-    reports.add(cr);
-    hb.setReports(reports);
-
-    HostEntity host1 = hostDAO.findByName(DummyHostname1);
-    Assert.assertNotNull(host1);
-    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
-            Role.DATANODE, null, null);
-
-    ActionManager am = getMockActionManager();
-    expect(am.getTasks(anyObject(List.class))).andReturn(
-            new ArrayList<HostRoleCommand>() {{
-              add(command);
-            }});
-    replay(am);
-
-    HeartBeatHandler handler = getHeartBeatHandler(am, aq);
-    handler.handleHeartBeat(hb);
-
-    // the heartbeat test passed if actual configs is populated
-    Assert.assertNotNull(serviceComponentHost1.getActualConfigs());
-    Assert.assertEquals(serviceComponentHost1.getActualConfigs().size(), 1);
-  }
-
-  @Test
-  @SuppressWarnings("unchecked")
-  public void testRestartRequiredAfterInstallClient() throws Exception {
-    Cluster cluster = getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(HDFS_CLIENT).persist();
-    hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
-
-    ActionQueue aq = new ActionQueue();
-
-    ServiceComponentHost serviceComponentHost = clusters.getCluster(DummyCluster).getService(HDFS).
-        getServiceComponent(HDFS_CLIENT).getServiceComponentHost(DummyHostname1);
-
-    serviceComponentHost.setState(State.INSTALLED);
-    serviceComponentHost.setRestartRequired(true);
-
-    HeartBeat hb = new HeartBeat();
-    hb.setResponseId(0);
-    hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
-    hb.setHostname(DummyHostname1);
-
-
-    List<CommandReport> reports = new ArrayList<CommandReport>();
-    CommandReport cr = new CommandReport();
-    cr.setActionId(StageUtils.getActionId(requestId, stageId));
-    cr.setServiceName(HDFS);
-    cr.setRoleCommand("INSTALL");
-    cr.setCustomCommand("EXECUTION_COMMAND");
-    cr.setTaskId(1);
-    cr.setRole(HDFS_CLIENT);
-    cr.setStatus("COMPLETED");
-    cr.setStdErr("");
-    cr.setStdOut("");
-    cr.setExitCode(215);
-    cr.setClusterName(DummyCluster);
-    cr.setConfigurationTags(new HashMap<String, Map<String, String>>() {{
-      put("global", new HashMap<String, String>() {{
-        put("tag", "version1");
-      }});
-    }});
-    reports.add(cr);
-    hb.setReports(reports);
-
-    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
-        Role.DATANODE, null, null);
-
-    ActionManager am = getMockActionManager();
-    expect(am.getTasks(anyObject(List.class))).andReturn(
-      new ArrayList<HostRoleCommand>() {{
-        add(command);
-        add(command);
-      }});
-    replay(am);
-
-    HeartBeatHandler handler = getHeartBeatHandler(am, aq);
-    handler.handleHeartBeat(hb);
-
-    Assert.assertNotNull(serviceComponentHost.getActualConfigs());
-    Assert.assertFalse(serviceComponentHost.isRestartRequired());
-    Assert.assertEquals(serviceComponentHost.getActualConfigs().size(), 1);
-
-  }
-
-  @Test
-  @SuppressWarnings("unchecked")
-  public void testHeartbeatCustomCommandWithConfigs() throws Exception {
-    Cluster cluster = getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
-    hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-
-    ActionQueue aq = new ActionQueue();
-
-    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
-        getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
-    ServiceComponentHost serviceComponentHost2 = clusters.getCluster(DummyCluster).getService(HDFS).
-        getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1);
-    serviceComponentHost1.setState(State.INSTALLED);
-    serviceComponentHost2.setState(State.INSTALLED);
-
-    HeartBeat hb = new HeartBeat();
-    hb.setResponseId(0);
-    hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
-    hb.setHostname(DummyHostname1);
-
-    List<CommandReport> reports = new ArrayList<CommandReport>();
-    CommandReport cr = new CommandReport();
-    cr.setActionId(StageUtils.getActionId(requestId, stageId));
-    cr.setServiceName(HDFS);
-    cr.setRoleCommand("CUSTOM_COMMAND");
-    cr.setCustomCommand("RESTART");
-    cr.setTaskId(1);
-    cr.setRole(DATANODE);
-    cr.setStatus("COMPLETED");
-    cr.setStdErr("");
-    cr.setStdOut("");
-    cr.setExitCode(215);
-    cr.setClusterName(DummyCluster);
-    cr.setConfigurationTags(new HashMap<String, Map<String,String>>() {{
-      put("global", new HashMap<String,String>() {{ put("tag", "version1"); }});
-    }});
-    CommandReport crn = new CommandReport();
-    crn.setActionId(StageUtils.getActionId(requestId, stageId));
-    crn.setServiceName(HDFS);
-    crn.setRoleCommand("CUSTOM_COMMAND");
-    crn.setCustomCommand("START");
-    crn.setTaskId(1);
-    crn.setRole(NAMENODE);
-    crn.setStatus("COMPLETED");
-    crn.setStdErr("");
-    crn.setStdOut("");
-    crn.setExitCode(215);
-    crn.setClusterName(DummyCluster);
-    crn.setConfigurationTags(new HashMap<String, Map<String,String>>() {{
-      put("global", new HashMap<String,String>() {{ put("tag", "version1"); }});
-    }});
-
-    reports.add(cr);
-    reports.add(crn);
-    hb.setReports(reports);
-
-    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
-      Role.DATANODE, null, null);
-
-    ActionManager am = getMockActionManager();
-    expect(am.getTasks(anyObject(List.class))).andReturn(
-            new ArrayList<HostRoleCommand>() {{
-              add(command);
-              add(command);
-            }});
-    replay(am);
-
-    HeartBeatHandler handler = getHeartBeatHandler(am, aq);
-    handler.handleHeartBeat(hb);
-
-    // the heartbeat test passed if actual configs is populated
-    Assert.assertNotNull(serviceComponentHost1.getActualConfigs());
-    Assert.assertEquals(serviceComponentHost1.getActualConfigs().size(), 1);
-    Assert.assertNotNull(serviceComponentHost2.getActualConfigs());
-    Assert.assertEquals(serviceComponentHost2.getActualConfigs().size(), 1);
-  }
-
-  @Test
-  @SuppressWarnings("unchecked")
-  public void testHeartbeatCustomStartStop() throws Exception {
-    Cluster cluster = getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
-    hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-
-    ActionQueue aq = new ActionQueue();
-
-    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
-        getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
-    ServiceComponentHost serviceComponentHost2 = clusters.getCluster(DummyCluster).getService(HDFS).
-        getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1);
-    serviceComponentHost1.setState(State.INSTALLED);
-    serviceComponentHost2.setState(State.STARTED);
-    serviceComponentHost1.setRestartRequired(true);
-    serviceComponentHost2.setRestartRequired(true);
-
-    HeartBeat hb = new HeartBeat();
-    hb.setResponseId(0);
-    hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
-    hb.setHostname(DummyHostname1);
-
-    List<CommandReport> reports = new ArrayList<CommandReport>();
-    CommandReport cr = new CommandReport();
-    cr.setActionId(StageUtils.getActionId(requestId, stageId));
-    cr.setServiceName(HDFS);
-    cr.setRoleCommand("CUSTOM_COMMAND");
-    cr.setCustomCommand("START");
-    cr.setTaskId(1);
-    cr.setRole(DATANODE);
-    cr.setStatus("COMPLETED");
-    cr.setStdErr("");
-    cr.setStdOut("");
-    cr.setExitCode(215);
-    cr.setClusterName(DummyCluster);
-    CommandReport crn = new CommandReport();
-    crn.setActionId(StageUtils.getActionId(requestId, stageId));
-    crn.setServiceName(HDFS);
-    crn.setRoleCommand("CUSTOM_COMMAND");
-    crn.setCustomCommand("STOP");
-    crn.setTaskId(1);
-    crn.setRole(NAMENODE);
-    crn.setStatus("COMPLETED");
-    crn.setStdErr("");
-    crn.setStdOut("");
-    crn.setExitCode(215);
-    crn.setClusterName(DummyCluster);
-
-    reports.add(cr);
-    reports.add(crn);
-    hb.setReports(reports);
-
-    assertTrue(serviceComponentHost1.isRestartRequired());
-
-    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
-      Role.DATANODE, null, null);
-
-    ActionManager am = getMockActionManager();
-    expect(am.getTasks(anyObject(List.class))).andReturn(
-      new ArrayList<HostRoleCommand>() {{
-        add(command);
-        add(command);
-      }});
-    replay(am);
-
-    HeartBeatHandler handler = getHeartBeatHandler(am, aq);
-    handler.handleHeartBeat(hb);
-
-    // the heartbeat test passed if actual configs is populated
-    State componentState1 = serviceComponentHost1.getState();
-    assertEquals(State.STARTED, componentState1);
-    assertFalse(serviceComponentHost1.isRestartRequired());
-    State componentState2 = serviceComponentHost2.getState();
-    assertEquals(State.INSTALLED, componentState2);
-    assertTrue(serviceComponentHost2.isRestartRequired());
-  }
-
-  @Test
-  @SuppressWarnings("unchecked")
-  public void testStatusHeartbeat() throws Exception {
-    Cluster cluster = getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
-    hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-
-    ActionQueue aq = new ActionQueue();
 
-    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
-            getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
-    ServiceComponentHost serviceComponentHost2 = clusters.getCluster(DummyCluster).getService(HDFS).
-            getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1);
-    ServiceComponentHost serviceComponentHost3 = clusters.getCluster(DummyCluster).getService(HDFS).
-        getServiceComponent(SECONDARY_NAMENODE).getServiceComponentHost(DummyHostname1);
-    serviceComponentHost1.setState(State.INSTALLED);
-    serviceComponentHost1.setSecurityState(SecurityState.UNSECURED);
-    serviceComponentHost2.setState(State.INSTALLED);
-    serviceComponentHost2.setSecurityState(SecurityState.SECURING);
-    serviceComponentHost3.setState(State.STARTING);
 
-    HeartBeat hb = new HeartBeat();
-    hb.setTimestamp(System.currentTimeMillis());
-    hb.setResponseId(0);
-    hb.setHostname(DummyHostname1);
-    hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
-    hb.setReports(new ArrayList<CommandReport>());
-    ArrayList<ComponentStatus> componentStatuses = new ArrayList<ComponentStatus>();
-    ComponentStatus componentStatus1 = new ComponentStatus();
-    componentStatus1.setClusterName(DummyCluster);
-    componentStatus1.setServiceName(HDFS);
-    componentStatus1.setMessage(DummyHostStatus);
-    componentStatus1.setStatus(State.STARTED.name());
-    componentStatus1.setSecurityState(SecurityState.SECURED_KERBEROS.name());
-    componentStatus1.setComponentName(DATANODE);
-    componentStatuses.add(componentStatus1);
-    ComponentStatus componentStatus2 = new ComponentStatus();
-    componentStatus2.setClusterName(DummyCluster);
-    componentStatus2.setServiceName(HDFS);
-    componentStatus2.setMessage(DummyHostStatus);
-    componentStatus2.setStatus(State.STARTED.name());
-    componentStatus2.setSecurityState(SecurityState.UNSECURED.name());
-    componentStatus2.setComponentName(SECONDARY_NAMENODE);
-    componentStatuses.add(componentStatus2);
-    hb.setComponentStatus(componentStatuses);
 
-    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
-            Role.DATANODE, null, null);
 
-    ActionManager am = getMockActionManager();
-    expect(am.getTasks(anyObject(List.class))).andReturn(
-      new ArrayList<HostRoleCommand>() {{
-        add(command);
-        add(command);
-      }});
-    replay(am);
 
-    HeartBeatHandler handler = getHeartBeatHandler(am, aq);
-    handler.handleHeartBeat(hb);
-    State componentState1 = serviceComponentHost1.getState();
-    State componentState2 = serviceComponentHost2.getState();
-    State componentState3 = serviceComponentHost3.getState();
-    assertEquals(State.STARTED, componentState1);
-    assertEquals(SecurityState.SECURED_KERBEROS, serviceComponentHost1.getSecurityState());
-    assertEquals(State.INSTALLED, componentState2);
-    assertEquals(SecurityState.SECURING, serviceComponentHost2.getSecurityState());
-    assertEquals(State.STARTED, componentState3);
-    assertEquals(SecurityState.UNSECURED, serviceComponentHost3.getSecurityState());
-  }
 
   @Test
   @SuppressWarnings("unchecked")
   public void testStatusHeartbeatWithAnnotation() throws Exception {
-    Cluster cluster = getDummyCluster();
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
     hdfs.persist();
     hdfs.addServiceComponent(DATANODE).persist();
@@ -658,14 +237,14 @@ public class TestHeartbeatHandler {
     final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
             Role.DATANODE, null, null);
 
-    ActionManager am = getMockActionManager();
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
     expect(am.getTasks(anyObject(List.class))).andReturn(
             new ArrayList<HostRoleCommand>() {{
               add(command);
             }}).anyTimes();
     replay(am);
 
-    HeartBeatHandler handler = getHeartBeatHandler(am, aq);
+    HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am, aq);
     HeartBeatResponse resp = handler.handleHeartBeat(hb);
     Assert.assertFalse(resp.hasMappedComponents());
 
@@ -689,7 +268,7 @@ public class TestHeartbeatHandler {
   @Test
   @SuppressWarnings("unchecked")
   public void testLiveStatusUpdateAfterStopFailed() throws Exception {
-    Cluster cluster = getDummyCluster();
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
     hdfs.persist();
     hdfs.addServiceComponent(DATANODE).persist();
@@ -743,7 +322,7 @@ public class TestHeartbeatHandler {
     final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
             Role.DATANODE, null, null);
 
-    ActionManager am = getMockActionManager();
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
     expect(am.getTasks(anyObject(List.class))).andReturn(
             new ArrayList<HostRoleCommand>() {{
               add(command);
@@ -751,82 +330,25 @@ public class TestHeartbeatHandler {
             }});
     replay(am);
 
-    HeartBeatHandler handler = getHeartBeatHandler(am, aq);
-    handler.handleHeartBeat(hb);
+    HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am, aq);
+    HeartbeatProcessor heartbeatProcessor = handler.getHeartbeatProcessor();
+    heartbeatProcessor.processHeartbeat(hb);
+
     State componentState1 = serviceComponentHost1.getState();
     State componentState2 = serviceComponentHost2.getState();
     assertEquals(State.STARTED, componentState1);
     assertEquals(State.INSTALLED, componentState2);
   }
 
+
   @Test
-  public void testCommandReport() throws AmbariException {
-    injector.injectMembers(this);
-    clusters.addHost(DummyHostname1);
-    clusters.getHost(DummyHostname1).persist();
-
-    StackId dummyStackId = new StackId(DummyStackId);
-    clusters.addCluster(DummyCluster, dummyStackId);
-
-    ActionDBAccessor db = injector.getInstance(ActionDBAccessorImpl.class);
-    ActionManager am = new ActionManager(5000, 1200000, new ActionQueue(), clusters, db,
-        new HostsMap((String) null), unitOfWork, injector.getInstance(RequestFactory.class), null, null);
-    populateActionDB(db, DummyHostname1);
-    Stage stage = db.getAllStages(requestId).get(0);
-    Assert.assertEquals(stageId, stage.getStageId());
-    stage.setHostRoleStatus(DummyHostname1, HBASE_MASTER, HostRoleStatus.QUEUED);
-    db.hostRoleScheduled(stage, DummyHostname1, HBASE_MASTER);
-    List<CommandReport> reports = new ArrayList<CommandReport>();
-    CommandReport cr = new CommandReport();
-    cr.setActionId(StageUtils.getActionId(requestId, stageId));
-    cr.setTaskId(1);
-    cr.setRole(HBASE_MASTER);
-    cr.setStatus("COMPLETED");
-    cr.setStdErr("");
-    cr.setStdOut("");
-    cr.setExitCode(215);
-
-    cr.setConfigurationTags(new HashMap<String, Map<String,String>>() {{
-        put("global", new HashMap<String,String>() {{ put("tag", "version1"); }});
-      }});
-
-
-    reports.add(cr);
-    am.processTaskResponse(DummyHostname1, reports, stage.getOrderedHostRoleCommands());
-    assertEquals(215,
-            am.getAction(requestId, stageId).getExitCode(DummyHostname1, HBASE_MASTER));
-    assertEquals(HostRoleStatus.COMPLETED, am.getAction(requestId, stageId)
-            .getHostRoleStatus(DummyHostname1, HBASE_MASTER));
-    Stage s = db.getAllStages(requestId).get(0);
-    assertEquals(HostRoleStatus.COMPLETED,
-            s.getHostRoleStatus(DummyHostname1, HBASE_MASTER));
-    assertEquals(215,
-            s.getExitCode(DummyHostname1, HBASE_MASTER));
-  }
-
-  private void populateActionDB(ActionDBAccessor db, String DummyHostname1) throws AmbariException {
-    Stage s = stageFactory.createNew(requestId, "/a/b", DummyCluster, 1L, "heartbeat handler test",
-      "clusterHostInfo", "commandParamsStage", "hostParamsStage");
-    s.setStageId(stageId);
-    String filename = null;
-    s.addHostRoleExecutionCommand(DummyHostname1, Role.HBASE_MASTER,
-        RoleCommand.START,
-        new ServiceComponentHostStartEvent(Role.HBASE_MASTER.toString(),
-            DummyHostname1, System.currentTimeMillis()), DummyCluster, HBASE, false, false);
-    List<Stage> stages = new ArrayList<Stage>();
-    stages.add(s);
-    Request request = new Request(stages, clusters);
-    db.persistActions(request);
-  }
-
-  @Test
-  public void testRegistration() throws AmbariException,
-      InvalidStateTransitionException {
-    ActionManager am = getMockActionManager();
-    replay(am);
-    Clusters fsm = clusters;
-    HeartBeatHandler handler = new HeartBeatHandler(fsm, new ActionQueue(), am,
-        injector);
+  public void testRegistration() throws AmbariException,
+      InvalidStateTransitionException {
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
+    replay(am);
+    Clusters fsm = clusters;
+    HeartBeatHandler handler = new HeartBeatHandler(fsm, new ActionQueue(), am,
+        injector);
     clusters.addHost(DummyHostname1);
     Host hostObject = clusters.getHost(DummyHostname1);
     hostObject.setIPv4("ipv4");
@@ -853,12 +375,12 @@ public class TestHeartbeatHandler {
   @Test
   public void testRegistrationRecoveryConfig() throws AmbariException,
       InvalidStateTransitionException {
-    ActionManager am = getMockActionManager();
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
     replay(am);
     Clusters fsm = clusters;
     HeartBeatHandler handler = new HeartBeatHandler(fsm, new ActionQueue(), am,
                                                     injector);
-    Cluster cluster = getDummyCluster();
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
     hdfs.persist();
     hdfs.addServiceComponent(DATANODE).persist();
@@ -912,12 +434,12 @@ public class TestHeartbeatHandler {
   @Test
   public void testRegistrationRecoveryConfigMaintenanceMode()
           throws AmbariException, InvalidStateTransitionException {
-    ActionManager am = getMockActionManager();
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
     replay(am);
     Clusters fsm = clusters;
     HeartBeatHandler handler = new HeartBeatHandler(fsm, new ActionQueue(), am,
             injector);
-    Cluster cluster = getDummyCluster();
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
     hdfs.persist();
     hdfs.addServiceComponent(DATANODE).persist();
@@ -963,7 +485,7 @@ public class TestHeartbeatHandler {
   @Test
   public void testRegistrationAgentConfig() throws AmbariException,
       InvalidStateTransitionException {
-    ActionManager am = getMockActionManager();
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
     replay(am);
     Clusters fsm = clusters;
     HeartBeatHandler handler = new HeartBeatHandler(fsm, new ActionQueue(), am,
@@ -996,7 +518,7 @@ public class TestHeartbeatHandler {
   public void testRegistrationWithBadVersion() throws AmbariException,
       InvalidStateTransitionException {
 
-    ActionManager am = getMockActionManager();
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
     replay(am);
     Clusters fsm = clusters;
     HeartBeatHandler handler = new HeartBeatHandler(fsm, new ActionQueue(), am,
@@ -1037,7 +559,7 @@ public class TestHeartbeatHandler {
 
   @Test
   public void testRegistrationPublicHostname() throws AmbariException, InvalidStateTransitionException {
-    ActionManager am = getMockActionManager();
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
     replay(am);
     Clusters fsm = clusters;
     HeartBeatHandler handler = new HeartBeatHandler(fsm, new ActionQueue(), am,
@@ -1070,7 +592,7 @@ public class TestHeartbeatHandler {
   @Test
   public void testInvalidOSRegistration() throws AmbariException,
       InvalidStateTransitionException {
-    ActionManager am = getMockActionManager();
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
     replay(am);
     Clusters fsm = clusters;
     HeartBeatHandler handler = new HeartBeatHandler(fsm, new ActionQueue(), am,
@@ -1099,7 +621,7 @@ public class TestHeartbeatHandler {
   public void testIncompatibleAgentRegistration() throws AmbariException,
           InvalidStateTransitionException {
 
-    ActionManager am = getMockActionManager();
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
     replay(am);
     Clusters fsm = clusters;
     HeartBeatHandler handler = new HeartBeatHandler(fsm, new ActionQueue(), am,
@@ -1127,7 +649,7 @@ public class TestHeartbeatHandler {
   @Test
   public void testRegisterNewNode()
       throws AmbariException, InvalidStateTransitionException {
-    ActionManager am = getMockActionManager();
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
     replay(am);
     Clusters fsm = clusters;
     fsm.addHost(DummyHostname1);
@@ -1220,7 +742,7 @@ public class TestHeartbeatHandler {
     HeartbeatMonitor hm = mock(HeartbeatMonitor.class);
     when(hm.generateStatusCommands(anyString())).thenReturn(dummyCmds);
 
-    ActionManager am = getMockActionManager();
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
     replay(am);
     Clusters fsm = clusters;
     ActionQueue actionQueue = new ActionQueue();
@@ -1248,7 +770,7 @@ public class TestHeartbeatHandler {
   @Test
   @SuppressWarnings("unchecked")
   public void testTaskInProgressHandling() throws AmbariException, InvalidStateTransitionException {
-    Cluster cluster = getDummyCluster();
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
     hdfs.persist();
     hdfs.addServiceComponent(DATANODE).persist();
@@ -1289,15 +811,16 @@ public class TestHeartbeatHandler {
     final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
             Role.DATANODE, null, RoleCommand.INSTALL);
 
-    ActionManager am = getMockActionManager();
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
     expect(am.getTasks(anyObject(List.class))).andReturn(
             new ArrayList<HostRoleCommand>() {{
               add(command);
             }});
     replay(am);
 
-    HeartBeatHandler handler = getHeartBeatHandler(am, aq);
+    HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am, aq);
     handler.handleHeartBeat(hb);
+    handler.getHeartbeatProcessor().processHeartbeat(hb);
     State componentState1 = serviceComponentHost1.getState();
     assertEquals("Host state should still be installing", State.INSTALLING, componentState1);
   }
@@ -1305,7 +828,7 @@ public class TestHeartbeatHandler {
   @Test
   @SuppressWarnings("unchecked")
   public void testOPFailedEventForAbortedTask() throws AmbariException, InvalidStateTransitionException {
-    Cluster cluster = getDummyCluster();
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
     hdfs.persist();
     hdfs.addServiceComponent(DATANODE).persist();
@@ -1342,494 +865,45 @@ public class TestHeartbeatHandler {
 
     List<CommandReport> reports = new ArrayList<CommandReport>();
     CommandReport cr = new CommandReport();
-    cr.setActionId(StageUtils.getActionId(1, 1));
-    cr.setTaskId(1);
-    cr.setClusterName(DummyCluster);
-    cr.setServiceName(HDFS);
-    cr.setRole(DATANODE);
-    cr.setRoleCommand("INSTALL");
-    cr.setStatus("FAILED");
-    cr.setStdErr("none");
-    cr.setStdOut("dummy output");
-    cr.setExitCode(777);
-    reports.add(cr);
-    hb.setReports(reports);
-    hb.setComponentStatus(new ArrayList<ComponentStatus>());
-
-    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
-            Role.DATANODE, null, null);
-
-    ActionManager am = getMockActionManager();
-    expect(am.getTasks(anyObject(List.class))).andReturn(
-            new ArrayList<HostRoleCommand>() {{
-              add(command);
-            }});
-    replay(am);
-
-    HeartBeatHandler handler = getHeartBeatHandler(am, aq);
-    handler.handleHeartBeat(hb);
-    State componentState1 = serviceComponentHost1.getState();
-    assertEquals("Host state should still be installing", State.INSTALLING,
-      componentState1);
-  }
-
-  /**
-   * Tests the fact that when START and STOP commands are in progress, and heartbeat
-   * forces the host component state to STARTED or INSTALLED, there are no undesired
-   * side effects.
-   * @throws AmbariException
-   * @throws InvalidStateTransitionException
-   */
-  @Test
-  @SuppressWarnings("unchecked")
-  public void testCommandReportOnHeartbeatUpdatedState()
-      throws AmbariException, InvalidStateTransitionException {
-    Cluster cluster = getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-
-    ActionQueue aq = new ActionQueue();
-
-    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
-        getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
-    serviceComponentHost1.setState(State.INSTALLED);
-
-    HeartBeat hb = new HeartBeat();
-    hb.setTimestamp(System.currentTimeMillis());
-    hb.setResponseId(0);
-    hb.setHostname(DummyHostname1);
-    hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
-
-    List<CommandReport> reports = new ArrayList<CommandReport>();
-    CommandReport cr = new CommandReport();
-    cr.setActionId(StageUtils.getActionId(requestId, stageId));
-    cr.setTaskId(1);
-    cr.setClusterName(DummyCluster);
-    cr.setServiceName(HDFS);
-    cr.setRole(DATANODE);
-    cr.setStatus(HostRoleStatus.IN_PROGRESS.toString());
-    cr.setStdErr("none");
-    cr.setStdOut("dummy output");
-    cr.setExitCode(777);
-    cr.setRoleCommand("START");
-    reports.add(cr);
-    hb.setReports(reports);
-    hb.setComponentStatus(new ArrayList<ComponentStatus>());
-
-    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
-            Role.DATANODE, null, null);
-
-    ActionManager am = getMockActionManager();
-    expect(am.getTasks(anyObject(List.class))).andReturn(
-            new ArrayList<HostRoleCommand>() {{
-              add(command);
-            }}).anyTimes();
-    replay(am);
-
-    HeartBeatHandler handler = getHeartBeatHandler(am, aq);
-    handler.handleHeartBeat(hb);
-    assertEquals("Host state should  be " + State.INSTALLED,
-        State.INSTALLED, serviceComponentHost1.getState());
-
-    hb.setTimestamp(System.currentTimeMillis());
-    hb.setResponseId(1);
-    cr.setStatus(HostRoleStatus.COMPLETED.toString());
-    cr.setExitCode(0);
-
-    handler.handleHeartBeat(hb);
-    assertEquals("Host state should be " + State.STARTED,
-        State.STARTED, serviceComponentHost1.getState());
-
-    hb.setTimestamp(System.currentTimeMillis());
-    hb.setResponseId(2);
-    cr.setStatus(HostRoleStatus.IN_PROGRESS.toString());
-    cr.setRoleCommand("STOP");
-    cr.setExitCode(777);
-
-    handler.handleHeartBeat(hb);
-    assertEquals("Host state should be " + State.STARTED,
-        State.STARTED, serviceComponentHost1.getState());
-
-    hb.setTimestamp(System.currentTimeMillis());
-    hb.setResponseId(3);
-    cr.setStatus(HostRoleStatus.COMPLETED.toString());
-    cr.setExitCode(0);
-
-    handler.handleHeartBeat(hb);
-    assertEquals("Host state should be " + State.INSTALLED,
-        State.INSTALLED, serviceComponentHost1.getState());
-
-    // validate the transitions when there is no heartbeat
-    serviceComponentHost1.setState(State.STARTING);
-    cr.setStatus(HostRoleStatus.IN_PROGRESS.toString());
-    cr.setExitCode(777);
-    cr.setRoleCommand("START");
-    hb.setResponseId(4);
-
-    handler.handleHeartBeat(hb);
-    assertEquals("Host state should be " + State.STARTING,
-        State.STARTING, serviceComponentHost1.getState());
-
-    cr.setStatus(HostRoleStatus.COMPLETED.toString());
-    cr.setExitCode(0);
-    hb.setResponseId(5);
-
-    handler.handleHeartBeat(hb);
-    assertEquals("Host state should be " + State.STARTED,
-        State.STARTED, serviceComponentHost1.getState());
-
-    serviceComponentHost1.setState(State.STOPPING);
-    cr.setStatus(HostRoleStatus.IN_PROGRESS.toString());
-    cr.setExitCode(777);
-    cr.setRoleCommand("STOP");
-    hb.setResponseId(6);
-
-    handler.handleHeartBeat(hb);
-    assertEquals("Host state should be " + State.STOPPING,
-        State.STOPPING, serviceComponentHost1.getState());
-
-    cr.setStatus(HostRoleStatus.COMPLETED.toString());
-    cr.setExitCode(0);
-    hb.setResponseId(7);
-
-    handler.handleHeartBeat(hb);
-    assertEquals("Host state should be " + State.INSTALLED,
-        State.INSTALLED, serviceComponentHost1.getState());
-  }
-
-  @Test
-  @SuppressWarnings("unchecked")
-  public void testUpgradeSpecificHandling() throws AmbariException, InvalidStateTransitionException {
-    Cluster cluster = getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-
-    ActionQueue aq = new ActionQueue();
-
-    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
-        getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
-    serviceComponentHost1.setState(State.UPGRADING);
-
-    HeartBeat hb = new HeartBeat();
-    hb.setTimestamp(System.currentTimeMillis());
-    hb.setResponseId(0);
-    hb.setHostname(DummyHostname1);
-    hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
-
-    List<CommandReport> reports = new ArrayList<CommandReport>();
-    CommandReport cr = new CommandReport();
-    cr.setActionId(StageUtils.getActionId(requestId, stageId));
-    cr.setTaskId(1);
-    cr.setClusterName(DummyCluster);
-    cr.setServiceName(HDFS);
-    cr.setRole(DATANODE);
-    cr.setRoleCommand("INSTALL");
-    cr.setStatus(HostRoleStatus.IN_PROGRESS.toString());
-    cr.setStdErr("none");
-    cr.setStdOut("dummy output");
-    cr.setExitCode(777);
-    reports.add(cr);
-    hb.setReports(reports);
-    hb.setComponentStatus(new ArrayList<ComponentStatus>());
-
-    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
-            Role.DATANODE, null, null);
-
-    ActionManager am = getMockActionManager();
-    expect(am.getTasks(anyObject(List.class))).andReturn(
-            new ArrayList<HostRoleCommand>() {{
-              add(command);
-            }}).anyTimes();
-    replay(am);
-
-    HeartBeatHandler handler = getHeartBeatHandler(am, aq);
-    handler.handleHeartBeat(hb);
-    assertEquals("Host state should  be " + State.UPGRADING,
-        State.UPGRADING, serviceComponentHost1.getState());
-
-    hb.setTimestamp(System.currentTimeMillis());
-    hb.setResponseId(1);
-    cr.setStatus(HostRoleStatus.COMPLETED.toString());
-    cr.setExitCode(0);
-
-    handler.handleHeartBeat(hb);
-    assertEquals("Host state should be " + State.INSTALLED,
-        State.INSTALLED, serviceComponentHost1.getState());
-
-    serviceComponentHost1.setState(State.UPGRADING);
-    hb.setTimestamp(System.currentTimeMillis());
-    hb.setResponseId(2);
-    cr.setStatus(HostRoleStatus.FAILED.toString());
-    cr.setExitCode(3);
-
-    handler.handleHeartBeat(hb);
-    assertEquals("Host state should be " + State.UPGRADING,
-        State.UPGRADING, serviceComponentHost1.getState());
-
-    serviceComponentHost1.setState(State.UPGRADING);
-    hb.setTimestamp(System.currentTimeMillis());
-    hb.setResponseId(3);
-    cr.setStatus(HostRoleStatus.PENDING.toString());
-    cr.setExitCode(55);
-
-    handler.handleHeartBeat(hb);
-    assertEquals("Host state should be " + State.UPGRADING,
-        State.UPGRADING, serviceComponentHost1.getState());
-
-    serviceComponentHost1.setState(State.UPGRADING);
-    hb.setTimestamp(System.currentTimeMillis());
-    hb.setResponseId(4);
-    cr.setStatus(HostRoleStatus.QUEUED.toString());
-    cr.setExitCode(55);
-
-    handler.handleHeartBeat(hb);
-    assertEquals("Host state should be " + State.UPGRADING,
-        State.UPGRADING, serviceComponentHost1.getState());
-  }
-
-  @Test
-  @SuppressWarnings("unchecked")
-  public void testStatusHeartbeatWithVersion() throws Exception {
-    Cluster cluster = getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(HDFS_CLIENT).persist();
-    hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
-
-    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
-        getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
-    ServiceComponentHost serviceComponentHost2 = clusters.getCluster(DummyCluster).getService(HDFS).
-        getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1);
-    ServiceComponentHost serviceComponentHost3 = clusters.getCluster(DummyCluster).getService(HDFS).
-        getServiceComponent(HDFS_CLIENT).getServiceComponentHost(DummyHostname1);
-
-    StackId stack130 = new StackId("HDP-1.3.0");
-    StackId stack120 = new StackId("HDP-1.2.0");
-
-    serviceComponentHost1.setState(State.INSTALLED);
-    serviceComponentHost2.setState(State.STARTED);
-    serviceComponentHost3.setState(State.STARTED);
-    serviceComponentHost1.setStackVersion(stack130);
-    serviceComponentHost2.setStackVersion(stack120);
-    serviceComponentHost3.setStackVersion(stack120);
-
-    HeartBeat hb = new HeartBeat();
-    hb.setTimestamp(System.currentTimeMillis());
-    hb.setResponseId(0);
-    hb.setHostname(DummyHostname1);
-    hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
-    hb.setReports(new ArrayList<CommandReport>());
-    hb.setAgentEnv(new AgentEnv());
-    hb.setMounts(new ArrayList<DiskInfo>());
-
-    ArrayList<ComponentStatus> componentStatuses = new ArrayList<ComponentStatus>();
-    ComponentStatus componentStatus1 = createComponentStatus(DummyCluster, HDFS, DummyHostStatus, State.STARTED,
-        SecurityState.UNSECURED, DATANODE, "{\"stackName\":\"HDP\",\"stackVersion\":\"1.3.0\"}");
-    ComponentStatus componentStatus2 =
-        createComponentStatus(DummyCluster, HDFS, DummyHostStatus, State.STARTED, SecurityState.UNSECURED, NAMENODE, "");
-    ComponentStatus componentStatus3 = createComponentStatus(DummyCluster, HDFS, DummyHostStatus, State.INSTALLED,
-        SecurityState.UNSECURED, HDFS_CLIENT, "{\"stackName\":\"HDP\",\"stackVersion\":\"1.3.0\"}");
-
-    componentStatuses.add(componentStatus1);
-    componentStatuses.add(componentStatus2);
-    componentStatuses.add(componentStatus3);
-    hb.setComponentStatus(componentStatuses);
-
-    ActionQueue aq = new ActionQueue();
-    ActionManager am = getMockActionManager();
-    expect(am.getTasks(anyObject(List.class))).andReturn(
-            new ArrayList<HostRoleCommand>() {{
-            }});
-    replay(am);
-    HeartBeatHandler handler = getHeartBeatHandler(am, aq);
-    handler.handleHeartBeat(hb);
-    assertEquals("Matching value " + serviceComponentHost1.getStackVersion(),
-        stack130, serviceComponentHost1.getStackVersion());
-    assertEquals("Matching value " + serviceComponentHost2.getStackVersion(),
-        stack120, serviceComponentHost2.getStackVersion());
-    assertEquals("Matching value " + serviceComponentHost3.getStackVersion(),
-        stack130, serviceComponentHost3.getStackVersion());
-    assertTrue(hb.getAgentEnv().getHostHealth().getServerTimeStampAtReporting() >= hb.getTimestamp());
-  }
-
-  @Test
-  @SuppressWarnings("unchecked")
-  public void testComponentUpgradeCompleteReport() throws AmbariException, InvalidStateTransitionException {
-    Cluster cluster = getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(HDFS_CLIENT).persist();
-    hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
-
-    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
-            getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
-    ServiceComponentHost serviceComponentHost2 = clusters.getCluster(DummyCluster).getService(HDFS).
-            getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1);
-
-    StackId stack130 = new StackId("HDP-1.3.0");
-    StackId stack120 = new StackId("HDP-1.2.0");
-
-    serviceComponentHost1.setState(State.UPGRADING);
-    serviceComponentHost2.setState(State.INSTALLING);
-
-    serviceComponentHost1.setStackVersion(stack120);
-    serviceComponentHost1.setDesiredStackVersion(stack130);
-    serviceComponentHost2.setStackVersion(stack120);
-
-    HeartBeat hb = new HeartBeat();
-    hb.setTimestamp(System.currentTimeMillis());
-    hb.setResponseId(0);
-    hb.setHostname(DummyHostname1);
-    hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
-    CommandReport cr1 = new CommandReport();
-    cr1.setActionId(StageUtils.getActionId(requestId, stageId));
-    cr1.setTaskId(1);
-    cr1.setClusterName(DummyCluster);
-    cr1.setServiceName(HDFS);
-    cr1.setRole(DATANODE);
-    cr1.setStatus(HostRoleStatus.COMPLETED.toString());
-    cr1.setStdErr("none");
-    cr1.setStdOut("dummy output");
-    cr1.setExitCode(0);
-    cr1.setRoleCommand(RoleCommand.UPGRADE.toString());
-
-    CommandReport cr2 = new CommandReport();
-    cr2.setActionId(StageUtils.getActionId(requestId, stageId));
-    cr2.setTaskId(2);
-    cr2.setClusterName(DummyCluster);
-    cr2.setServiceName(HDFS);
-    cr2.setRole(NAMENODE);
-    cr2.setStatus(HostRoleStatus.COMPLETED.toString());
-    cr2.setStdErr("none");
-    cr2.setStdOut("dummy output");
-    cr2.setExitCode(0);
-    cr2.setRoleCommand(RoleCommand.UPGRADE.toString());
-    ArrayList<CommandReport> reports = new ArrayList<CommandReport>();
-    reports.add(cr1);
-    reports.add(cr2);
-    hb.setReports(reports);
-
-    ActionQueue aq = new ActionQueue();
-    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
-            Role.DATANODE, null, null);
-
-    ActionManager am = getMockActionManager();
-    expect(am.getTasks(anyObject(List.class))).andReturn(
-            new ArrayList<HostRoleCommand>() {{
-              add(command);
-              add(command);
-            }});
-    replay(am);
-
-    HeartBeatHandler handler = getHeartBeatHandler(am, aq);
-    handler.handleHeartBeat(hb);
-    assertEquals("Stack version for SCH should be updated to " +
-            serviceComponentHost1.getDesiredStackVersion(),
-            stack130, serviceComponentHost1.getStackVersion());
-    assertEquals("Stack version for SCH should not change ",
-            stack120, serviceComponentHost2.getStackVersion());
-  }
-
-  @Test
-  @SuppressWarnings("unchecked")
-  public void testComponentUpgradeInProgressReport() throws AmbariException, InvalidStateTransitionException {
-    Cluster cluster = getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(HDFS_CLIENT).persist();
-    hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
-
-    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
-            getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
-    ServiceComponentHost serviceComponentHost2 = clusters.getCluster(DummyCluster).getService(HDFS).
-            getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1);
-
-    StackId stack130 = new StackId("HDP-1.3.0");
-    StackId stack120 = new StackId("HDP-1.2.0");
-
-    serviceComponentHost1.setState(State.UPGRADING);
-    serviceComponentHost2.setState(State.INSTALLING);
-
-    serviceComponentHost1.setStackVersion(stack120);
-    serviceComponentHost1.setDesiredStackVersion(stack130);
-    serviceComponentHost2.setStackVersion(stack120);
-
-    HeartBeat hb = new HeartBeat();
-    hb.setTimestamp(System.currentTimeMillis());
-    hb.setResponseId(0);
-    hb.setHostname(DummyHostname1);
-    hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
-    CommandReport cr1 = new CommandReport();
-    cr1.setActionId(StageUtils.getActionId(requestId, stageId));
-    cr1.setTaskId(1);
-    cr1.setClusterName(DummyCluster);
-    cr1.setServiceName(HDFS);
-    cr1.setRole(DATANODE);
-    cr1.setRoleCommand("INSTALL");
-    cr1.setStatus(HostRoleStatus.IN_PROGRESS.toString());
-    cr1.setStdErr("none");
-    cr1.setStdOut("dummy output");
-    cr1.setExitCode(777);
-
-    CommandReport cr2 = new CommandReport();
-    cr2.setActionId(StageUtils.getActionId(requestId, stageId));
-    cr2.setTaskId(2);
-    cr2.setClusterName(DummyCluster);
-    cr2.setServiceName(HDFS);
-    cr2.setRole(NAMENODE);
-    cr2.setRoleCommand("INSTALL");
-    cr2.setStatus(HostRoleStatus.IN_PROGRESS.toString());
-    cr2.setStdErr("none");
-    cr2.setStdOut("dummy output");
-    cr2.setExitCode(777);
-    ArrayList<CommandReport> reports = new ArrayList<CommandReport>();
-    reports.add(cr1);
-    reports.add(cr2);
+    cr.setActionId(StageUtils.getActionId(1, 1));
+    cr.setTaskId(1);
+    cr.setClusterName(DummyCluster);
+    cr.setServiceName(HDFS);
+    cr.setRole(DATANODE);
+    cr.setRoleCommand("INSTALL");
+    cr.setStatus("FAILED");
+    cr.setStdErr("none");
+    cr.setStdOut("dummy output");
+    cr.setExitCode(777);
+    reports.add(cr);
     hb.setReports(reports);
+    hb.setComponentStatus(new ArrayList<ComponentStatus>());
 
-    ActionQueue aq = new ActionQueue();
     final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
             Role.DATANODE, null, null);
 
-    ActionManager am = getMockActionManager();
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
     expect(am.getTasks(anyObject(List.class))).andReturn(
             new ArrayList<HostRoleCommand>() {{
               add(command);
-              add(command);
             }});
     replay(am);
 
-    HeartBeatHandler handler = getHeartBeatHandler(am, aq);
+    HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am, aq);
     handler.handleHeartBeat(hb);
-    assertEquals("State of SCH not change while operation is in progress",
-            State.UPGRADING, serviceComponentHost1.getState());
-    assertEquals("Stack version of SCH should not change after in progress report",
-            stack130, serviceComponentHost1.getDesiredStackVersion());
-    assertEquals("State of SCH not change while operation is  in progress",
-            State.INSTALLING, serviceComponentHost2.getState());
+    handler.getHeartbeatProcessor().processHeartbeat(hb);
+    State componentState1 = serviceComponentHost1.getState();
+    assertEquals("Host state should still be installing", State.INSTALLING,
+      componentState1);
   }
 
 
+
+
   @Test
   @SuppressWarnings("unchecked")
-  public void testComponentUpgradeFailReport() throws AmbariException, InvalidStateTransitionException {
-    Cluster cluster = getDummyCluster();
+  public void testStatusHeartbeatWithVersion() throws Exception {
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
     hdfs.persist();
     hdfs.addServiceComponent(DATANODE).persist();
@@ -1840,117 +914,73 @@ public class TestHeartbeatHandler {
     hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
 
     ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
-            getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
+        getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
     ServiceComponentHost serviceComponentHost2 = clusters.getCluster(DummyCluster).getService(HDFS).
-            getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1);
+        getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1);
+    ServiceComponentHost serviceComponentHost3 = clusters.getCluster(DummyCluster).getService(HDFS).
+        getServiceComponent(HDFS_CLIENT).getServiceComponentHost(DummyHostname1);
 
     StackId stack130 = new StackId("HDP-1.3.0");
     StackId stack120 = new StackId("HDP-1.2.0");
 
-    serviceComponentHost1.setState(State.UPGRADING);
-    serviceComponentHost2.setState(State.INSTALLING);
-
-    serviceComponentHost1.setStackVersion(stack120);
-    serviceComponentHost1.setDesiredStackVersion(stack130);
+    serviceComponentHost1.setState(State.INSTALLED);
+    serviceComponentHost2.setState(State.STARTED);
+    serviceComponentHost3.setState(State.STARTED);
+    serviceComponentHost1.setStackVersion(stack130);
     serviceComponentHost2.setStackVersion(stack120);
-
-    Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action manager test",
-        "clusterHostInfo", "commandParamsStage", "hostParamsStage");
-    s.setStageId(stageId);
-    s.addHostRoleExecutionCommand(DummyHostname1, Role.DATANODE, RoleCommand.UPGRADE,
-      new ServiceComponentHostUpgradeEvent(Role.DATANODE.toString(),
-        DummyHostname1, System.currentTimeMillis(), "HDP-1.3.0"),
-      DummyCluster, "HDFS", false, false);
-    s.addHostRoleExecutionCommand(DummyHostname1, Role.NAMENODE, RoleCommand.INSTALL,
-      new ServiceComponentHostInstallEvent(Role.NAMENODE.toString(),
-        DummyHostname1, System.currentTimeMillis(), "HDP-1.3.0"),
-          DummyCluster, "HDFS", false, false);
-    List<Stage> stages = new ArrayList<Stage>();
-    stages.add(s);
-    Request request = new Request(stages, clusters);
-    actionDBAccessor.persistActions(request);
-    CommandReport cr = new CommandReport();
-    cr.setActionId(StageUtils.getActionId(requestId, stageId));
-    cr.setTaskId(1);
-    cr.setClusterName(DummyCluster);
-    cr.setServiceName(HDFS);
-    cr.setRole(DATANODE);
-    cr.setStatus(HostRoleStatus.IN_PROGRESS.toString());
-    cr.setStdErr("none");
-    cr.setStdOut("dummy output");
-    actionDBAccessor.updateHostRoleState(DummyHostname1, requestId, stageId,
-      Role.DATANODE.name(), cr);
-    cr.setRole(NAMENODE);
-    cr.setTaskId(2);
-    actionDBAccessor.updateHostRoleState(DummyHostname1, requestId, stageId,
-      Role.NAMENODE.name(), cr);
+    serviceComponentHost3.setStackVersion(stack120);
 
     HeartBeat hb = new HeartBeat();
     hb.setTimestamp(System.currentTimeMillis());
     hb.setResponseId(0);
     hb.setHostname(DummyHostname1);
     hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
-    CommandReport cr1 = new CommandReport();
-    cr1.setActionId(StageUtils.getActionId(requestId, stageId));
-    cr1.setTaskId(1);
-    cr1.setClusterName(DummyCluster);
-    cr1.setServiceName(HDFS);
-    cr1.setRole(DATANODE);
-    cr1.setRoleCommand("INSTALL");
-    cr1.setStatus(HostRoleStatus.FAILED.toString());
-    cr1.setStdErr("none");
-    cr1.setStdOut("dummy output");
-    cr1.setExitCode(0);
+    hb.setReports(new ArrayList<CommandReport>());
+    hb.setAgentEnv(new AgentEnv());
+    hb.setMounts(new ArrayList<DiskInfo>());
 
-    CommandReport cr2 = new CommandReport();
-    cr2.setActionId(StageUtils.getActionId(requestId, stageId));
-    cr2.setTaskId(2);
-    cr2.setClusterName(DummyCluster);
-    cr2.setServiceName(HDFS);
-    cr2.setRole(NAMENODE);
-    cr2.setRoleCommand("INSTALL");
-    cr2.setStatus(HostRoleStatus.FAILED.toString());
-    cr2.setStdErr("none");
-    cr2.setStdOut("dummy output");
-    cr2.setExitCode(0);
-    ArrayList<CommandReport> reports = new ArrayList<CommandReport>();
-    reports.add(cr1);
-    reports.add(cr2);
-    hb.setReports(reports);
+    ArrayList<ComponentStatus> componentStatuses = new ArrayList<ComponentStatus>();
+    ComponentStatus componentStatus1 = createComponentStatus(DummyCluster, HDFS, DummyHostStatus, State.STARTED,
+        SecurityState.UNSECURED, DATANODE, "{\"stackName\":\"HDP\",\"stackVersion\":\"1.3.0\"}");
+    ComponentStatus componentStatus2 =
+        createComponentStatus(DummyCluster, HDFS, DummyHostStatus, State.STARTED, SecurityState.UNSECURED, NAMENODE, "");
+    ComponentStatus componentStatus3 = createComponentStatus(DummyCluster, HDFS, DummyHostStatus, State.INSTALLED,
+        SecurityState.UNSECURED, HDFS_CLIENT, "{\"stackName\":\"HDP\",\"stackVersion\":\"1.3.0\"}");
 
-    ActionQueue aq = new ActionQueue();
-    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
-            Role.DATANODE, null, null);
+    componentStatuses.add(componentStatus1);
+    componentStatuses.add(componentStatus2);
+    componentStatuses.add(componentStatus3);
+    hb.setComponentStatus(componentStatuses);
 
-    ActionManager am = getMockActionManager();
+    ActionQueue aq = new ActionQueue();
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
     expect(am.getTasks(anyObject(List.class))).andReturn(
             new ArrayList<HostRoleCommand>() {{
-              add(command);
-              add(command);
             }});
     replay(am);
 
-    HeartBeatHandler handler = getHeartBeatHandler(am, aq);
+    HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am, aq);
+    HeartbeatProcessor heartbeatProcessor = handler.getHeartbeatProcessor();
     handler.handleHeartBeat(hb);
-    assertEquals("State of SCH should change after fail report",
-        State.UPGRADING, serviceComponentHost1.getState());
-    assertEquals("State of SCH should change after fail report",
-            State.INSTALL_FAILED, serviceComponentHost2.getState());
-    assertEquals("Stack version of SCH should not change after fail report",
-            stack120, serviceComponentHost1.getStackVersion());
-    assertEquals("Stack version of SCH should not change after fail report",
-            stack130, serviceComponentHost1.getDesiredStackVersion());
-    assertEquals("Stack version of SCH should not change after fail report",
-            State.INSTALL_FAILED, serviceComponentHost2.getState());
+    heartbeatProcessor.processHeartbeat(hb);
+
+    assertEquals("Matching value " + serviceComponentHost1.getStackVersion(),
+        stack130, serviceComponentHost1.getStackVersion());
+    assertEquals("Matching value " + serviceComponentHost2.getStackVersion(),
+        stack120, serviceComponentHost2.getStackVersion());
+    assertEquals("Matching value " + serviceComponentHost3.getStackVersion(),
+        stack130, serviceComponentHost3.getStackVersion());
+    assertTrue(hb.getAgentEnv().getHostHealth().getServerTimeStampAtReporting() >= hb.getTimestamp());
   }
 
 
+
   @Test
   @SuppressWarnings("unchecked")
   public void testRecoveryStatusReports() throws Exception {
     Clusters fsm = clusters;
 
-    Cluster cluster = getDummyCluster();
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Host hostObject = clusters.getHost(DummyHostname1);
     Service hdfs = cluster.addService(HDFS);
     hdfs.persist();
@@ -1964,7 +994,7 @@ public class TestHeartbeatHandler {
     ActionQueue aq = new ActionQueue();
 
     final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1, Role.DATANODE, null, null);
-    ActionManager am = getMockActionManager();
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
     expect(am.getTasks(anyObject(List.class))).andReturn(
         new ArrayList<HostRoleCommand>() {{
           add(command);
@@ -2032,7 +1062,7 @@ public class TestHeartbeatHandler {
   public void testProcessStatusReports() throws Exception {
     Clusters fsm = clusters;
 
-    Cluster cluster = getDummyCluster();
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Host hostObject = clusters.getHost(DummyHostname1);
     Service hdfs = cluster.addService(HDFS);
     hdfs.persist();
@@ -2047,7 +1077,7 @@ public class TestHeartbeatHandler {
 
     final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
             Role.DATANODE, null, null);
-    ActionManager am = getMockActionManager();
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
     expect(am.getTasks(anyObject(List.class))).andReturn(
             new ArrayList<HostRoleCommand>() {{
               add(command);
@@ -2055,6 +1085,7 @@ public class TestHeartbeatHandler {
             }}).anyTimes();
     replay(am);
     HeartBeatHandler handler = new HeartBeatHandler(fsm, aq, am, injector);
+    HeartbeatProcessor heartbeatProcessor = handler.getHeartbeatProcessor();
 
     Register reg = new Register();
     HostInfo hi = new HostInfo();
@@ -2092,6 +1123,7 @@ public class TestHeartbeatHandler {
     componentStatus.add(nameNodeStatus);
     hb1.setComponentStatus(componentStatus);
     handler.handleHeartBeat(hb1);
+    heartbeatProcessor.processHeartbeat(hb1);
     assertEquals(HostHealthStatus.HealthStatus.HEALTHY.name(), hostObject.getStatus());
 
     //Some slaves are down, masters are up
@@ -2116,6 +1148,7 @@ public class TestHeartbeatHandler {
     componentStatus.add(nameNodeStatus);
     hb2.setComponentStatus(componentStatus);
     handler.handleHeartBeat(hb2);
+    heartbeatProcessor.processHeartbeat(hb2);
     assertEquals(HostHealthStatus.HealthStatus.ALERT.name(), hostObject.getStatus());
 
     // mark the installed DN as maintenance
@@ -2142,6 +1175,7 @@ public class TestHeartbeatHandler {
     componentStatus.add(nameNodeStatus);
     hb2a.setComponentStatus(componentStatus);
     handler.handleHeartBeat(hb2a);
+    heartbeatProcessor.processHeartbeat(hb2a);
     assertEquals(HostHealthStatus.HealthStatus.HEALTHY.name(), hostObject.getStatus());
 
     hdfs.getServiceComponent(DATANODE).getServiceComponentHost(
@@ -2169,11 +1203,13 @@ public class TestHeartbeatHandler {
     componentStatus.add(nameNodeStatus);
     hb3.setComponentStatus(componentStatus);
     handler.handleHeartBeat(hb3);
+    heartbeatProcessor.processHeartbeat(hb3);
     assertEquals(HostHealthStatus.HealthStatus.UNHEALTHY.name(), hostObject.getStatus());
 
     //All are up
     hb1.setResponseId(4);
     handler.handleHeartBeat(hb1);
+    heartbeatProcessor.processHeartbeat(hb1);
     assertEquals(HostHealthStatus.HealthStatus.HEALTHY.name(), hostObject.getStatus());
 
     reset(am);
@@ -2199,10 +1235,12 @@ public class TestHeartbeatHandler {
     componentStatus.add(dataNodeStatus);
     hb4.setComponentStatus(componentStatus);
     handler.handleHeartBeat(hb4);
+    heartbeatProcessor.processHeartbeat(hb4);
     assertEquals(HostHealthStatus.HealthStatus.UNHEALTHY.name(), hostObject.getStatus());
 
     hb1.setResponseId(6);
     handler.handleHeartBeat(hb1);
+    heartbeatProcessor.processHeartbeat(hb1);
     assertEquals(HostHealthStatus.HealthStatus.HEALTHY.name(), hostObject.getStatus());
 
     //Some command reports
@@ -2225,6 +1263,7 @@ public class TestHeartbeatHandler {
     reports.add(cr1);
     hb5.setReports(reports);
     handler.handleHeartBeat(hb5);
+    heartbeatProcessor.processHeartbeat(hb5);
     assertEquals(HostHealthStatus.HealthStatus.ALERT.name(), hostObject.getStatus());
   }
 
@@ -2270,7 +1309,7 @@ public class TestHeartbeatHandler {
     final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
             Role.DATANODE, null, null);
 
-    ActionManager am = getMockActionManager();
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
     expect(am.getTasks(anyObject(List.class))).andReturn(
             new ArrayList<HostRoleCommand>() {{
               add(command);
@@ -2278,12 +1317,13 @@ public class TestHeartbeatHandler {
             }});
     replay(am);
 
-    HeartBeatHandler handler = getHeartBeatHandler(am, aq);
+    HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am, aq);
 
     // CUSTOM_COMMAND and ACTIONEXECUTE reports are ignored
     // they should not change the host component state
     try {
       handler.handleHeartBeat(hb);
+      handler.getHeartbeatProcessor().processHeartbeat(hb);
     } catch (Exception e) {
       fail();
     }
@@ -2321,8 +1361,8 @@ public class TestHeartbeatHandler {
     expected.setStackVersion(dummyStackId.getStackVersion());
     expected.setComponents(dummyComponents);
 
-    getDummyCluster();
-    HeartBeatHandler handler = getHeartBeatHandler(getMockActionManager(),
+    heartbeatTestHelper.getDummyCluster();
+    HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(heartbeatTestHelper.getMockActionManager(),
         new ActionQueue());
 
     ComponentsResponse actual = handler.handleComponents(DummyCluster);
@@ -2337,19 +1377,7 @@ public class TestHeartbeatHandler {
     assertEquals(expected.getComponents(), actual.getComponents());
   }
 
-  private ActionManager getMockActionManager() {
-    ActionQueue actionQueueMock = createNiceMock(ActionQueue.class);
-    Clusters clustersMock = createNiceMock(Clusters.class);
-    Configuration configurationMock = createNiceMock(Configuration.class);
-
-    ActionManager actionManager = createMockBuilder(ActionManager.class).
-            addMockedMethod("getTasks").
-            withConstructor((long)0, (long)0, actionQueueMock, clustersMock,
-                    actionDBAccessor, new HostsMap((String) null), unitOfWork,
-                    injector.getInstance(RequestFactory.class), configurationMock, createNiceMock(AmbariEventPublisher.class)).
-            createMock();
-    return actionManager;
-  }
+
 
 
   private ComponentStatus createComponentStatus(String clusterName, String serviceName, String message,
@@ -2366,164 +1394,11 @@ public class TestHeartbeatHandler {
     return componentStatus1;
   }
 
-  private HeartBeatHandler getHeartBeatHandler(ActionManager am, ActionQueue aq)
-      throws InvalidStateTransitionException, AmbariException {
-    HeartBeatHandler handler = new HeartBeatHandler(clusters, aq, am, injector);
-    Register reg = new Register();
-    HostInfo hi = new HostInfo();
-    hi.setHostName(DummyHostname1);
-    hi.setOS(DummyOs);
-    hi.setOSRelease(DummyOSRelease);
-    reg.setHostname(DummyHostname1);
-    reg.setResponseId(0);
-    reg.setHardwareProfile(hi);
-    reg.setAgentVersion(metaInfo.getServerVersion());
-    handler.handleRegistration(reg);
-    return handler;
-  }
-
-  private Cluster getDummyCluster()
-      throws AmbariException {
-    StackEntity stackEntity = stackDAO.find(HDP_22_STACK.getStackName(), HDP_22_STACK.getStackVersion());
-    org.junit.Assert.assertNotNull(stackEntity);
-
-    // Create the cluster
-    ResourceTypeEntity resourceTypeEntity = new ResourceTypeEntity();
-    resourceTypeEntity.setId(ResourceType.CLUSTER.getId());
-    resourceTypeEntity.setName(ResourceType.CLUSTER.name());
-    resourceTypeEntity = resourceTypeDAO.merge(resourceTypeEntity);
-
-    ResourceEntity resourceEntity = new ResourceEntity();
-    resourceEntity.setResourceType(resourceTypeEntity);
-
-    ClusterEntity clusterEntity = new ClusterEntity();
-    clusterEntity.setClusterName(DummyCluster);
-    clusterEntity.setClusterInfo("test_cluster_info1");
-    clusterEntity.setResource(resourceEntity);
-    clusterEntity.setDesiredStack(stackEntity);
-
-    clusterDAO.create(clusterEntity);
-
-    StackId stackId = new StackId(DummyStackId);
-
-    Cluster cluster = clusters.getCluster(DummyCluster);
-
-    cluster.setDesiredStackVersion(stackId);
-    cluster.setCurrentStackVersion(stackId);
-    helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
-    cluster.createClusterVersion(stackId, stackId.getStackVersion(), "admin",
-        RepositoryVersionState.UPGRADING);
-
-    Set<String> hostNames = new HashSet<String>(){{
-      add(DummyHostname1);
-    }};
-
-    Map<String, String> hostAttributes = new HashMap<String, String>();
-    hostAttributes.put("os_family", "redhat");
-    hostAttributes.put("os_release_version", "6.3");
-
-    List<HostEntity> hostEntities = new ArrayList<HostEntity>();
-    for(String hostName : hostNames) {
-      clusters.addHost(hostName);
-      Host host = clusters.getHost(hostName);
-      host.setHostAttributes(hostAttributes);
-      host.persist();
-
-      HostEntity hostEntity = hostDAO.findByName(hostName);
-      Assert.assertNotNull(hostEntity);
-      hostEntities.add(hostEntity);
-    }
-    clusterEntity.setHostEntities(hostEntities);
-    clusters.mapHostsToCluster(hostNames, DummyCluster);
-
-    return cluster;
-  }
-
-  @Test
-  @SuppressWarnings("unchecked")
-  public void testCommandStatusProcesses() throws Exception {
-    Cluster cluster = getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
-
-    ActionQueue aq = new ActionQueue();
-
-    HeartBeat hb = new HeartBeat();
-    hb.setTimestamp(System.currentTimeMillis());
-    hb.setResponseId(0);
-    hb.setHostname(DummyHostname1);
-    hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
-    hb.setReports(new ArrayList<CommandReport>());
-
-    List<Map<String, String>> procs = new ArrayList<Map<String, String>>();
-    Map<String, String> proc1info = new HashMap<String, String>();
-    proc1info.put("name", "a");
-    proc1info.put("status", "RUNNING");
-    procs.add(proc1info);
-
-    Map<String, String> proc2info = new HashMap<String, String>();
-    proc2info.put("name", "b");
-    proc2info.put("status", "NOT_RUNNING");
-    procs.add(proc2info);
-
-    Map<String, Object> extra = new HashMap<String, Object>();
-    extra.put("processes", procs);
-
-    ArrayList<ComponentStatus> componentStatuses = new ArrayList<ComponentStatus>();
-    ComponentStatus componentStatus1 = new ComponentStatus();
-    componentStatus1.setClusterName(DummyCluster);
-    componentStatus1.setServiceName(HDFS);
-    componentStatus1.setMessage(DummyHostStatus);
-    componentStatus1.setStatus(State.STARTED.name());
-    componentStatus1.setSecurityState(SecurityState.UNSECURED.name());
-    componentStatus1.setComponentName(DATANODE);
-
-    componentStatus1.setExtra(extra);
-    componentStatuses.add(componentStatus1);
-    hb.setComponentStatus(componentStatuses);
-
-    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
-            Role.DATANODE, null, null);
-
-    ActionManager am = getMockActionManager();
-    expect(am.getTasks(anyObject(List.class))).andReturn(
-            new ArrayList<HostRoleCommand>() {{
-              add(command);
-            }}).anyTimes();
-    replay(am);
-
-    HeartBeatHandler handler = getHeartBeatHandler(am, aq);
-    handler.handleHeartBeat(hb);
-    ServiceComponentHost sch = hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
-
-    Assert.assertEquals(Integer.valueOf(2), Integer.valueOf(sch.getProcesses().size()));
-
-    hb = new HeartBeat();
-    hb.setTimestamp(System.currentTimeMillis());
-    hb.setResponseId(1);
-    hb.setHostname(DummyHostname1);
-    hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
-    hb.setReports(new ArrayList<CommandReport>());
-
-    componentStatus1 = new ComponentStatus();
-    componentStatus1.setClusterName(DummyCluster);
-    componentStatus1.setServiceName(HDFS);
-    componentStatus1.setMessage(DummyHostStatus);
-    componentStatus1.setStatus(State.STARTED.name());
-    componentStatus1.setSecurityState(SecurityState.UNSECURED.name());
-    componentStatus1.setComponentName(DATANODE);
-    hb.setComponentStatus(Collections.singletonList(componentStatus1));
-
-    handler.handleHeartBeat(hb);
-  }
 
   @Test
   @SuppressWarnings("unchecked")
   public void testCommandStatusProcesses_empty() throws Exception {
-    Cluster cluster = getDummyCluster();
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
     hdfs.persist();
     hdfs.addServiceComponent(DATANODE).persist();
@@ -2553,137 +1428,19 @@ public class TestHeartbeatHandler {
     final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
             Role.DATANODE, null, null);
 
-    ActionManager am = getMockActionManager();
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
     expect(am.getTasks(anyObject(List.class))).andReturn(
             new ArrayList<HostRoleCommand>() {{
               add(command);
             }});
     replay(am);
 
-    HeartBeatHandler handler = getHeartBeatHandler(am, aq);
+    HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am, aq);
     ServiceComponentHost sch = hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
 
     Assert.assertEquals(Integer.valueOf(0), Integer.valueOf(sch.getProcesses().size()));
   }
 
-  /**
-   * Tests that if there is an invalid cluster in heartbeat data, the heartbeat
-   * doesn't fail.
-   *
-   * @throws Exception
-   */
-  @Test
-  @SuppressWarnings("unchecked")
-  public void testHeartBeatWithAlertAndInvalidCluster() throws Exception {
-    ActionManager am = getMockActionManager();
-
-    expect(am.getTasks(anyObject(List.class))).andReturn(
-        new ArrayList<HostRoleCommand>());
-
-    replay(am);
-
-    Cluster cluster = getDummyCluster();
-    Clusters fsm = clusters;
-    Host hostObject = clusters.getHost(DummyHostname1);
-    hostObject.setIPv4("ipv4");
-    hostObject.setIPv6("ipv6");
-    hostObject.setOsType(DummyOsType);
-
-    ActionQueue aq = new ActionQueue();
-
-    HeartBeatHandler handler = new HeartBeatHandler(fsm, aq, am, injector);
-    Register reg = new Register();
-    HostInfo hi = new HostInfo();
-    hi.setHostName(DummyHostname1);
-    hi.setOS(DummyOs);
-    hi.setOSRelease(DummyOSRelease);
-    reg.setHostname(DummyHostname1);
-    reg.setHardwareProfile(hi);
-    reg.setAgentVersion(metaInfo.getServerVersion());
-    handler.handleRegistration(reg);
-
-    hostObject.setState(HostState.UNHEALTHY);
-
-    ExecutionCommand execCmd = new ExecutionCommand();
-    execCmd.setRequestAndStage(2, 34);
-    execCmd.setHostname(DummyHostname1);
-    aq.enqueue(DummyHostname1, new ExecutionCommand());
-
-    HeartBeat hb = new HeartBeat();
-    HostStatus hs = new HostStatus(Status.HEALTHY, DummyHostStatus);
-
-    hb.setResponseId(0);
-    hb.setNodeStatus(hs);
-    hb.setHostname(DummyHostname1);
-
-    Alert alert = new Alert("foo", "bar", "baz", "foobar", "foobarbaz",
-        AlertState.OK);
-
-    alert.setCluster("BADCLUSTER");
-
-    List<Alert> alerts = Collections.singletonList(alert);
-    hb.setAlerts(alerts);
-
-    // should NOT throw AmbariException from alerts.
-    handler.handleHeartBeat(hb);
-  }
-
-  @Test
-  public void testInstallPackagesWithVersion() throws Exception {
-    // required since this test method checks the DAO result of handling a
-    // heartbeat which performs some async tasks
-    EventBusSynchronizer.synchronizeAmbariEventPublisher(injector);
-
-    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
-        Role.DATANODE, null, null);
-
-    ActionManager am = getMockActionManager();
-    expect(am.getTasks(anyObject(List.class))).andReturn(
-        Collections.singletonList(command)).anyTimes();
-    replay(am);
-
-    Cluster cluster = getDummyCluster();
-    HeartBeatHandler handler = getHeartBeatHandler(am, new ActionQueue());
-    HeartBeat hb = new HeartBeat();
-
-    JsonObject json = new JsonObject();
-    json.addProperty("actual_version", "2.2.1.0-2222");
-    json.addProperty("package_installation_result", "SUCCESS");
-    json.addProperty("installed_repository_version", "0.1");
-    json.addProperty("stack_id", cluster.getDesiredStackVersion().getStackId());
-
-
-    CommandReport cmdReport = new CommandReport();
-    cmdReport.setActionId(StageUtils.getActionId(requestId, stageId));
-    cmdReport.setTaskId(1);
-    cmdReport.setCustomCommand("install_packages");
-    cmdReport.setStructuredOut(json.toString());
-    cmdReport.setRoleCommand(RoleCommand.ACTIONEXECUTE.name());
-    cmdReport.setStatus(HostRoleStatus.COMPLETED.name());
-    cmdReport.setRole("install_packages");
-    cmdReport.setClusterName(DummyCluster);
-
-    hb.setReports(Collections.singletonList(cmdReport));
-    hb.setTimestamp(0L);
-    hb.setResponseId(0);
-    hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
-    hb.setHostname(DummyHostname1);
-    hb.setComponentStatus(new ArrayList<ComponentStatus>());
-
-    StackId stackId = new StackId("HDP", "0.1");
-
-    RepositoryVersionDAO dao = injector.getInstance(RepositoryVersionDAO.class);
-    RepositoryVersionEntity entity = dao.findByStackAndVersion(stackId, "0.1");
-    Assert.assertNotNull(entity);
-
-    handler.handleHeartBeat(hb);
-
-    entity = dao.findByStackAndVersion(stackId, "0.1");
-    Assert.assertNull(entity);
-
-    entity = dao.findByStackAndVersion(stackId, "2.2.1.0-2222");
-    Assert.assertNotNull(entity);
-  }
 
   @Test
   public void testInjectKeytabApplicableHost() throws Exception {
@@ -2758,14 +1515,14 @@ public class TestHeartbeatHandler {
     final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
         Role.DATANODE, null, null);
 
-    ActionManager am = getMockActionManager();
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
     expect(am.getTasks(anyObject(List.class))).andReturn(
         new ArrayList<HostRoleCommand>() {{
           add(command);
         }});
     replay(am);
 
-    getHeartBeatHandler(am, aq).injectKeytab(executionCommand, "SET_KEYTAB", targetHost);
+    heartbeatTestHelper.getHeartBeatHandler(am, aq).injectKeytab(executionCommand, "SET_KEYTAB", targetHost);
 
     return executionCommand.getKerberosCommandParams();
   }
@@ -2789,14 +1546,14 @@ public class TestHeartbeatHandler {
     final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
         Role.DATANODE, null, null);
 
-    ActionManager am = getMockActionManager();
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
     expect(am.getTasks(anyObject(List.class))).andReturn(
         new ArrayList<HostRoleCommand>() {{
           add(command);
         }});
     replay(am);
 
-    getHeartBeatHandler(am, aq).injectKeytab(executionCommand, "REMOVE_KEYTAB", targetHost);
+    heartbeatTestHelper.getHeartBeatHandler(am, aq).injectKeytab(executionCommand, "REMOVE_KEYTAB", targetHost);
 
     return executionCommand.getKerberosCommandParams();
   }
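
Taken together, the hunks above move the private getMockActionManager(), getHeartBeatHandler() and getDummyCluster() helpers out of TestHeartbeatHandler (they now come from the injected heartbeatTestHelper) and route state changes through the new HeartbeatProcessor. A minimal sketch of the resulting test flow, using only calls that appear in the diff; the test name is hypothetical and the helper fields are assumed to be injected as shown above:

    @Test
    public void sketchHeartbeatFlow() throws Exception {
      // Mock ActionManager now comes from the shared helper instead of a private method.
      ActionQueue aq = new ActionQueue();
      ActionManager am = heartbeatTestHelper.getMockActionManager();
      expect(am.getTasks(anyObject(List.class)))
          .andReturn(new ArrayList<HostRoleCommand>()).anyTimes();
      replay(am);

      // Registration is handled inside the helper's getHeartBeatHandler().
      HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am, aq);

      HeartBeat hb = new HeartBeat();
      hb.setTimestamp(System.currentTimeMillis());
      hb.setResponseId(0);
      hb.setHostname(DummyHostname1);
      hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
      hb.setReports(new ArrayList<CommandReport>());
      hb.setComponentStatus(new ArrayList<ComponentStatus>());

      handler.handleHeartBeat(hb);                          // response/queue bookkeeping
      handler.getHeartbeatProcessor().processHeartbeat(hb); // state transitions now happen here
    }
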


[11/11] ambari git commit: Merge branch 'trunk' into branch-dev-patch-upgrade

Posted by nc...@apache.org.
Merge branch 'trunk' into branch-dev-patch-upgrade


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e06d95d1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e06d95d1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e06d95d1

Branch: refs/heads/branch-dev-patch-upgrade
Commit: e06d95d1b779da2762e46de6c8d72a3523498401
Parents: cbef0c1 083ac6d
Author: Nate Cole <nc...@hortonworks.com>
Authored: Tue Feb 23 16:21:20 2016 -0500
Committer: Nate Cole <nc...@hortonworks.com>
Committed: Tue Feb 23 16:21:20 2016 -0500

----------------------------------------------------------------------
 .../loginActivities/LoginMessageMainCtrl.js     |   51 +-
 .../ui/admin-web/app/scripts/i18n.config.js     |    4 +-
 .../app/views/loginActivities/loginMessage.html |   14 +-
 .../src/main/repo/install_ambari_tarball.py     |    2 +-
 ambari-server/conf/unix/ca.config               |    3 +-
 ambari-server/conf/unix/install-helper.sh       |   28 +-
 ambari-server/conf/unix/log4j.properties        |    3 +-
 ambari-server/pom.xml                           |    1 +
 .../ambari/server/agent/HeartBeatHandler.java   |  550 +------
 .../ambari/server/agent/HeartbeatMonitor.java   |    6 +
 .../ambari/server/agent/HeartbeatProcessor.java |  773 +++++++++
 .../api/services/ActiveWidgetLayoutService.java |   10 +-
 .../api/services/UserAuthorizationService.java  |    4 +-
 .../api/services/UserPrivilegeService.java      |    3 +-
 .../ambari/server/api/services/UserService.java |    3 +-
 .../server/configuration/Configuration.java     |   23 +-
 .../ambari/server/controller/AmbariServer.java  |    6 +-
 .../ambari/server/orm/dao/HostVersionDAO.java   |   78 +-
 .../server/orm/entities/HostVersionEntity.java  |    9 +
 .../encryption/MasterKeyServiceImpl.java        |    3 +-
 .../server/state/cluster/ClusterImpl.java       |    6 +-
 .../svccomphost/ServiceComponentHostImpl.java   |   72 +-
 .../apache/ambari/server/utils/AmbariPath.java  |   39 +
 .../src/main/package/deb/control/postinst       |    2 +-
 .../src/main/package/deb/control/preinst        |   22 +-
 .../src/main/package/deb/control/prerm          |    2 +-
 .../src/main/package/rpm/postinstall.sh         |   10 +-
 .../src/main/package/rpm/posttrans_server.sh    |   10 +-
 .../src/main/package/rpm/preinstall.sh          |   19 +-
 ambari-server/src/main/package/rpm/preremove.sh |    6 +-
 .../server/agent/HeartbeatProcessorTest.java    | 1290 +++++++++++++++
 .../server/agent/HeartbeatTestHelper.java       |  229 +++
 .../server/agent/TestHeartbeatHandler.java      | 1488 ++----------------
 .../services/ActiveWidgetLayoutServiceTest.java |   76 +
 .../services/UserAuthorizationServiceTest.java  |   12 +
 .../api/services/UserPrivilegeServiceTest.java  |   13 +
 .../server/api/services/UserServiceTest.java    |   71 +
 .../resourceManager/step3_controller.js         |    2 +-
 .../main/admin/kerberos/step4_controller.js     |    3 +-
 .../main/admin/serviceAccounts_controller.js    |    5 +-
 .../controllers/main/service/info/configs.js    |   32 +-
 ambari-web/app/controllers/wizard.js            |    1 -
 .../app/controllers/wizard/step7_controller.js  |   66 +-
 .../configs/stack_config_properties_mapper.js   |    4 +-
 .../common/kdc_credentials_controller_mixin.js  |    2 +-
 ambari-web/app/models/stack_service.js          |   15 +-
 ambari-web/app/router.js                        |   11 +-
 ambari-web/app/utils.js                         |    1 +
 ambari-web/app/utils/config.js                  |  187 +--
 ambari-web/app/utils/configs/theme/theme.js     |  103 ++
 .../configs/widgets/config_widget_view.js       |    2 +-
 .../admin/kerberos/step4_controller_test.js     |    9 +-
 .../test/controllers/wizard/step7_test.js       |    7 +-
 ambari-web/test/utils/config_test.js            |  126 +-
 54 files changed, 3149 insertions(+), 2368 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e06d95d1/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/e06d95d1/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
index a4136ee,ba14446..a13b421
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
@@@ -339,448 -286,7 +286,6 @@@ public class HeartBeatHandler 
      host.setRecoveryReport(recoveryReport);
    }
  
-   protected void processHostStatus(HeartBeat heartbeat, String hostname) throws AmbariException {
- 
-     Host host = clusterFsm.getHost(hostname);
-     HealthStatus healthStatus = host.getHealthStatus().getHealthStatus();
- 
-     if (!healthStatus.equals(HostHealthStatus.HealthStatus.UNKNOWN)) {
- 
-       List<ComponentStatus> componentStatuses = heartbeat.getComponentStatus();
-       //Host status info could be calculated only if agent returned statuses in heartbeat
-       //Or, if a command is executed that can change component status
-       boolean calculateHostStatus = false;
-       String clusterName = null;
-       if (componentStatuses.size() > 0) {
-         calculateHostStatus = true;
-         for (ComponentStatus componentStatus : componentStatuses) {
-           clusterName = componentStatus.getClusterName();
-           break;
-         }
-       }
- 
-       if (!calculateHostStatus) {
-         List<CommandReport> reports = heartbeat.getReports();
-         for (CommandReport report : reports) {
-           if (RoleCommand.ACTIONEXECUTE.toString().equals(report.getRoleCommand())) {
-             continue;
-           }
- 
-           String service = report.getServiceName();
-           if (actionMetadata.getActions(service.toLowerCase()).contains(report.getRole())) {
-             continue;
-           }
-           if (report.getStatus().equals("COMPLETED")) {
-             calculateHostStatus = true;
-             clusterName = report.getClusterName();
-             break;
-           }
-         }
-       }
- 
-       if (calculateHostStatus) {
-         //Use actual component status to compute the host status
-         int masterCount = 0;
-         int mastersRunning = 0;
-         int slaveCount = 0;
-         int slavesRunning = 0;
- 
-         StackId stackId;
-         Cluster cluster = clusterFsm.getCluster(clusterName);
-         stackId = cluster.getDesiredStackVersion();
- 
-         MaintenanceStateHelper psh = injector.getInstance(MaintenanceStateHelper.class);
- 
-         List<ServiceComponentHost> scHosts = cluster.getServiceComponentHosts(heartbeat.getHostname());
-         for (ServiceComponentHost scHost : scHosts) {
-           ComponentInfo componentInfo =
-               ambariMetaInfo.getComponent(stackId.getStackName(),
-                   stackId.getStackVersion(), scHost.getServiceName(),
-                   scHost.getServiceComponentName());
- 
-           String status = scHost.getState().name();
- 
-           String category = componentInfo.getCategory();
- 
-           if (MaintenanceState.OFF == psh.getEffectiveState(scHost, host)) {
-             if (category.equals("MASTER")) {
-               ++masterCount;
-               if (status.equals("STARTED")) {
-                 ++mastersRunning;
-               }
-             } else if (category.equals("SLAVE")) {
-               ++slaveCount;
-               if (status.equals("STARTED")) {
-                 ++slavesRunning;
-               }
-             }
-           }
-         }
- 
-         if (masterCount == mastersRunning && slaveCount == slavesRunning) {
-           healthStatus = HealthStatus.HEALTHY;
-         } else if (masterCount > 0 && mastersRunning < masterCount) {
-           healthStatus = HealthStatus.UNHEALTHY;
-         } else {
-           healthStatus = HealthStatus.ALERT;
-         }
- 
-         host.setStatus(healthStatus.name());
-         host.persist();
-       }
- 
-       //If host doesn't belong to any cluster
-       if ((clusterFsm.getClustersForHost(host.getHostName())).size() == 0) {
-         healthStatus = HealthStatus.HEALTHY;
-         host.setStatus(healthStatus.name());
-         host.persist();
-       }
-     }
-   }
- 
-   protected void processCommandReports(
-       HeartBeat heartbeat, String hostname, Clusters clusterFsm, long now)
-       throws AmbariException {
-     List<CommandReport> reports = heartbeat.getReports();
- 
-     // Cache HostRoleCommand entities because we will need them few times
-     List<Long> taskIds = new ArrayList<Long>();
-     for (CommandReport report : reports) {
-       taskIds.add(report.getTaskId());
-     }
-     Collection<HostRoleCommand> commands = actionManager.getTasks(taskIds);
- 
-     Iterator<HostRoleCommand> hostRoleCommandIterator = commands.iterator();
-     for (CommandReport report : reports) {
- 
-       Long clusterId = null;
-       if (report.getClusterName() != null) {
-         try {
-           Cluster cluster = clusterFsm.getCluster(report.getClusterName());
-           clusterId = Long.valueOf(cluster.getClusterId());
-         } catch (AmbariException e) {
-         }
-       }
- 
-       LOG.debug("Received command report: " + report);
-       // Fetch HostRoleCommand that corresponds to a given task ID
-       HostRoleCommand hostRoleCommand = hostRoleCommandIterator.next();
-       HostEntity hostEntity = hostDAO.findByName(hostname);
-       if (hostEntity == null) {
-         LOG.error("Received a command report and was unable to retrieve HostEntity for hostname = " + hostname);
-         continue;
-       }
- 
-       // Send event for final command reports for actions
-       if (RoleCommand.valueOf(report.getRoleCommand()) == RoleCommand.ACTIONEXECUTE &&
-           HostRoleStatus.valueOf(report.getStatus()).isCompletedState()) {
-         ActionFinalReportReceivedEvent event = new ActionFinalReportReceivedEvent(
-                 clusterId, hostname, report, false);
-         ambariEventPublisher.publish(event);
-       }
- 
-       // Skip sending events for command reports for ABORTed commands
-       if (hostRoleCommand.getStatus() == HostRoleStatus.ABORTED) {
-         continue;
-       }
-       if (hostRoleCommand.getStatus() == HostRoleStatus.QUEUED &&
-               report.getStatus().equals("IN_PROGRESS")) {
-         hostRoleCommand.setStartTime(now);
-       }
- 
-       // If the report indicates the keytab file was successfully transferred to a host or removed
-       // from a host, record this for future reference
-       if (Service.Type.KERBEROS.name().equalsIgnoreCase(report.getServiceName()) &&
-           Role.KERBEROS_CLIENT.name().equalsIgnoreCase(report.getRole()) &&
-           RoleCommand.CUSTOM_COMMAND.name().equalsIgnoreCase(report.getRoleCommand()) &&
-           RequestExecution.Status.COMPLETED.name().equalsIgnoreCase(report.getStatus())) {
- 
-         String customCommand = report.getCustomCommand();
- 
-         boolean adding = "SET_KEYTAB".equalsIgnoreCase(customCommand);
-         if (adding || "REMOVE_KEYTAB".equalsIgnoreCase(customCommand)) {
-           WriteKeytabsStructuredOut writeKeytabsStructuredOut;
-           try {
-             writeKeytabsStructuredOut = gson.fromJson(report.getStructuredOut(), WriteKeytabsStructuredOut.class);
-           } catch (JsonSyntaxException ex) {
-             //Json structure was incorrect do nothing, pass this data further for processing
-             writeKeytabsStructuredOut = null;
-           }
- 
-           if (writeKeytabsStructuredOut != null) {
-             Map<String, String> keytabs = writeKeytabsStructuredOut.getKeytabs();
-             if (keytabs != null) {
-               for (Map.Entry<String, String> entry : keytabs.entrySet()) {
-                 String principal = entry.getKey();
-                 if (!kerberosPrincipalHostDAO.exists(principal, hostEntity.getHostId())) {
-                   if (adding) {
-                     kerberosPrincipalHostDAO.create(principal, hostEntity.getHostId());
-                   } else if ("_REMOVED_".equalsIgnoreCase(entry.getValue())) {
-                     kerberosPrincipalHostDAO.remove(principal, hostEntity.getHostId());
-                   }
-                 }
-               }
-             }
-           }
-         }
-       }
- 
-       //pass custom START, STOP and RESTART
-       if (RoleCommand.ACTIONEXECUTE.toString().equals(report.getRoleCommand()) ||
-          (RoleCommand.CUSTOM_COMMAND.toString().equals(report.getRoleCommand()) &&
-          !("RESTART".equals(report.getCustomCommand()) ||
-          "START".equals(report.getCustomCommand()) ||
-          "STOP".equals(report.getCustomCommand())))) {
-         continue;
-       }
- 
-       Cluster cl = clusterFsm.getCluster(report.getClusterName());
-       String service = report.getServiceName();
-       if (service == null || service.isEmpty()) {
-         throw new AmbariException("Invalid command report, service: " + service);
-       }
-       if (actionMetadata.getActions(service.toLowerCase()).contains(report.getRole())) {
-         LOG.debug(report.getRole() + " is an action - skip component lookup");
-       } else {
-         try {
-           Service svc = cl.getService(service);
-           ServiceComponent svcComp = svc.getServiceComponent(report.getRole());
-           ServiceComponentHost scHost = svcComp.getServiceComponentHost(hostname);
-           String schName = scHost.getServiceComponentName();
- 
-           if (report.getStatus().equals(HostRoleStatus.COMPLETED.toString())) {
- 
-             // Reading component version if it is present
-             if (StringUtils.isNotBlank(report.getStructuredOut())) {
-               ComponentVersionStructuredOut structuredOutput = null;
-               try {
-                 structuredOutput = gson.fromJson(report.getStructuredOut(), ComponentVersionStructuredOut.class);
-               } catch (JsonSyntaxException ex) {
-                 //Json structure for component version was incorrect
-                 //do nothing, pass this data further for processing
-               }
- 
-               String newVersion = structuredOutput == null ? null : structuredOutput.version;
- 
-               HostComponentVersionAdvertisedEvent event = new HostComponentVersionAdvertisedEvent(cl, scHost, newVersion);
-               versionEventPublisher.publish(event);
-             }
- 
-             // Updating stack version, if needed (this is not actually for express/rolling upgrades!)
-             if (scHost.getState().equals(State.UPGRADING)) {
-               scHost.setStackVersion(scHost.getDesiredStackVersion());
-             } else if ((report.getRoleCommand().equals(RoleCommand.START.toString()) ||
-                 (report.getRoleCommand().equals(RoleCommand.CUSTOM_COMMAND.toString()) &&
-                     ("START".equals(report.getCustomCommand()) ||
-                     "RESTART".equals(report.getCustomCommand()))))
-                 && null != report.getConfigurationTags()
-                 && !report.getConfigurationTags().isEmpty()) {
-               LOG.info("Updating applied config on service " + scHost.getServiceName() +
-                 ", component " + scHost.getServiceComponentName() + ", host " + scHost.getHostName());
-               scHost.updateActualConfigs(report.getConfigurationTags());
-               scHost.setRestartRequired(false);
-             }
-             // Necessary for resetting clients stale configs after starting service
-             if ((RoleCommand.INSTALL.toString().equals(report.getRoleCommand()) ||
-                 (RoleCommand.CUSTOM_COMMAND.toString().equals(report.getRoleCommand()) &&
-                 "INSTALL".equals(report.getCustomCommand()))) && svcComp.isClientComponent()){
-               scHost.updateActualConfigs(report.getConfigurationTags());
-               scHost.setRestartRequired(false);
-             }
-             if (RoleCommand.CUSTOM_COMMAND.toString().equals(report.getRoleCommand()) &&
-                 !("START".equals(report.getCustomCommand()) ||
-                  "STOP".equals(report.getCustomCommand()))) {
-               //do not affect states for custom commands except START and STOP
-               //lets status commands to be responsible for this
-               continue;
-             }
- 
-             if (RoleCommand.START.toString().equals(report.getRoleCommand()) ||
-                 (RoleCommand.CUSTOM_COMMAND.toString().equals(report.getRoleCommand()) &&
-                     "START".equals(report.getCustomCommand()))) {
-               scHost.handleEvent(new ServiceComponentHostStartedEvent(schName,
-                   hostname, now));
-               scHost.setRestartRequired(false);
-             } else if (RoleCommand.STOP.toString().equals(report.getRoleCommand()) ||
-                 (RoleCommand.CUSTOM_COMMAND.toString().equals(report.getRoleCommand()) &&
-                     "STOP".equals(report.getCustomCommand()))) {
-               scHost.handleEvent(new ServiceComponentHostStoppedEvent(schName,
-                   hostname, now));
-             } else {
-               scHost.handleEvent(new ServiceComponentHostOpSucceededEvent(schName,
-                   hostname, now));
-             }
-           } else if (report.getStatus().equals("FAILED")) {
- 
-             if (StringUtils.isNotBlank(report.getStructuredOut())) {
-               try {
-                 ComponentVersionStructuredOut structuredOutput = gson.fromJson(report.getStructuredOut(), ComponentVersionStructuredOut.class);
- 
-                 if (null != structuredOutput.upgradeDirection) {
-                   // TODO: backward compatibility: now state is set to FAILED also during downgrade
-                   scHost.setUpgradeState(UpgradeState.FAILED);
-                 }
-               } catch (JsonSyntaxException ex) {
-                 LOG.warn("Structured output was found, but not parseable: {}", report.getStructuredOut());
-               }
-             }
- 
-             LOG.warn("Operation failed - may be retried. Service component host: "
-                 + schName + ", host: " + hostname + " Action id" + report.getActionId());
-             if (actionManager.isInProgressCommand(report)) {
-               scHost.handleEvent(new ServiceComponentHostOpFailedEvent
-                 (schName, hostname, now));
-             } else {
-               LOG.info("Received report for a command that is no longer active. " + report);
-             }
-           } else if (report.getStatus().equals("IN_PROGRESS")) {
-             scHost.handleEvent(new ServiceComponentHostOpInProgressEvent(schName,
-                 hostname, now));
-           }
-         } catch (ServiceComponentNotFoundException scnex) {
-           LOG.warn("Service component not found ", scnex);
-         } catch (InvalidStateTransitionException ex) {
-           if (LOG.isDebugEnabled()) {
-             LOG.warn("State machine exception.", ex);
-           } else {
-             LOG.warn("State machine exception. " + ex.getMessage());
-           }
-         }
-       }
-     }
- 
-     //Update state machines from reports
-     actionManager.processTaskResponse(hostname, reports, commands);
-   }
- 
-   protected void processStatusReports(HeartBeat heartbeat,
-                                       String hostname,
-                                       Clusters clusterFsm)
-       throws AmbariException {
-     Set<Cluster> clusters = clusterFsm.getClustersForHost(hostname);
-     for (Cluster cl : clusters) {
-       for (ComponentStatus status : heartbeat.componentStatus) {
-         if (status.getClusterName().equals(cl.getClusterName())) {
-           try {
-             Service svc = cl.getService(status.getServiceName());
- 
-             String componentName = status.getComponentName();
-             if (svc.getServiceComponents().containsKey(componentName)) {
-               ServiceComponent svcComp = svc.getServiceComponent(
-                   componentName);
-               ServiceComponentHost scHost = svcComp.getServiceComponentHost(
-                   hostname);
-               State prevState = scHost.getState();
-               State liveState = State.valueOf(State.class, status.getStatus());
-               if (prevState.equals(State.INSTALLED)
-                   || prevState.equals(State.STARTED)
-                   || prevState.equals(State.STARTING)
-                   || prevState.equals(State.STOPPING)
-                   || prevState.equals(State.UNKNOWN)) {
-                 scHost.setState(liveState); //TODO direct status set breaks state machine sometimes !!!
-                 if (!prevState.equals(liveState)) {
-                   LOG.info("State of service component " + componentName
-                       + " of service " + status.getServiceName()
-                       + " of cluster " + status.getClusterName()
-                       + " has changed from " + prevState + " to " + liveState
-                       + " at host " + hostname);
-                 }
-               }
- 
-               SecurityState prevSecurityState = scHost.getSecurityState();
-               SecurityState currentSecurityState = SecurityState.valueOf(status.getSecurityState());
-               if((prevSecurityState != currentSecurityState)) {
-                 if(prevSecurityState.isEndpoint()) {
-                   scHost.setSecurityState(currentSecurityState);
-                   LOG.info(String.format("Security of service component %s of service %s of cluster %s " +
-                           "has changed from %s to %s on host %s",
-                       componentName, status.getServiceName(), status.getClusterName(), prevSecurityState,
-                       currentSecurityState, hostname));
-                 }
-                 else {
-                   LOG.debug(String.format("Security of service component %s of service %s of cluster %s " +
-                           "has changed from %s to %s on host %s but will be ignored since %s is a " +
-                           "transitional state",
-                       componentName, status.getServiceName(), status.getClusterName(),
-                       prevSecurityState, currentSecurityState, hostname, prevSecurityState));
-                 }
-               }
- 
-               if (null != status.getStackVersion() && !status.getStackVersion().isEmpty()) {
-                 scHost.setStackVersion(gson.fromJson(status.getStackVersion(), StackId.class));
-               }
- 
-               if (null != status.getConfigTags()) {
-                 scHost.updateActualConfigs(status.getConfigTags());
-               }
- 
-               Map<String, Object> extra = status.getExtra();
-               if (null != extra && !extra.isEmpty()) {
-                 try {
-                   if (extra.containsKey("processes")) {
-                     @SuppressWarnings("unchecked")
-                     List<Map<String, String>> list = (List<Map<String, String>>) extra.get("processes");
-                     scHost.setProcesses(list);
-                   }
-                   if (extra.containsKey("version")) {
-                     String version = extra.get("version").toString();
- 
-                     HostComponentVersionAdvertisedEvent event = new HostComponentVersionAdvertisedEvent(cl, scHost, version);
-                     versionEventPublisher.publish(event);
-                   }
- 
-                 } catch (Exception e) {
-                   LOG.error("Could not access extra JSON for " +
-                       scHost.getServiceComponentName() + " from " +
-                       scHost.getHostName() + ": " + status.getExtra() +
-                       " (" + e.getMessage() + ")");
-                 }
-               }
- 
-               this.heartbeatMonitor.getAgentRequests()
-                   .setExecutionDetailsRequest(hostname, componentName, status.getSendExecCmdDet());
-             } else {
-               // TODO: What should be done otherwise?
-             }
-           } catch (ServiceNotFoundException e) {
-             LOG.warn("Received a live status update for a non-initialized"
-                 + " service"
-                 + ", clusterName=" + status.getClusterName()
-                 + ", serviceName=" + status.getServiceName());
-             // FIXME ignore invalid live update and continue for now?
-             continue;
-           } catch (ServiceComponentNotFoundException e) {
-             LOG.warn("Received a live status update for a non-initialized"
-                 + " servicecomponent"
-                 + ", clusterName=" + status.getClusterName()
-                 + ", serviceName=" + status.getServiceName()
-                 + ", componentName=" + status.getComponentName());
-             // FIXME ignore invalid live update and continue for now?
-             continue;
-           } catch (ServiceComponentHostNotFoundException e) {
-             LOG.warn("Received a live status update for a non-initialized"
-                 + " service"
-                 + ", clusterName=" + status.getClusterName()
-                 + ", serviceName=" + status.getServiceName()
-                 + ", componentName=" + status.getComponentName()
-                 + ", hostname=" + hostname);
-             // FIXME ignore invalid live update and continue for now?
-             continue;
-           } catch (RuntimeException e) {
-             LOG.warn("Received a live status with invalid payload"
-                 + " service"
-                 + ", clusterName=" + status.getClusterName()
-                 + ", serviceName=" + status.getServiceName()
-                 + ", componentName=" + status.getComponentName()
-                 + ", hostname=" + hostname
-                 + ", error=" + e.getMessage());
-             continue;
-           }
-         }
-       }
-     }
-   }
--
    /**
     * Adds commands from action queue to a heartbeat response.
     */
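
The combined diff above strips processHostStatus(), processCommandReports() and processStatusReports() out of HeartBeatHandler; per the diffstat for this merge, that processing now lives in the new HeartbeatProcessor (+773 lines). A rough sketch of how callers reach it after the refactor, assuming only what the updated tests in this merge show:

    // Hedged sketch: constructor arguments mirror the test code,
    // and getHeartbeatProcessor() is the accessor the updated tests use.
    HeartBeatHandler handler = new HeartBeatHandler(fsm, aq, am, injector);
    HeartbeatProcessor heartbeatProcessor = handler.getHeartbeatProcessor();

    handler.handleHeartBeat(hb);              // queueing and response bookkeeping
    heartbeatProcessor.processHeartbeat(hb);  // host, command and component status handling
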

http://git-wip-us.apache.org/repos/asf/ambari/blob/e06d95d1/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
index 221b83d,1cb935b..c63d043
--- a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
@@@ -43,9 -34,10 +43,10 @@@ import org.apache.ambari.server.orm.ent
  import org.apache.ambari.server.security.ClientSecurityType;
  import org.apache.ambari.server.security.authorization.LdapServerProperties;
  import org.apache.ambari.server.security.authorization.jwt.JwtAuthenticationProperties;
 +import org.apache.ambari.server.security.encryption.CertificateUtils;
  import org.apache.ambari.server.security.encryption.CredentialProvider;
  import org.apache.ambari.server.state.stack.OsFamily;
 -import org.apache.ambari.server.security.encryption.CertificateUtils;
+ import org.apache.ambari.server.utils.AmbariPath;
  import org.apache.ambari.server.utils.Parallel;
  import org.apache.ambari.server.utils.ShellCommandUtil;
  import org.apache.commons.io.FileUtils;

http://git-wip-us.apache.org/repos/asf/ambari/blob/e06d95d1/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/e06d95d1/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/e06d95d1/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
----------------------------------------------------------------------
diff --cc ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
index b6d51af,c62352a..e29e23e
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
@@@ -2366,160 -1394,7 +1394,6 @@@ public class TestHeartbeatHandler 
      return componentStatus1;
    }
  
-   private HeartBeatHandler getHeartBeatHandler(ActionManager am, ActionQueue aq)
-       throws InvalidStateTransitionException, AmbariException {
-     HeartBeatHandler handler = new HeartBeatHandler(clusters, aq, am, injector);
-     Register reg = new Register();
-     HostInfo hi = new HostInfo();
-     hi.setHostName(DummyHostname1);
-     hi.setOS(DummyOs);
-     hi.setOSRelease(DummyOSRelease);
-     reg.setHostname(DummyHostname1);
-     reg.setResponseId(0);
-     reg.setHardwareProfile(hi);
-     reg.setAgentVersion(metaInfo.getServerVersion());
-     handler.handleRegistration(reg);
-     return handler;
-   }
- 
-   private Cluster getDummyCluster()
-       throws AmbariException {
-     StackEntity stackEntity = stackDAO.find(HDP_22_STACK.getStackName(), HDP_22_STACK.getStackVersion());
-     org.junit.Assert.assertNotNull(stackEntity);
- 
-     // Create the cluster
-     ResourceTypeEntity resourceTypeEntity = new ResourceTypeEntity();
-     resourceTypeEntity.setId(ResourceType.CLUSTER.getId());
-     resourceTypeEntity.setName(ResourceType.CLUSTER.name());
-     resourceTypeEntity = resourceTypeDAO.merge(resourceTypeEntity);
- 
-     ResourceEntity resourceEntity = new ResourceEntity();
-     resourceEntity.setResourceType(resourceTypeEntity);
- 
-     ClusterEntity clusterEntity = new ClusterEntity();
-     clusterEntity.setClusterName(DummyCluster);
-     clusterEntity.setClusterInfo("test_cluster_info1");
-     clusterEntity.setResource(resourceEntity);
-     clusterEntity.setDesiredStack(stackEntity);
- 
-     clusterDAO.create(clusterEntity);
- 
-     StackId stackId = new StackId(DummyStackId);
- 
-     Cluster cluster = clusters.getCluster(DummyCluster);
- 
-     cluster.setDesiredStackVersion(stackId);
-     cluster.setCurrentStackVersion(stackId);
-     helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
-     cluster.createClusterVersion(stackId, stackId.getStackVersion(), "admin",
-         RepositoryVersionState.INSTALLING);
- 
-     Set<String> hostNames = new HashSet<String>(){{
-       add(DummyHostname1);
-     }};
- 
-     Map<String, String> hostAttributes = new HashMap<String, String>();
-     hostAttributes.put("os_family", "redhat");
-     hostAttributes.put("os_release_version", "6.3");
- 
-     List<HostEntity> hostEntities = new ArrayList<HostEntity>();
-     for(String hostName : hostNames) {
-       clusters.addHost(hostName);
-       Host host = clusters.getHost(hostName);
-       host.setHostAttributes(hostAttributes);
-       host.persist();
- 
-       HostEntity hostEntity = hostDAO.findByName(hostName);
-       Assert.assertNotNull(hostEntity);
-       hostEntities.add(hostEntity);
-     }
-     clusterEntity.setHostEntities(hostEntities);
-     clusters.mapHostsToCluster(hostNames, DummyCluster);
- 
-     return cluster;
-   }
- 
-   @Test
-   @SuppressWarnings("unchecked")
-   public void testCommandStatusProcesses() throws Exception {
-     Cluster cluster = getDummyCluster();
-     Service hdfs = cluster.addService(HDFS);
-     hdfs.persist();
-     hdfs.addServiceComponent(DATANODE).persist();
-     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-     hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
- 
-     ActionQueue aq = new ActionQueue();
- 
-     HeartBeat hb = new HeartBeat();
-     hb.setTimestamp(System.currentTimeMillis());
-     hb.setResponseId(0);
-     hb.setHostname(DummyHostname1);
-     hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
-     hb.setReports(new ArrayList<CommandReport>());
- 
-     List<Map<String, String>> procs = new ArrayList<Map<String, String>>();
-     Map<String, String> proc1info = new HashMap<String, String>();
-     proc1info.put("name", "a");
-     proc1info.put("status", "RUNNING");
-     procs.add(proc1info);
- 
-     Map<String, String> proc2info = new HashMap<String, String>();
-     proc2info.put("name", "b");
-     proc2info.put("status", "NOT_RUNNING");
-     procs.add(proc2info);
- 
-     Map<String, Object> extra = new HashMap<String, Object>();
-     extra.put("processes", procs);
- 
-     ArrayList<ComponentStatus> componentStatuses = new ArrayList<ComponentStatus>();
-     ComponentStatus componentStatus1 = new ComponentStatus();
-     componentStatus1.setClusterName(DummyCluster);
-     componentStatus1.setServiceName(HDFS);
-     componentStatus1.setMessage(DummyHostStatus);
-     componentStatus1.setStatus(State.STARTED.name());
-     componentStatus1.setSecurityState(SecurityState.UNSECURED.name());
-     componentStatus1.setComponentName(DATANODE);
- 
-     componentStatus1.setExtra(extra);
-     componentStatuses.add(componentStatus1);
-     hb.setComponentStatus(componentStatuses);
- 
-     final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
-             Role.DATANODE, null, null);
- 
-     ActionManager am = getMockActionManager();
-     expect(am.getTasks(anyObject(List.class))).andReturn(
-             new ArrayList<HostRoleCommand>() {{
-               add(command);
-             }}).anyTimes();
-     replay(am);
- 
-     HeartBeatHandler handler = getHeartBeatHandler(am, aq);
-     handler.handleHeartBeat(hb);
-     ServiceComponentHost sch = hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
- 
-     Assert.assertEquals(Integer.valueOf(2), Integer.valueOf(sch.getProcesses().size()));
- 
-     hb = new HeartBeat();
-     hb.setTimestamp(System.currentTimeMillis());
-     hb.setResponseId(1);
-     hb.setHostname(DummyHostname1);
-     hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
-     hb.setReports(new ArrayList<CommandReport>());
- 
-     componentStatus1 = new ComponentStatus();
-     componentStatus1.setClusterName(DummyCluster);
-     componentStatus1.setServiceName(HDFS);
-     componentStatus1.setMessage(DummyHostStatus);
-     componentStatus1.setStatus(State.STARTED.name());
-     componentStatus1.setSecurityState(SecurityState.UNSECURED.name());
-     componentStatus1.setComponentName(DATANODE);
-     hb.setComponentStatus(Collections.singletonList(componentStatus1));
- 
-     handler.handleHeartBeat(hb);
-   }
--
    @Test
    @SuppressWarnings("unchecked")
    public void testCommandStatusProcesses_empty() throws Exception {


[04/11] ambari git commit: AMBARI-14798. Users cannot login with uppercase username (Oliver Szabo via rlevas)

Posted by nc...@apache.org.
AMBARI-14798. Users cannot login with uppercase username (Oliver Szabo via rlevas)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2b42559f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2b42559f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2b42559f

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 2b42559f516d0f45072174dac5f9d230b8e7c31c
Parents: f7ebe91
Author: Oliver Szabo <os...@hortonworks.com>
Authored: Tue Feb 23 11:14:02 2016 -0500
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Tue Feb 23 11:14:02 2016 -0500

----------------------------------------------------------------------
 .../api/services/ActiveWidgetLayoutService.java | 10 +--
 .../api/services/UserAuthorizationService.java  |  4 +-
 .../api/services/UserPrivilegeService.java      |  3 +-
 .../ambari/server/api/services/UserService.java |  3 +-
 .../services/ActiveWidgetLayoutServiceTest.java | 76 ++++++++++++++++++++
 .../services/UserAuthorizationServiceTest.java  | 12 ++++
 .../api/services/UserPrivilegeServiceTest.java  | 13 ++++
 .../server/api/services/UserServiceTest.java    | 71 ++++++++++++++++++
 8 files changed, 182 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
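
The change makes user-scoped REST resources resolve case-insensitively: each service lower-cases the user name before putting it into the resource id map, presumably so a request made with an uppercase login resolves to the same stored lowercase user. The shared pattern, sketched from the diffs that follow (StringUtils is org.apache.commons.lang.StringUtils, and StringUtils.lowerCase(null) simply returns null):

    // Sketch of the normalization each service now applies; userName comes from the request path.
    Map<Resource.Type, String> mapIds = new HashMap<Resource.Type, String>();
    mapIds.put(Resource.Type.User, StringUtils.lowerCase(userName));
    return createResource(Resource.Type.UserPrivilege, mapIds);
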


http://git-wip-us.apache.org/repos/asf/ambari/blob/2b42559f/ambari-server/src/main/java/org/apache/ambari/server/api/services/ActiveWidgetLayoutService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/ActiveWidgetLayoutService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/ActiveWidgetLayoutService.java
index c4403df..a0c3386 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/ActiveWidgetLayoutService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/ActiveWidgetLayoutService.java
@@ -17,25 +17,21 @@
  */
 package org.apache.ambari.server.api.services;
 
-import com.sun.jersey.core.util.Base64;
 import org.apache.ambari.server.api.resources.ResourceInstance;
 import org.apache.ambari.server.controller.spi.Resource;
 
-import javax.ws.rs.DELETE;
 import javax.ws.rs.GET;
-import javax.ws.rs.POST;
 import javax.ws.rs.PUT;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
 import javax.ws.rs.Produces;
 import javax.ws.rs.core.Context;
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.UriInfo;
-import java.nio.charset.Charset;
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.commons.lang.StringUtils;
+
 /**
  * WidgetLayout Service
  */
@@ -73,7 +69,7 @@ public class ActiveWidgetLayoutService extends BaseService {
 
   private ResourceInstance createResource(String widgetLayoutId) {
     Map<Resource.Type,String> mapIds = new HashMap<Resource.Type, String>();
-    mapIds.put(Resource.Type.User, userName);
+    mapIds.put(Resource.Type.User, StringUtils.lowerCase(userName));
     return createResource(Resource.Type.ActiveWidgetLayout, mapIds);
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/2b42559f/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserAuthorizationService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserAuthorizationService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserAuthorizationService.java
index 6861d3d..c288fdb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserAuthorizationService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserAuthorizationService.java
@@ -32,6 +32,8 @@ import javax.ws.rs.core.UriInfo;
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.commons.lang.StringUtils;
+
 /**
  * UserAuthorizationService is a read-only service responsible for user authorization resource requests.
  * <p/>
@@ -94,7 +96,7 @@ public class UserAuthorizationService extends BaseService {
    */
   protected ResourceInstance createAuthorizationResource(String authorizationId) {
     Map<Resource.Type, String> mapIds = new HashMap<Resource.Type, String>();
-    mapIds.put(Resource.Type.User, username);
+    mapIds.put(Resource.Type.User, StringUtils.lowerCase(username));
     mapIds.put(Resource.Type.UserAuthorization, authorizationId);
     return createResource(Resource.Type.UserAuthorization, mapIds);
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/2b42559f/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserPrivilegeService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserPrivilegeService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserPrivilegeService.java
index 80769cf..86c4995 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserPrivilegeService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserPrivilegeService.java
@@ -29,6 +29,7 @@ import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.UriInfo;
 
+import org.apache.commons.lang.StringUtils;
 /**
  *  Service responsible for user privilege resource requests.
  */
@@ -72,7 +73,7 @@ public class UserPrivilegeService extends PrivilegeService {
   @Override
   protected ResourceInstance createPrivilegeResource(String privilegeId) {
     final Map<Resource.Type, String> mapIds = new HashMap<Resource.Type, String>();
-    mapIds.put(Resource.Type.User, userName);
+    mapIds.put(Resource.Type.User, StringUtils.lowerCase(userName));
     mapIds.put(Resource.Type.UserPrivilege, privilegeId);
     return createResource(Resource.Type.UserPrivilege, mapIds);
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/2b42559f/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserService.java
index fea5eca..c46c373 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserService.java
@@ -31,6 +31,7 @@ import javax.ws.rs.core.UriInfo;
 
 import org.apache.ambari.server.api.resources.ResourceInstance;
 import org.apache.ambari.server.controller.spi.Resource;
+
 import org.apache.commons.lang.StringUtils;
 
 import java.util.Collections;
@@ -171,6 +172,6 @@ public class UserService extends BaseService {
    */
   private ResourceInstance createUserResource(String userName) {
     return createResource(Resource.Type.User,
-        Collections.singletonMap(Resource.Type.User, userName));
+        Collections.singletonMap(Resource.Type.User, StringUtils.lowerCase(userName)));
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/2b42559f/ambari-server/src/test/java/org/apache/ambari/server/api/services/ActiveWidgetLayoutServiceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/ActiveWidgetLayoutServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/ActiveWidgetLayoutServiceTest.java
new file mode 100644
index 0000000..c459f7c
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/ActiveWidgetLayoutServiceTest.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.api.services;
+
+import org.apache.ambari.server.api.resources.ResourceInstance;
+import org.apache.ambari.server.controller.spi.Resource;
+
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.ambari.server.orm.entities.WidgetLayoutEntity;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Unit tests for ActiveWidgetLayoutService.
+ */
+public class ActiveWidgetLayoutServiceTest {
+
+  @Test
+  public void testCreateResourceWithUppercaseUsername() {
+    // GIVEN
+    ActiveWidgetLayoutService activeWidgetLayoutService = new TestActiveWidgetLayoutService("MyUser");
+    // WHEN
+    Response response = activeWidgetLayoutService.getServices(null, null, null);
+    // THEN
+    assertEquals("myuser", ((WidgetLayoutEntity) response.getEntity()).getUserName());
+  }
+
+  private class TestActiveWidgetLayoutService extends ActiveWidgetLayoutService {
+    public TestActiveWidgetLayoutService(String username) {
+      super(username);
+    }
+
+    @Override
+    protected Response handleRequest(HttpHeaders headers, String body, UriInfo uriInfo,
+                                     Request.Type requestType, final ResourceInstance resource) {
+      return new Response() {
+        @Override
+        public Object getEntity() {
+          WidgetLayoutEntity entity = new WidgetLayoutEntity();
+          entity.setUserName(resource.getKeyValueMap().get(Resource.Type.User));
+          return entity;
+        }
+
+        @Override
+        public int getStatus() {
+          return 0;
+        }
+
+        @Override
+        public MultivaluedMap<String, Object> getMetadata() {
+          return null;
+        }
+      };
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/2b42559f/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserAuthorizationServiceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserAuthorizationServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserAuthorizationServiceTest.java
index 9627d19..c3270bb 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserAuthorizationServiceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserAuthorizationServiceTest.java
@@ -21,6 +21,8 @@ package org.apache.ambari.server.api.services;
 import org.apache.ambari.server.api.resources.ResourceInstance;
 import org.apache.ambari.server.api.services.parsers.RequestBodyParser;
 import org.apache.ambari.server.api.services.serializers.ResultSerializer;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.junit.Test;
 
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.UriInfo;
@@ -54,6 +56,16 @@ public class UserAuthorizationServiceTest extends BaseServiceTest {
     return listInvocations;
   }
 
+  @Test
+  public void testCreateAuthorizationResourceWithUppercaseUsername() {
+    // GIVEN
+    UserAuthorizationService userAuthorizationService= new UserAuthorizationService("Jdoe");
+    // WHEN
+    ResourceInstance result = userAuthorizationService.createAuthorizationResource("id");
+    // THEN
+    assertEquals("jdoe", result.getKeyValueMap().get(Resource.Type.User));
+  }
+
 
   private class TestUserAuthorizationService extends UserAuthorizationService {
     private String id;

http://git-wip-us.apache.org/repos/asf/ambari/blob/2b42559f/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserPrivilegeServiceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserPrivilegeServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserPrivilegeServiceTest.java
index 269315a..db2d38a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserPrivilegeServiceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserPrivilegeServiceTest.java
@@ -33,10 +33,13 @@ import junit.framework.Assert;
 import org.apache.ambari.server.api.resources.ResourceInstance;
 import org.apache.ambari.server.api.services.parsers.RequestBodyParser;
 import org.apache.ambari.server.api.services.serializers.ResultSerializer;
+import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.Resource.Type;
 import org.easymock.EasyMock;
 import org.junit.Test;
 
+import static org.junit.Assert.assertEquals;
+
 /**
  * Unit tests for GroupService.
  */
@@ -82,6 +85,16 @@ public class UserPrivilegeServiceTest extends BaseServiceTest {
     }
   }
 
+  @Test
+  public void testCreatePrivilegeResourcesWithUppercaseUsername() {
+    // GIVEN
+    UserPrivilegeService userPrivilegeService = new UserPrivilegeService("User");
+    // WHEN
+    ResourceInstance result = userPrivilegeService.createPrivilegeResource("test");
+    // THEN
+    assertEquals( "user", result.getKeyValueMap().get(Resource.Type.User));
+  }
+
   private class TestUserPrivilegeService extends UserPrivilegeService {
 
     public TestUserPrivilegeService() {

http://git-wip-us.apache.org/repos/asf/ambari/blob/2b42559f/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserServiceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserServiceTest.java
new file mode 100644
index 0000000..0ed0a66
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserServiceTest.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.api.services;
+
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.ambari.server.api.resources.ResourceInstance;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.orm.entities.UserEntity;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Unit tests for UserService.
+ */
+public class UserServiceTest {
+
+  @Test
+  public void testCreateResourcesWithUppercaseUsername() {
+    // GIVEN
+    UserService userService = new TestUserService();
+    // WHEN
+    Response response = userService.getUser(null, null, null, "MyUser");
+    // THEN
+    assertEquals("myuser", ((UserEntity) response.getEntity()).getUserName());
+  }
+
+  class TestUserService extends UserService {
+    @Override
+    protected Response handleRequest(HttpHeaders headers, String body, UriInfo uriInfo,
+                                     Request.Type requestType, final ResourceInstance resource) {
+      return new Response() {
+        @Override
+        public Object getEntity() {
+          UserEntity entity = new UserEntity();
+          entity.setUserName(resource.getKeyValueMap().get(Resource.Type.User));
+          return entity;
+        }
+
+        @Override
+        public int getStatus() {
+          return 0;
+        }
+
+        @Override
+        public MultivaluedMap<String, Object> getMetadata() {
+          return null;
+        }
+      };
+    }
+  }
+}
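
For readers skimming the AMBARI-14798 change above: the fix lower-cases the user name at every point where it becomes a resource key, so that "MyUser" and "myuser" resolve to the same User resource. A minimal sketch of that normalization, assuming only commons-lang on the classpath (the class and method names below are illustrative and not part of the commit):

  import org.apache.commons.lang.StringUtils;

  public class UsernameKeySketch {
    // StringUtils.lowerCase is null-safe: it returns null for null input,
    // which is why the patch can call it without an extra null check.
    static String toUserResourceKey(String userName) {
      return StringUtils.lowerCase(userName);
    }

    public static void main(String[] args) {
      System.out.println(toUserResourceKey("MyUser")); // prints "myuser"
      System.out.println(toUserResourceKey(null));     // prints "null"
    }
  }

Note that StringUtils.lowerCase delegates to String.toLowerCase() with the JVM default locale, so the exact result for non-ASCII user names depends on the server locale.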


[07/11] ambari git commit: AMBARI-15135. [Ambari tarballs] ambari-server java-side should support running from custom root (aonishuk)

Posted by nc...@apache.org.
AMBARI-15135. [Ambari tarballs] ambari-server java-side should support running from custom root (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9d7ff5f1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9d7ff5f1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9d7ff5f1

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 9d7ff5f14f44a58ba280a964a51a85e9f654b27f
Parents: 18d5a69
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Tue Feb 23 20:06:24 2016 +0200
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Tue Feb 23 20:06:24 2016 +0200

----------------------------------------------------------------------
 Committed | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9d7ff5f1/Committed
----------------------------------------------------------------------
diff --git a/Committed b/Committed
deleted file mode 100644
index e69de29..0000000


[02/11] ambari git commit: AMBARI-14798. Users cannot login with uppercase username (Oliver Szabo via rlevas)

Posted by nc...@apache.org.
AMBARI-14798. Users cannot login with uppercase username (Oliver Szabo via rlevas)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e1ca2416
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e1ca2416
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e1ca2416

Branch: refs/heads/branch-dev-patch-upgrade
Commit: e1ca24160027d73cf27b90b35bc0757442a0cbdd
Parents: ea6a7a6
Author: Oliver Szabo <os...@hortonworks.com>
Authored: Tue Feb 23 11:12:00 2016 -0500
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Tue Feb 23 11:12:05 2016 -0500

----------------------------------------------------------------------
 .../server/api/services/ActiveWidgetLayoutService.java | 10 +++-------
 .../server/api/services/UserAuthorizationService.java  |  4 +++-
 .../server/api/services/UserPrivilegeService.java      |  3 ++-
 .../apache/ambari/server/api/services/UserService.java |  3 ++-
 .../api/services/UserAuthorizationServiceTest.java     | 12 ++++++++++++
 .../server/api/services/UserPrivilegeServiceTest.java  | 13 +++++++++++++
 6 files changed, 35 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e1ca2416/ambari-server/src/main/java/org/apache/ambari/server/api/services/ActiveWidgetLayoutService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/ActiveWidgetLayoutService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/ActiveWidgetLayoutService.java
index c4403df..a0c3386 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/ActiveWidgetLayoutService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/ActiveWidgetLayoutService.java
@@ -17,25 +17,21 @@
  */
 package org.apache.ambari.server.api.services;
 
-import com.sun.jersey.core.util.Base64;
 import org.apache.ambari.server.api.resources.ResourceInstance;
 import org.apache.ambari.server.controller.spi.Resource;
 
-import javax.ws.rs.DELETE;
 import javax.ws.rs.GET;
-import javax.ws.rs.POST;
 import javax.ws.rs.PUT;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
 import javax.ws.rs.Produces;
 import javax.ws.rs.core.Context;
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.UriInfo;
-import java.nio.charset.Charset;
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.commons.lang.StringUtils;
+
 /**
  * WidgetLayout Service
  */
@@ -73,7 +69,7 @@ public class ActiveWidgetLayoutService extends BaseService {
 
   private ResourceInstance createResource(String widgetLayoutId) {
     Map<Resource.Type,String> mapIds = new HashMap<Resource.Type, String>();
-    mapIds.put(Resource.Type.User, userName);
+    mapIds.put(Resource.Type.User, StringUtils.lowerCase(userName));
     return createResource(Resource.Type.ActiveWidgetLayout, mapIds);
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e1ca2416/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserAuthorizationService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserAuthorizationService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserAuthorizationService.java
index 6861d3d..c288fdb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserAuthorizationService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserAuthorizationService.java
@@ -32,6 +32,8 @@ import javax.ws.rs.core.UriInfo;
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.commons.lang.StringUtils;
+
 /**
  * UserAuthorizationService is a read-only service responsible for user authorization resource requests.
  * <p/>
@@ -94,7 +96,7 @@ public class UserAuthorizationService extends BaseService {
    */
   protected ResourceInstance createAuthorizationResource(String authorizationId) {
     Map<Resource.Type, String> mapIds = new HashMap<Resource.Type, String>();
-    mapIds.put(Resource.Type.User, username);
+    mapIds.put(Resource.Type.User, StringUtils.lowerCase(username));
     mapIds.put(Resource.Type.UserAuthorization, authorizationId);
     return createResource(Resource.Type.UserAuthorization, mapIds);
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/e1ca2416/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserPrivilegeService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserPrivilegeService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserPrivilegeService.java
index 80769cf..86c4995 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserPrivilegeService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserPrivilegeService.java
@@ -29,6 +29,7 @@ import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.UriInfo;
 
+import org.apache.commons.lang.StringUtils;
 /**
  *  Service responsible for user privilege resource requests.
  */
@@ -72,7 +73,7 @@ public class UserPrivilegeService extends PrivilegeService {
   @Override
   protected ResourceInstance createPrivilegeResource(String privilegeId) {
     final Map<Resource.Type, String> mapIds = new HashMap<Resource.Type, String>();
-    mapIds.put(Resource.Type.User, userName);
+    mapIds.put(Resource.Type.User, StringUtils.lowerCase(userName));
     mapIds.put(Resource.Type.UserPrivilege, privilegeId);
     return createResource(Resource.Type.UserPrivilege, mapIds);
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/e1ca2416/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserService.java
index fea5eca..c46c373 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserService.java
@@ -31,6 +31,7 @@ import javax.ws.rs.core.UriInfo;
 
 import org.apache.ambari.server.api.resources.ResourceInstance;
 import org.apache.ambari.server.controller.spi.Resource;
+
 import org.apache.commons.lang.StringUtils;
 
 import java.util.Collections;
@@ -171,6 +172,6 @@ public class UserService extends BaseService {
    */
   private ResourceInstance createUserResource(String userName) {
     return createResource(Resource.Type.User,
-        Collections.singletonMap(Resource.Type.User, userName));
+        Collections.singletonMap(Resource.Type.User, StringUtils.lowerCase(userName)));
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/e1ca2416/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserAuthorizationServiceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserAuthorizationServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserAuthorizationServiceTest.java
index 9627d19..c3270bb 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserAuthorizationServiceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserAuthorizationServiceTest.java
@@ -21,6 +21,8 @@ package org.apache.ambari.server.api.services;
 import org.apache.ambari.server.api.resources.ResourceInstance;
 import org.apache.ambari.server.api.services.parsers.RequestBodyParser;
 import org.apache.ambari.server.api.services.serializers.ResultSerializer;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.junit.Test;
 
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.UriInfo;
@@ -54,6 +56,16 @@ public class UserAuthorizationServiceTest extends BaseServiceTest {
     return listInvocations;
   }
 
+  @Test
+  public void testCreateAuthorizationResourceWithUppercaseUsername() {
+    // GIVEN
+    UserAuthorizationService userAuthorizationService= new UserAuthorizationService("Jdoe");
+    // WHEN
+    ResourceInstance result = userAuthorizationService.createAuthorizationResource("id");
+    // THEN
+    assertEquals("jdoe", result.getKeyValueMap().get(Resource.Type.User));
+  }
+
 
   private class TestUserAuthorizationService extends UserAuthorizationService {
     private String id;

http://git-wip-us.apache.org/repos/asf/ambari/blob/e1ca2416/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserPrivilegeServiceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserPrivilegeServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserPrivilegeServiceTest.java
index 269315a..db2d38a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserPrivilegeServiceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserPrivilegeServiceTest.java
@@ -33,10 +33,13 @@ import junit.framework.Assert;
 import org.apache.ambari.server.api.resources.ResourceInstance;
 import org.apache.ambari.server.api.services.parsers.RequestBodyParser;
 import org.apache.ambari.server.api.services.serializers.ResultSerializer;
+import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.Resource.Type;
 import org.easymock.EasyMock;
 import org.junit.Test;
 
+import static org.junit.Assert.assertEquals;
+
 /**
  * Unit tests for GroupService.
  */
@@ -82,6 +85,16 @@ public class UserPrivilegeServiceTest extends BaseServiceTest {
     }
   }
 
+  @Test
+  public void testCreatePrivilegeResourcesWithUppercaseUsername() {
+    // GIVEN
+    UserPrivilegeService userPrivilegeService = new UserPrivilegeService("User");
+    // WHEN
+    ResourceInstance result = userPrivilegeService.createPrivilegeResource("test");
+    // THEN
+    assertEquals( "user", result.getKeyValueMap().get(Resource.Type.User));
+  }
+
   private class TestUserPrivilegeService extends UserPrivilegeService {
 
     public TestUserPrivilegeService() {


[05/11] ambari git commit: AMBARI-15134. Issues with Login Message dialog Round #2 (alexantonenko)

Posted by nc...@apache.org.
AMBARI-15134. Issues with Login Message dialog Round #2 (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/df0b18ca
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/df0b18ca
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/df0b18ca

Branch: refs/heads/branch-dev-patch-upgrade
Commit: df0b18ca5da59d9ed6bc4340ac09eb51db9963a4
Parents: 2b42559
Author: Alex Antonenko <hi...@gmail.com>
Authored: Tue Feb 23 18:29:33 2016 +0200
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Tue Feb 23 19:17:41 2016 +0200

----------------------------------------------------------------------
 .../loginActivities/LoginMessageMainCtrl.js     | 51 ++++++++++++++------
 .../ui/admin-web/app/scripts/i18n.config.js     |  4 +-
 .../app/views/loginActivities/loginMessage.html | 14 +++---
 ambari-web/app/router.js                        | 11 ++++-
 4 files changed, 55 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/df0b18ca/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/loginActivities/LoginMessageMainCtrl.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/loginActivities/LoginMessageMainCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/loginActivities/LoginMessageMainCtrl.js
index 763bd59..911bb0b 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/loginActivities/LoginMessageMainCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/loginActivities/LoginMessageMainCtrl.js
@@ -22,19 +22,27 @@ angular.module('ambariAdminConsole')
     var $t = $translate.instant,
       targetUrl = '/loginActivities';
 
-    $scope.status = false;
-    $scope.motdExists = false;
-    $scope.text = "";
-    $scope.buttonText = "OK";
-    $scope.submitDisabled = true;
+    $scope.getMOTD = function() {
+      $http.get('/api/v1/settings/motd').then(function (res) {
+        $scope.motdExists = true;
+        var
+          response = JSON.parse(res.data.Settings.content.replace(/\n/g, "\\n")),
+          lt = /&lt;/g,
+          gt = /&gt;/g,
+          ap = /&#39;/g,
+          ic = /&#34;/g;
 
-    $http.get('/api/v1/settings/motd').then(function (res) {
-      $scope.motdExists = true;
-      var response = JSON.parse(res.data.Settings.content.replace(/\n/g, "\\n"));
-      $scope.text = response.text ? response.text : "";
-      $scope.buttonText = response.button ? response.button : "";
-      $scope.status = response.status && response.status == "true" ? true : false;
-    });
+        $scope.text = response.text ? response.text.toString().replace(lt, "<").replace(gt, ">").replace(ap, "'").replace(ic, '"') : "";
+        $scope.buttonText = response.button ? response.button.toString().replace(lt, "<").replace(gt, ">").replace(ap, "'").replace(ic, '"') : "OK";
+        $scope.status = response.status && response.status == "true" ? true : false;
+      }, function(response) {
+        $scope.status = false;
+        $scope.motdExists = false;
+        $scope.text = "";
+        $scope.buttonText = "OK";
+      });
+      $scope.submitDisabled = true;
+    };
 
     $scope.inputChangeEvent = function(){
       $scope.submitDisabled = false;
@@ -43,6 +51,10 @@ angular.module('ambariAdminConsole')
       $scope.submitDisabled = false;
     };
 
+    $scope.cancel = function() {
+      $scope.getMOTD();
+    };
+
     $scope.$watch(function(scope) {
       return scope.submitDisabled;
     }, function(submitDisabled) {
@@ -50,10 +62,21 @@ angular.module('ambariAdminConsole')
     });
 
     $scope.saveLoginMsg = function(targetUrl) {
-      var method = $scope.motdExists ? 'PUT' : 'POST';
+      var
+        method = $scope.motdExists ? 'PUT' : 'POST',
+        text = "",
+        buttonText = "",
+        lt = /</g,
+        gt = />/g,
+        ap = /'/g,
+        ic = /"/g;
+
+      text = $scope.text.toString().replace(lt, "&lt;").replace(gt, "&gt;").replace(ap, "&#39;").replace(ic, "&#34;");
+      buttonText = $scope.buttonText ? $scope.buttonText.toString().replace(lt, "&lt;").replace(gt, "&gt;").replace(ap, "&#39;").replace(ic, "&#34;") : $scope.buttonText;
+
       var data = {
         'Settings' : {
-          'content' : '{"text":"' + $scope.text + '", "button":"' + $scope.buttonText + '", "status":"' + $scope.status + '"}',
+          'content' : '{"text":"' + text + '", "button":"' + buttonText + '", "status":"' + $scope.status + '"}',
           'name' : 'motd',
           'setting_type' : 'ambari-server'
         }

http://git-wip-us.apache.org/repos/asf/ambari/blob/df0b18ca/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js
index 0c67831..2035647 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js
@@ -87,9 +87,9 @@ angular.module('ambariAdminConsole')
         'loginActivities':'Login Activities',
         'loginMessage': 'Login Message',
         'loginMessage.placeholder': 'Please enter login message',
-        'buttonText.placeholder': 'Please enter text for the "ok" button',
+        'buttonText.placeholder': 'Please enter button text',
         'homeDirectory': 'Home Directory',
-        'onlySimpleChars': 'Must contain only simple characters.',
+        'notEmpty': 'This field cannot be empty',
         'saveError': 'Save error',
         'message': 'Message',
         'buttonText': 'Button',

http://git-wip-us.apache.org/repos/asf/ambari/blob/df0b18ca/ambari-admin/src/main/resources/ui/admin-web/app/views/loginActivities/loginMessage.html
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/views/loginActivities/loginMessage.html b/ambari-admin/src/main/resources/ui/admin-web/app/views/loginActivities/loginMessage.html
index 96217f5..1374f00 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/views/loginActivities/loginMessage.html
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/views/loginActivities/loginMessage.html
@@ -17,7 +17,7 @@
 -->
 
 <br/>
-<div class="login-message-pane" ng-controller="LoginMessageMainCtrl">
+<div class="login-message-pane" data-ng-init="getMOTD()" ng-controller="LoginMessageMainCtrl">
   <form class="form-horizontal" novalidate name="form" autocomplete="off">
     <div class="well">
       <fieldset>
@@ -37,7 +37,6 @@
                  placeholder="{{'common.loginActivities.loginMessage.placeholder' | translate}}"
                  ng-model="text"
                  ng-change="inputChangeEvent()"
-                 ng-pattern="/^([a-zA-Z0-9._\s]+)$/"
                  ng-disabled="!status"
                  autocomplete="off">
             </textarea>
@@ -46,7 +45,7 @@
             </div>
           </div>
         </div>
-        <div class="form-group" ng-class="{'has-error' : (form.login_text.$error.pattern) && form.submitted}">
+        <div class="form-group" ng-class="{'has-error' : (form.button_text.$error.pattern) && form.submitted}">
           <label class="col-sm-2 control-label">{{'common.loginActivities.buttonText' | translate}}</label>
           <div class="col-sm-4">
             <input type="text"
@@ -56,13 +55,13 @@
                    ng-model="buttonText"
                    ng-change="inputChangeEvent()"
                    ng-disabled="!status"
-                   ng-pattern="/^([a-zA-Z0-9._\s]+)$/"
                    maxlength="25"
                    size="25"
+                   required
                    autocomplete="off">
 
-            <div class="alert alert-danger top-margin" ng-show="form.button_text.$error.pattern && form.submitted">
-              {{'common.loginActivities.onlySimpleChars' | translate}}
+            <div class="alert alert-danger top-margin" ng-show="form.button_text.$error.required && form.submitted">
+              {{'common.loginActivities.notEmpty' | translate}}
             </div>
           </div>
         </div>
@@ -73,7 +72,8 @@
             ng-click="saveLoginMsg()">
             {{'common.controls.save' | translate}}
           </button>
-      </div>
+          <a class="btn btn-default pull-right cancel" href ng-click="cancel()">{{'common.controls.cancel' | translate}}</a>
+        </div>
       </fieldset>
     </div>
   </form>

http://git-wip-us.apache.org/repos/asf/ambari/blob/df0b18ca/ambari-web/app/router.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/router.js b/ambari-web/app/router.js
index 720ac0b..1537e85 100644
--- a/ambari-web/app/router.js
+++ b/ambari-web/app/router.js
@@ -361,7 +361,7 @@ App.Router = Em.Router.extend({
     }
 
     var
-      text = response.text ? response.text : "",
+      text = response.text ? response.text.replace(/(\r\n|\n|\r)/gm, '<br>') : "",
       buttonText = response.button ? response.button : Em.I18n.t('ok'),
       status = response.status && response.status == "true" ? true : false,
       self = this;
@@ -373,8 +373,15 @@ App.Router = Em.Router.extend({
         bodyClass: Ember.View.extend({
           template: Ember.Handlebars.compile(text)
         }),
-        primary: buttonText,
+        primary:null,
         secondary: null,
+        footerClass: Ember.View.extend({
+          template: Ember.Handlebars.compile(
+            '<div class="modal-footer">' +
+            '<button class="btn btn-success" {{action onPrimary target="view"}}>' + buttonText + '</button>'+
+            '</div>'
+          )
+        }),
 
         onPrimary: function () {
           self.setClusterData(data, opt, params);


[06/11] ambari git commit: AMBARI-15135. [Ambari tarballs] ambari-server java-side should support running from custom root (aonishuk)

Posted by nc...@apache.org.
AMBARI-15135. [Ambari tarballs] ambari-server java-side should support running from custom root (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/18d5a696
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/18d5a696
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/18d5a696

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 18d5a6964212237c0cd9a5c8c8211cd059cae716
Parents: df0b18c
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Tue Feb 23 19:42:54 2016 +0200
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Tue Feb 23 19:42:54 2016 +0200

----------------------------------------------------------------------
 Committed                                       |  0
 .../src/main/repo/install_ambari_tarball.py     |  2 +-
 ambari-server/conf/unix/ca.config               |  3 +-
 ambari-server/conf/unix/install-helper.sh       | 28 +++++++++++---
 ambari-server/conf/unix/log4j.properties        |  3 +-
 ambari-server/pom.xml                           |  1 +
 .../server/configuration/Configuration.java     | 24 ++++++------
 .../ambari/server/controller/AmbariServer.java  |  6 ++-
 .../encryption/MasterKeyServiceImpl.java        |  3 +-
 .../apache/ambari/server/utils/AmbariPath.java  | 39 ++++++++++++++++++++
 .../src/main/package/deb/control/postinst       |  2 +-
 .../src/main/package/deb/control/preinst        | 22 ++++++-----
 .../src/main/package/deb/control/prerm          |  2 +-
 .../src/main/package/rpm/postinstall.sh         | 10 +++--
 .../src/main/package/rpm/posttrans_server.sh    | 10 +++--
 .../src/main/package/rpm/preinstall.sh          | 19 ++++++----
 ambari-server/src/main/package/rpm/preremove.sh |  6 ++-
 17 files changed, 127 insertions(+), 53 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/18d5a696/Committed
----------------------------------------------------------------------
diff --git a/Committed b/Committed
new file mode 100644
index 0000000..e69de29

http://git-wip-us.apache.org/repos/asf/ambari/blob/18d5a696/ambari-common/src/main/repo/install_ambari_tarball.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/repo/install_ambari_tarball.py b/ambari-common/src/main/repo/install_ambari_tarball.py
index e6578b0..a22823f 100644
--- a/ambari-common/src/main/repo/install_ambari_tarball.py
+++ b/ambari-common/src/main/repo/install_ambari_tarball.py
@@ -42,7 +42,7 @@ DEB_DEPENDENCIES_PROPERTY =  "deb.dependency.list"
 
 FILES_TO_DOWNLOAD = [PREINST_SCRIPT, PRERM_SCRIPT, POSTINST_SCRIPT, POSTRM_SCRIPT, OS_CHECK, OS_FAMILY_DESCRIPTION, OS_PACKAGE_DEPENDENCIES]
 
-ROOT_FOLDER_ENV_VARIABLE = "AMBARI_ROOT_FOLDER"
+ROOT_FOLDER_ENV_VARIABLE = "RPM_INSTALL_PREFIX"
           
 class Utils:
   verbose = False

http://git-wip-us.apache.org/repos/asf/ambari/blob/18d5a696/ambari-server/conf/unix/ca.config
----------------------------------------------------------------------
diff --git a/ambari-server/conf/unix/ca.config b/ambari-server/conf/unix/ca.config
index 45fdda0..c34f387 100644
--- a/ambari-server/conf/unix/ca.config
+++ b/ambari-server/conf/unix/ca.config
@@ -1,7 +1,8 @@
 [ ca ]
 default_ca             = CA_CLIENT
 [ CA_CLIENT ]
-dir		       = /var/lib/ambari-server/keys/db
+root_dir = /
+dir		       = $root_dir/var/lib/ambari-server/keys/db
 certs                  = $dir/certs
 new_certs_dir          = $dir/newcerts
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/18d5a696/ambari-server/conf/unix/install-helper.sh
----------------------------------------------------------------------
diff --git a/ambari-server/conf/unix/install-helper.sh b/ambari-server/conf/unix/install-helper.sh
index 0ccfc63..918ea1d 100644
--- a/ambari-server/conf/unix/install-helper.sh
+++ b/ambari-server/conf/unix/install-helper.sh
@@ -16,8 +16,7 @@
 #########################################postinstall.sh#########################
 #                      SERVER INSTALL HELPER                     #
 ##################################################################
-
-ROOT="${AMBARI_ROOT_FOLDER}"
+ROOT="${RPM_INSTALL_PREFIX}" # Customized folder, which ambari-server files are installed into ('/' or '' are default).
 
 COMMON_DIR="${ROOT}/usr/lib/python2.6/site-packages/ambari_commons"
 RESOURCE_MANAGEMENT_DIR="${ROOT}/usr/lib/python2.6/site-packages/resource_management"
@@ -25,7 +24,8 @@ JINJA_DIR="${ROOT}/usr/lib/python2.6/site-packages/ambari_jinja2"
 SIMPLEJSON_DIR="${ROOT}/usr/lib/python2.6/site-packages/ambari_simplejson"
 OLD_COMMON_DIR="${ROOT}/usr/lib/python2.6/site-packages/common_functions"
 AMBARI_SERVER="${ROOT}/usr/lib/python2.6/site-packages/ambari_server"
-INSTALL_HELPER_AGENT="${ROOT}/var/lib/ambari-agent/install-helper.sh"
+INSTALL_HELPER_AGENT="/var/lib/ambari-agent/install-helper.sh"
+CA_CONFIG="${ROOT}/var/lib/ambari-server/keys/ca.config"
 COMMON_DIR_SERVER="${ROOT}/usr/lib/ambari-server/lib/ambari_commons"
 RESOURCE_MANAGEMENT_DIR_SERVER="${ROOT}/usr/lib/ambari-server/lib/resource_management"
 JINJA_SERVER_DIR="${ROOT}/usr/lib/ambari-server/lib/ambari_jinja2"
@@ -41,6 +41,7 @@ AMBARI_SERVER_EXECUTABLE="${ROOT}/etc/init.d/ambari-server"
 AMBARI_CONFIGS_DIR="${ROOT}/etc/ambari-server/conf"
 AMBARI_CONFIGS_DIR_SAVE="${ROOT}/etc/ambari-server/conf.save"
 AMBARI_CONFIGS_DIR_SAVE_BACKUP="${ROOT}/etc/ambari-server/conf_$(date '+%d_%m_%y_%H_%M').save"
+AMBARI_LOG4J="${AMBARI_CONFIGS_DIR}/log4j.properties"
 
 do_install(){
   rm -f "$AMBARI_SERVER_EXECUTABLE_LINK"
@@ -86,14 +87,29 @@ do_install(){
     ln -s "$AMBARI_PYTHON" "$PYTHON_WRAPER_TARGET"
   fi
 
+  sed -i "s|ambari.root.dir\s*=\s*/|ambari.root.dir=${ROOT}|g" "$AMBARI_LOG4J"
+  sed -i "s|root_dir\s*=\s*/|root_dir = ${ROOT}|g" "$CA_CONFIG"
+
+  AUTOSTART_SERVER_CMD="" 
   which chkconfig > /dev/null 2>&1
   if [ "$?" -eq 0 ] ; then
-    chkconfig --add ambari-server
+    AUTOSTART_SERVER_CMD="chkconfig --add ambari-server"
   fi
   which update-rc.d > /dev/null 2>&1
   if [ "$?" -eq 0 ] ; then
-    update-rc.d ambari-server defaults
-  fi 
+    AUTOSTART_SERVER_CMD="update-rc.d ambari-server defaults"
+  fi
+    
+  # if installed to customized root folder, skip ambari-server service actions,
+  # as no file in /etc/init.d/ambari-server is present
+  if [ ! "${ROOT}/" -ef "/" ] ; then 
+	echo "Not adding ambari-server service to startup, as installed to customized root."
+	echo "If you need this functionality run the commands below, which create ambari-server service and configure it to run at startup: "
+	echo "sudo ln -s ${AMBARI_SERVER_EXECUTABLE} /etc/init.d/ambari-server"
+	echo "sudo $AUTOSTART_SERVER_CMD"
+  else
+	$AUTOSTART_SERVER_CMD
+  fi
 }
 
 do_remove(){

http://git-wip-us.apache.org/repos/asf/ambari/blob/18d5a696/ambari-server/conf/unix/log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-server/conf/unix/log4j.properties b/ambari-server/conf/unix/log4j.properties
index 0ccab22..2ee32d4 100644
--- a/ambari-server/conf/unix/log4j.properties
+++ b/ambari-server/conf/unix/log4j.properties
@@ -18,7 +18,8 @@
 
 # Define some default values that can be overridden by system properties
 # Root logger option
-ambari.log.dir=/var/log/ambari-server
+ambari.root.dir=/
+ambari.log.dir=${ambari.root.dir}/var/log/ambari-server
 ambari.log.file=ambari-server.log
 ambari.config-changes.file=ambari-config-changes.log
 ambari.alerts.file=ambari-alerts.log

http://git-wip-us.apache.org/repos/asf/ambari/blob/18d5a696/ambari-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index 2d0559c..e3409b9 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -366,6 +366,7 @@
           <group>Development</group>
           <description>Maven Recipe: RPM Package.</description>
           <autoRequires>no</autoRequires>
+          <prefix>/</prefix>
           <requires>
             <require>${rpm.dependency.list}</require>
           </requires>

http://git-wip-us.apache.org/repos/asf/ambari/blob/18d5a696/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
index 4a980ee..1cb935b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
@@ -37,6 +37,7 @@ import org.apache.ambari.server.security.authorization.jwt.JwtAuthenticationProp
 import org.apache.ambari.server.security.encryption.CredentialProvider;
 import org.apache.ambari.server.state.stack.OsFamily;
 import org.apache.ambari.server.security.encryption.CertificateUtils;
+import org.apache.ambari.server.utils.AmbariPath;
 import org.apache.ambari.server.utils.Parallel;
 import org.apache.ambari.server.utils.ShellCommandUtil;
 import org.apache.commons.io.FileUtils;
@@ -57,7 +58,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Properties;
-
 import java.security.cert.CertificateException;
 import java.security.interfaces.RSAPublicKey;
 
@@ -81,23 +81,23 @@ public class Configuration {
    */
   public static final String PREFIX_DIR = "/var/lib/ambari-agent/data";
 
-  public static final String BOOTSTRAP_DIR_DEFAULT = "/var/run/ambari-server/bootstrap";
+  public static final String BOOTSTRAP_DIR_DEFAULT = AmbariPath.getPath("/var/run/ambari-server/bootstrap");
   public static final String VIEWS_DIR = "views.dir";
-  public static final String VIEWS_DIR_DEFAULT = "/var/lib/ambari-server/resources/views";
+  public static final String VIEWS_DIR_DEFAULT = AmbariPath.getPath("/var/lib/ambari-server/resources/views");
   public static final String VIEWS_VALIDATE = "views.validate";
   public static final String VIEWS_VALIDATE_DEFAULT = "false";
   public static final String VIEWS_REMOVE_UNDEPLOYED = "views.remove.undeployed";
   public static final String VIEWS_REMOVE_UNDEPLOYED_DEFAULT = "false";
   public static final String WEBAPP_DIR = "webapp.dir";
   public static final String BOOTSTRAP_SCRIPT = "bootstrap.script";
-  public static final String BOOTSTRAP_SCRIPT_DEFAULT = "/usr/bin/ambari_bootstrap";
+  public static final String BOOTSTRAP_SCRIPT_DEFAULT = AmbariPath.getPath("/usr/bin/ambari_bootstrap");
   public static final String BOOTSTRAP_SETUP_AGENT_SCRIPT = "bootstrap.setup_agent.script";
   public static final String BOOTSTRAP_SETUP_AGENT_PASSWORD = "bootstrap.setup_agent.password";
   public static final String BOOTSTRAP_MASTER_HOSTNAME = "bootstrap.master_host_name";
   public static final String RECOMMENDATIONS_DIR = "recommendations.dir";
-  public static final String RECOMMENDATIONS_DIR_DEFAULT = "/var/run/ambari-server/stack-recommendations";
+  public static final String RECOMMENDATIONS_DIR_DEFAULT = AmbariPath.getPath("/var/run/ambari-server/stack-recommendations");
   public static final String STACK_ADVISOR_SCRIPT = "stackadvisor.script";
-  public static final String STACK_ADVISOR_SCRIPT_DEFAULT = "/var/lib/ambari-server/resources/scripts/stack_advisor.py";
+  public static final String STACK_ADVISOR_SCRIPT_DEFAULT = AmbariPath.getPath("/var/lib/ambari-server/resources/scripts/stack_advisor.py");
   public static final String AMBARI_PYTHON_WRAP_KEY = "ambari.python.wrap";
   public static final String AMBARI_PYTHON_WRAP_DEFAULT = "ambari-python-wrap";
   public static final String API_AUTHENTICATED_USER = "api.authenticated.user";
@@ -302,7 +302,7 @@ public class Configuration {
   public static final String DEFAULT_SCHEDULER_START_DELAY_SECONDS = "120";
   public static final String DEFAULT_EXECUTION_SCHEDULER_WAIT_SECONDS = "1";
   public static final String SERVER_TMP_DIR_KEY = "server.tmp.dir";
-  public static final String SERVER_TMP_DIR_DEFAULT = "/var/lib/ambari-server/tmp";
+  public static final String SERVER_TMP_DIR_DEFAULT = AmbariPath.getPath("/var/lib/ambari-server/tmp");
   public static final String EXTERNAL_SCRIPT_TIMEOUT_KEY = "server.script.timeout";
   public static final String EXTERNAL_SCRIPT_TIMEOUT_DEFAULT = "5000";
   public static final String DEF_ARCHIVE_EXTENSION;
@@ -316,7 +316,7 @@ public class Configuration {
   public static final String KDC_CONNECTION_CHECK_TIMEOUT_KEY = "kdcserver.connection.check.timeout";
   public static final String KDC_CONNECTION_CHECK_TIMEOUT_DEFAULT = "10000";
   public static final String KERBEROS_KEYTAB_CACHE_DIR_KEY = "kerberos.keytab.cache.dir";
-  public static final String KERBEROS_KEYTAB_CACHE_DIR_DEFAULT = "/var/lib/ambari-server/data/cache";
+  public static final String KERBEROS_KEYTAB_CACHE_DIR_DEFAULT = AmbariPath.getPath("/var/lib/ambari-server/data/cache");
   public static final String KERBEROS_CHECK_JAAS_CONFIGURATION_KEY = "kerberos.check.jaas.configuration";
   public static final String KERBEROS_CHECK_JAAS_CONFIGURATION_DEFAULT = "false";
 
@@ -375,7 +375,7 @@ public class Configuration {
   protected static final long SERVER_HRC_STATUS_SUMMARY_CACHE_SIZE_DEFAULT = 10000L;
   protected static final long SERVER_HRC_STATUS_SUMMARY_CACHE_EXPIRY_DURATION_DEFAULT = 30; //minutes
 
-  private static final String CUSTOM_ACTION_DEFINITION_DEF_VALUE = "/var/lib/ambari-server/resources/custom_action_definitions";
+  private static final String CUSTOM_ACTION_DEFINITION_DEF_VALUE = AmbariPath.getPath("/var/lib/ambari-server/resources/custom_action_definitions");
 
   private static final long SERVER_EC_CACHE_SIZE_DEFAULT = 10000L;
   private static final String SERVER_STALE_CONFIG_CACHE_ENABLED_DEFAULT = "true";
@@ -393,8 +393,8 @@ public class Configuration {
   private static final String SRVR_DISABLED_CIPHERS_DEFAULT = "";
   private static final String SRVR_DISABLED_PROTOCOLS_DEFAULT = "";
   private static final String PASSPHRASE_ENV_DEFAULT = "AMBARI_PASSPHRASE";
-  private static final String RESOURCES_DIR_DEFAULT = "/var/lib/ambari-server/resources/";
-  private static final String SHARED_RESOURCES_DIR_DEFAULT = "/usr/lib/ambari-server/lib/ambari_commons/resources";
+  private static final String RESOURCES_DIR_DEFAULT = AmbariPath.getPath("/var/lib/ambari-server/resources/");
+  private static final String SHARED_RESOURCES_DIR_DEFAULT = AmbariPath.getPath("/usr/lib/ambari-server/lib/ambari_commons/resources");
   private static final String ANONYMOUS_AUDIT_NAME_KEY = "anonymous.audit.name";
 
   private static final int CLIENT_API_PORT_DEFAULT = 8080;
@@ -996,7 +996,7 @@ public class Configuration {
 
   public String getBootSetupAgentScript() {
     return properties.getProperty(BOOTSTRAP_SETUP_AGENT_SCRIPT,
-        "/usr/lib/python2.6/site-packages/ambari_server/setupAgent.py");
+        AmbariPath.getPath("/usr/lib/python2.6/site-packages/ambari_server/setupAgent.py"));
   }
 
   public String getBootSetupAgentPassword() {

http://git-wip-us.apache.org/repos/asf/ambari/blob/18d5a696/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
index ad4a59b..4695990 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
@@ -29,6 +29,7 @@ import com.google.inject.Singleton;
 import com.google.inject.name.Named;
 import com.google.inject.persist.Transactional;
 import com.sun.jersey.spi.container.servlet.ServletContainer;
+
 import org.apache.ambari.eventdb.webservice.WorkflowJsonService;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.StateRecoveryManager;
@@ -103,6 +104,7 @@ import org.apache.ambari.server.topology.BlueprintFactory;
 import org.apache.ambari.server.topology.SecurityConfigurationFactory;
 import org.apache.ambari.server.topology.TopologyManager;
 import org.apache.ambari.server.topology.TopologyRequestFactoryImpl;
+import org.apache.ambari.server.utils.AmbariPath;
 import org.apache.ambari.server.utils.RetryHelper;
 import org.apache.ambari.server.utils.StageUtils;
 import org.apache.ambari.server.utils.VersionUtils;
@@ -137,6 +139,7 @@ import org.springframework.web.filter.DelegatingFilterProxy;
 
 import javax.crypto.BadPaddingException;
 import javax.servlet.DispatcherType;
+
 import java.io.File;
 import java.io.IOException;
 import java.net.Authenticator;
@@ -166,8 +169,7 @@ public class AmbariServer {
 
   private static final String CLASSPATH_CHECK_CLASS = "org/apache/ambari/server/controller/AmbariServer.class";
   private static final String CLASSPATH_SANITY_CHECK_FAILURE_MESSAGE = "%s class is found in multiple jar files. Possible reasons include multiple ambari server jar files in the ambari classpath.\n" +
-      "Check for additional ambari server jar files and check that /usr/lib/ambari-server/ambari-server*.jar matches only one file.";
-
+      String.format("Check for additional ambari server jar files and check that %s matches only one file.", AmbariPath.getPath("/usr/lib/ambari-server/ambari-server*.jar"));
   static {
     Enumeration<URL> ambariServerClassUrls;
     try {

http://git-wip-us.apache.org/repos/asf/ambari/blob/18d5a696/ambari-server/src/main/java/org/apache/ambari/server/security/encryption/MasterKeyServiceImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/security/encryption/MasterKeyServiceImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/security/encryption/MasterKeyServiceImpl.java
index 19056fa..6c52cf4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/security/encryption/MasterKeyServiceImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/security/encryption/MasterKeyServiceImpl.java
@@ -20,6 +20,7 @@ package org.apache.ambari.server.security.encryption;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.utils.AmbariPath;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
@@ -104,7 +105,7 @@ public class MasterKeyServiceImpl implements MasterKeyService {
 
   public static void main(String args[]) {
     String masterKey = "ThisissomeSecretPassPhrasse";
-    String masterKeyLocation = "/var/lib/ambari-server/keys/master";
+    String masterKeyLocation = AmbariPath.getPath("/var/lib/ambari-server/keys/master");
     boolean persistMasterKey = false;
     if (args != null && args.length > 0) {
       masterKey = args[0];

http://git-wip-us.apache.org/repos/asf/ambari/blob/18d5a696/ambari-server/src/main/java/org/apache/ambari/server/utils/AmbariPath.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/utils/AmbariPath.java b/ambari-server/src/main/java/org/apache/ambari/server/utils/AmbariPath.java
new file mode 100644
index 0000000..d73226d
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/AmbariPath.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.utils;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class AmbariPath {
+  private static Logger LOG = LoggerFactory.getLogger(AmbariPath.class);
+  
+  public static final String AMBARI_SERVER_ROOT_ENV_VARIABLE = "ROOT";
+  public static final String rootDirectory = System.getenv(AMBARI_SERVER_ROOT_ENV_VARIABLE);
+  
+  public static String getPath(String path) {
+    if(rootDirectory == null) {  
+      LOG.warn("Cannot get $ROOT enviroment varaible. Installed to custom root directory Ambari might not work correctly.");
+      return path;
+    }
+    String result = (rootDirectory + path).replaceAll("/+","/");
+    LOG.info("Resolved Ambari path: " + result);
+    return result;
+  }
+}
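
For context, AmbariPath.getPath() is the entry point this patch funnels the previously hard-coded server paths through (see the AmbariServer and MasterKeyServiceImpl hunks above), so a server installed under a custom root, given via the ROOT environment variable, still resolves its files. A minimal sketch of a caller, using only the getPath(String) API introduced above; the example class and printed output are illustrative and not part of this commit:

    import org.apache.ambari.server.utils.AmbariPath;

    public class AmbariPathExample {
      public static void main(String[] args) {
        // With ROOT unset, getPath() logs a warning and returns the path unchanged.
        // With ROOT=/opt/ambari it returns /opt/ambari/var/lib/ambari-server/keys/master,
        // duplicate slashes being collapsed by the replaceAll("/+", "/") call in getPath().
        String masterKeyLocation = AmbariPath.getPath("/var/lib/ambari-server/keys/master");
        System.out.println(masterKeyLocation);
      }
    }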

http://git-wip-us.apache.org/repos/asf/ambari/blob/18d5a696/ambari-server/src/main/package/deb/control/postinst
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/package/deb/control/postinst b/ambari-server/src/main/package/deb/control/postinst
index 886e4fc..9546009 100644
--- a/ambari-server/src/main/package/deb/control/postinst
+++ b/ambari-server/src/main/package/deb/control/postinst
@@ -16,7 +16,7 @@
 
 # Warning: don't add changes to this script directly, please add changes to install-helper.sh.
 
-INSTALL_HELPER="${AMBARI_ROOT_FOLDER}/var/lib/ambari-server/install-helper.sh"
+INSTALL_HELPER="${RPM_INSTALL_PREFIX}/var/lib/ambari-server/install-helper.sh"
 
 if [ "$1" == "configure" ] ; then
   if [ -f "$INSTALL_HELPER" ]; then

http://git-wip-us.apache.org/repos/asf/ambari/blob/18d5a696/ambari-server/src/main/package/deb/control/preinst
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/package/deb/control/preinst b/ambari-server/src/main/package/deb/control/preinst
index 9dcd8bf..eb94f95 100644
--- a/ambari-server/src/main/package/deb/control/preinst
+++ b/ambari-server/src/main/package/deb/control/preinst
@@ -14,26 +14,28 @@
 # See the License for the specific language governing permissions and
 # limitations under the License
 
-STACKS_FOLDER="${AMBARI_ROOT_FOLDER}/var/lib/ambari-server/resources/stacks"
-STACKS_FOLDER_OLD=${AMBARI_ROOT_FOLDER}/var/lib/ambari-server/resources/stacks_$(date '+%d_%m_%y_%H_%M').old
+ROOT="${RPM_INSTALL_PREFIX}"
 
-COMMON_SERVICES_FOLDER="${AMBARI_ROOT_FOLDER}/var/lib/ambari-server/resources/common-services"
-COMMON_SERVICES_FOLDER_OLD=${AMBARI_ROOT_FOLDER}/var/lib/ambari-server/resources/common-services_$(date '+%d_%m_%y_%H_%M').old
+STACKS_FOLDER="${ROOT}/var/lib/ambari-server/resources/stacks"
+STACKS_FOLDER_OLD=${ROOT}/var/lib/ambari-server/resources/stacks_$(date '+%d_%m_%y_%H_%M').old
 
-AMBARI_PROPERTIES="${AMBARI_ROOT_FOLDER}/etc/ambari-server/conf/ambari.properties"
+COMMON_SERVICES_FOLDER="${ROOT}/var/lib/ambari-server/resources/common-services"
+COMMON_SERVICES_FOLDER_OLD=${ROOT}/var/lib/ambari-server/resources/common-services_$(date '+%d_%m_%y_%H_%M').old
+
+AMBARI_PROPERTIES="${ROOT}/etc/ambari-server/conf/ambari.properties"
 AMBARI_PROPERTIES_OLD="$AMBARI_PROPERTIES.rpmsave"
 
-AMBARI_ENV="${AMBARI_ROOT_FOLDER}/var/lib/ambari-server/ambari-env.sh"
+AMBARI_ENV="${ROOT}/var/lib/ambari-server/ambari-env.sh"
 AMBARI_ENV_OLD="$AMBARI_ENV.rpmsave"
 
-AMBARI_KRB_JAAS_LOGIN_FILE="${AMBARI_ROOT_FOLDER}/etc/ambari-server/conf/krb5JAASLogin.conf"
+AMBARI_KRB_JAAS_LOGIN_FILE="${ROOT}/etc/ambari-server/conf/krb5JAASLogin.conf"
 AMBARI_KRB_JAAS_LOGIN_FILE_OLD="$AMBARI_KRB_JAAS_LOGIN_FILE.rpmsave"
 
-AMBARI_VIEWS_FOLDER="${AMBARI_ROOT_FOLDER}/var/lib/ambari-server/resources/views"
+AMBARI_VIEWS_FOLDER="${ROOT}/var/lib/ambari-server/resources/views"
 AMBARI_VIEWS_BACKUP_FOLDER="$AMBARI_VIEWS_FOLDER/backups"
 
-SERVER_CONF_SAVE="${AMBARI_ROOT_FOLDER}/etc/ambari-server/conf.save"
-SERVER_CONF_SAVE_BACKUP="${AMBARI_ROOT_FOLDER}/etc/ambari-server/conf_$(date '+%d_%m_%y_%H_%M').save"
+SERVER_CONF_SAVE="${ROOT}/etc/ambari-server/conf.save"
+SERVER_CONF_SAVE_BACKUP="${ROOT}/etc/ambari-server/conf_$(date '+%d_%m_%y_%H_%M').save"
 
 if [ -d "$SERVER_CONF_SAVE" ]
 then

http://git-wip-us.apache.org/repos/asf/ambari/blob/18d5a696/ambari-server/src/main/package/deb/control/prerm
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/package/deb/control/prerm b/ambari-server/src/main/package/deb/control/prerm
index cac2abe..b85bd3b 100644
--- a/ambari-server/src/main/package/deb/control/prerm
+++ b/ambari-server/src/main/package/deb/control/prerm
@@ -16,7 +16,7 @@
 
 # Warning: don't add changes to this script directly, please add changes to install-helper.sh.
 
-INSTALL_HELPER="${AMBARI_ROOT_FOLDER}/var/lib/ambari-server/install-helper.sh"
+INSTALL_HELPER="${RPM_INSTALL_PREFIX}/var/lib/ambari-server/install-helper.sh"
 
 if [ "$1" == "remove" ] ; then # Action is uninstall
     if [ -f "$INSTALL_HELPER" ]; then

http://git-wip-us.apache.org/repos/asf/ambari/blob/18d5a696/ambari-server/src/main/package/rpm/postinstall.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/package/rpm/postinstall.sh b/ambari-server/src/main/package/rpm/postinstall.sh
index 5ca1d17..1e8e0f0 100644
--- a/ambari-server/src/main/package/rpm/postinstall.sh
+++ b/ambari-server/src/main/package/rpm/postinstall.sh
@@ -15,15 +15,17 @@
 
 # Warning: don't add changes to this script directly, please add changes to install-helper.sh.
 
+INSTALL_HELPER="${RPM_INSTALL_PREFIX}/var/lib/ambari-server/install-helper.sh"
+
 case "$1" in
   1) # Action install
-    if [ -f "/var/lib/ambari-server/install-helper.sh" ]; then
-        /var/lib/ambari-server/install-helper.sh install
+    if [ -f "$INSTALL_HELPER" ]; then
+        $INSTALL_HELPER install
     fi
   ;;
   2) # Action upgrade
-    if [ -f "/var/lib/ambari-server/install-helper.sh" ]; then
-        /var/lib/ambari-server/install-helper.sh upgrade
+    if [ -f "$INSTALL_HELPER" ]; then
+        $INSTALL_HELPER upgrade
     fi
   ;;
 esac

http://git-wip-us.apache.org/repos/asf/ambari/blob/18d5a696/ambari-server/src/main/package/rpm/posttrans_server.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/package/rpm/posttrans_server.sh b/ambari-server/src/main/package/rpm/posttrans_server.sh
index 6df3c5d..6aa5612 100644
--- a/ambari-server/src/main/package/rpm/posttrans_server.sh
+++ b/ambari-server/src/main/package/rpm/posttrans_server.sh
@@ -14,10 +14,12 @@
 # limitations under the License
 
 
-RESOURCE_MANAGEMENT_DIR="/usr/lib/python2.6/site-packages/resource_management"
-RESOURCE_MANAGEMENT_DIR_SERVER="/usr/lib/ambari-server/lib/resource_management"
-JINJA_DIR="/usr/lib/python2.6/site-packages/ambari_jinja2"
-JINJA_SERVER_DIR="/usr/lib/ambari-server/lib/ambari_jinja2"
+ROOT="${RPM_INSTALL_PREFIX}"
+
+RESOURCE_MANAGEMENT_DIR="${ROOT}/usr/lib/python2.6/site-packages/resource_management"
+RESOURCE_MANAGEMENT_DIR_SERVER="${ROOT}/usr/lib/ambari-server/lib/resource_management"
+JINJA_DIR="${ROOT}/usr/lib/python2.6/site-packages/ambari_jinja2"
+JINJA_SERVER_DIR="${ROOT}/usr/lib/ambari-server/lib/ambari_jinja2"
 
 # remove RESOURCE_MANAGEMENT_DIR if it's a directory
 if [ -d "$RESOURCE_MANAGEMENT_DIR" ]; then  # resource_management dir exists

http://git-wip-us.apache.org/repos/asf/ambari/blob/18d5a696/ambari-server/src/main/package/rpm/preinstall.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/package/rpm/preinstall.sh b/ambari-server/src/main/package/rpm/preinstall.sh
index 9533964..a8e781e 100644
--- a/ambari-server/src/main/package/rpm/preinstall.sh
+++ b/ambari-server/src/main/package/rpm/preinstall.sh
@@ -13,18 +13,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License
 
-STACKS_FOLDER="/var/lib/ambari-server/resources/stacks"
-STACKS_FOLDER_OLD=/var/lib/ambari-server/resources/stacks_$(date '+%d_%m_%y_%H_%M').old
+ROOT="${RPM_INSTALL_PREFIX}"
 
-COMMON_SERVICES_FOLDER="/var/lib/ambari-server/resources/common-services"
-COMMON_SERVICES_FOLDER_OLD=/var/lib/ambari-server/resources/common-services_$(date '+%d_%m_%y_%H_%M').old
+STACKS_FOLDER="${ROOT}/var/lib/ambari-server/resources/stacks"
+STACKS_FOLDER_OLD="${ROOT}/var/lib/ambari-server/resources/stacks_$(date '+%d_%m_%y_%H_%M').old"
 
-AMBARI_VIEWS_FOLDER="/var/lib/ambari-server/resources/views"
+COMMON_SERVICES_FOLDER="${ROOT}/var/lib/ambari-server/resources/common-services"
+COMMON_SERVICES_FOLDER_OLD="${ROOT}/var/lib/ambari-server/resources/common-services_$(date '+%d_%m_%y_%H_%M').old"
+
+AMBARI_VIEWS_FOLDER="${ROOT}/var/lib/ambari-server/resources/views"
 AMBARI_VIEWS_BACKUP_FOLDER="$AMBARI_VIEWS_FOLDER/backups"
 
-if [ -d "/etc/ambari-server/conf.save" ]
+SERVER_CONF_SAVE="${ROOT}/etc/ambari-server/conf.save"
+SERVER_CONF_SAVE_BACKUP="${ROOT}/etc/ambari-server/conf_$(date '+%d_%m_%y_%H_%M').save"
+
+if [ -d "$SERVER_CONF_SAVE" ]
 then
-    mv /etc/ambari-server/conf.save /etc/ambari-server/conf_$(date '+%d_%m_%y_%H_%M').save
+    mv "$SERVER_CONF_SAVE" "$SERVER_CONF_SAVE_BACKUP"
 fi
 
 if [ -d "$STACKS_FOLDER" ]

http://git-wip-us.apache.org/repos/asf/ambari/blob/18d5a696/ambari-server/src/main/package/rpm/preremove.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/package/rpm/preremove.sh b/ambari-server/src/main/package/rpm/preremove.sh
index 445e9ad..bd20323 100644
--- a/ambari-server/src/main/package/rpm/preremove.sh
+++ b/ambari-server/src/main/package/rpm/preremove.sh
@@ -19,9 +19,11 @@
 
 # Warning: don't add changes to this script directly, please add changes to install-helper.sh.
 
+INSTALL_HELPER="${RPM_INSTALL_PREFIX}/var/lib/ambari-server/install-helper.sh"
+
 if [ "$1" -eq 0 ]; then  # Action is uninstall
-    if [ -f "/var/lib/ambari-server/install-helper.sh" ]; then
-      /var/lib/ambari-server/install-helper.sh remove
+    if [ -f "$INSTALL_HELPER" ]; then
+      $INSTALL_HELPER remove
     fi
 fi
 


[03/11] ambari git commit: Revert "AMBARI-14798. Users cannot login with uppercase username (Oliver Szabo via rlevas)"

Posted by nc...@apache.org.
Revert "AMBARI-14798. Users cannot login with uppercase username (Oliver Szabo via rlevas)"

This reverts commit e1ca24160027d73cf27b90b35bc0757442a0cbdd.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f7ebe912
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f7ebe912
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f7ebe912

Branch: refs/heads/branch-dev-patch-upgrade
Commit: f7ebe912b7647caa97b8c28f243d0f8a92f5dfb6
Parents: e1ca241
Author: Robert Levas <rl...@hortonworks.com>
Authored: Tue Feb 23 11:13:11 2016 -0500
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Tue Feb 23 11:13:11 2016 -0500

----------------------------------------------------------------------
 .../server/api/services/ActiveWidgetLayoutService.java | 10 +++++++---
 .../server/api/services/UserAuthorizationService.java  |  4 +---
 .../server/api/services/UserPrivilegeService.java      |  3 +--
 .../apache/ambari/server/api/services/UserService.java |  3 +--
 .../api/services/UserAuthorizationServiceTest.java     | 12 ------------
 .../server/api/services/UserPrivilegeServiceTest.java  | 13 -------------
 6 files changed, 10 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f7ebe912/ambari-server/src/main/java/org/apache/ambari/server/api/services/ActiveWidgetLayoutService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/ActiveWidgetLayoutService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/ActiveWidgetLayoutService.java
index a0c3386..c4403df 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/ActiveWidgetLayoutService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/ActiveWidgetLayoutService.java
@@ -17,21 +17,25 @@
  */
 package org.apache.ambari.server.api.services;
 
+import com.sun.jersey.core.util.Base64;
 import org.apache.ambari.server.api.resources.ResourceInstance;
 import org.apache.ambari.server.controller.spi.Resource;
 
+import javax.ws.rs.DELETE;
 import javax.ws.rs.GET;
+import javax.ws.rs.POST;
 import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
 import javax.ws.rs.Produces;
 import javax.ws.rs.core.Context;
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.UriInfo;
+import java.nio.charset.Charset;
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.commons.lang.StringUtils;
-
 /**
  * WidgetLayout Service
  */
@@ -69,7 +73,7 @@ public class ActiveWidgetLayoutService extends BaseService {
 
   private ResourceInstance createResource(String widgetLayoutId) {
     Map<Resource.Type,String> mapIds = new HashMap<Resource.Type, String>();
-    mapIds.put(Resource.Type.User, StringUtils.lowerCase(userName));
+    mapIds.put(Resource.Type.User, userName);
     return createResource(Resource.Type.ActiveWidgetLayout, mapIds);
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7ebe912/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserAuthorizationService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserAuthorizationService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserAuthorizationService.java
index c288fdb..6861d3d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserAuthorizationService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserAuthorizationService.java
@@ -32,8 +32,6 @@ import javax.ws.rs.core.UriInfo;
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.commons.lang.StringUtils;
-
 /**
  * UserAuthorizationService is a read-only service responsible for user authorization resource requests.
  * <p/>
@@ -96,7 +94,7 @@ public class UserAuthorizationService extends BaseService {
    */
   protected ResourceInstance createAuthorizationResource(String authorizationId) {
     Map<Resource.Type, String> mapIds = new HashMap<Resource.Type, String>();
-    mapIds.put(Resource.Type.User, StringUtils.lowerCase(username));
+    mapIds.put(Resource.Type.User, username);
     mapIds.put(Resource.Type.UserAuthorization, authorizationId);
     return createResource(Resource.Type.UserAuthorization, mapIds);
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7ebe912/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserPrivilegeService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserPrivilegeService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserPrivilegeService.java
index 86c4995..80769cf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserPrivilegeService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserPrivilegeService.java
@@ -29,7 +29,6 @@ import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.UriInfo;
 
-import org.apache.commons.lang.StringUtils;
 /**
  *  Service responsible for user privilege resource requests.
  */
@@ -73,7 +72,7 @@ public class UserPrivilegeService extends PrivilegeService {
   @Override
   protected ResourceInstance createPrivilegeResource(String privilegeId) {
     final Map<Resource.Type, String> mapIds = new HashMap<Resource.Type, String>();
-    mapIds.put(Resource.Type.User, StringUtils.lowerCase(userName));
+    mapIds.put(Resource.Type.User, userName);
     mapIds.put(Resource.Type.UserPrivilege, privilegeId);
     return createResource(Resource.Type.UserPrivilege, mapIds);
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7ebe912/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserService.java
index c46c373..fea5eca 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserService.java
@@ -31,7 +31,6 @@ import javax.ws.rs.core.UriInfo;
 
 import org.apache.ambari.server.api.resources.ResourceInstance;
 import org.apache.ambari.server.controller.spi.Resource;
-
 import org.apache.commons.lang.StringUtils;
 
 import java.util.Collections;
@@ -172,6 +171,6 @@ public class UserService extends BaseService {
    */
   private ResourceInstance createUserResource(String userName) {
     return createResource(Resource.Type.User,
-        Collections.singletonMap(Resource.Type.User, StringUtils.lowerCase(userName)));
+        Collections.singletonMap(Resource.Type.User, userName));
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7ebe912/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserAuthorizationServiceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserAuthorizationServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserAuthorizationServiceTest.java
index c3270bb..9627d19 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserAuthorizationServiceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserAuthorizationServiceTest.java
@@ -21,8 +21,6 @@ package org.apache.ambari.server.api.services;
 import org.apache.ambari.server.api.resources.ResourceInstance;
 import org.apache.ambari.server.api.services.parsers.RequestBodyParser;
 import org.apache.ambari.server.api.services.serializers.ResultSerializer;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.junit.Test;
 
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.UriInfo;
@@ -56,16 +54,6 @@ public class UserAuthorizationServiceTest extends BaseServiceTest {
     return listInvocations;
   }
 
-  @Test
-  public void testCreateAuthorizationResourceWithUppercaseUsername() {
-    // GIVEN
-    UserAuthorizationService userAuthorizationService= new UserAuthorizationService("Jdoe");
-    // WHEN
-    ResourceInstance result = userAuthorizationService.createAuthorizationResource("id");
-    // THEN
-    assertEquals("jdoe", result.getKeyValueMap().get(Resource.Type.User));
-  }
-
 
   private class TestUserAuthorizationService extends UserAuthorizationService {
     private String id;

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7ebe912/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserPrivilegeServiceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserPrivilegeServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserPrivilegeServiceTest.java
index db2d38a..269315a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserPrivilegeServiceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/UserPrivilegeServiceTest.java
@@ -33,13 +33,10 @@ import junit.framework.Assert;
 import org.apache.ambari.server.api.resources.ResourceInstance;
 import org.apache.ambari.server.api.services.parsers.RequestBodyParser;
 import org.apache.ambari.server.api.services.serializers.ResultSerializer;
-import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.Resource.Type;
 import org.easymock.EasyMock;
 import org.junit.Test;
 
-import static org.junit.Assert.assertEquals;
-
 /**
  * Unit tests for GroupService.
  */
@@ -85,16 +82,6 @@ public class UserPrivilegeServiceTest extends BaseServiceTest {
     }
   }
 
-  @Test
-  public void testCreatePrivilegeResourcesWithUppercaseUsername() {
-    // GIVEN
-    UserPrivilegeService userPrivilegeService = new UserPrivilegeService("User");
-    // WHEN
-    ResourceInstance result = userPrivilegeService.createPrivilegeResource("test");
-    // THEN
-    assertEquals( "user", result.getKeyValueMap().get(Resource.Type.User));
-  }
-
   private class TestUserPrivilegeService extends UserPrivilegeService {
 
     public TestUserPrivilegeService() {


[10/11] ambari git commit: AMBARI-15141. Start all services request aborts in the middle and hosts go into heartbeat-lost state. (mpapirkovskyy)

Posted by nc...@apache.org.
AMBARI-15141. Start all services request aborts in the middle and hosts go into heartbeat-lost state. (mpapirkovskyy)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/083ac6da
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/083ac6da
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/083ac6da

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 083ac6dab5cf59c01da054eb656507c089a54620
Parents: 9d7ff5f
Author: Myroslav Papirkovskyi <mp...@hortonworks.com>
Authored: Tue Feb 23 13:01:13 2016 +0200
Committer: Myroslav Papirkovskyi <mp...@hortonworks.com>
Committed: Tue Feb 23 21:04:22 2016 +0200

----------------------------------------------------------------------
 .../ambari/server/agent/HeartBeatHandler.java   |  582 +------
 .../ambari/server/agent/HeartbeatMonitor.java   |    6 +
 .../ambari/server/agent/HeartbeatProcessor.java |  773 +++++++++
 .../ambari/server/orm/dao/HostVersionDAO.java   |   78 +-
 .../server/orm/entities/HostVersionEntity.java  |    9 +
 .../server/state/cluster/ClusterImpl.java       |    6 +-
 .../svccomphost/ServiceComponentHostImpl.java   |   72 +-
 .../server/agent/HeartbeatProcessorTest.java    | 1290 +++++++++++++++
 .../server/agent/HeartbeatTestHelper.java       |  229 +++
 .../server/agent/TestHeartbeatHandler.java      | 1489 ++----------------
 10 files changed, 2559 insertions(+), 1975 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/083ac6da/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
index 248ce4b..ba14446 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
@@ -126,6 +126,7 @@ public class HeartBeatHandler {
   private final ActionQueue actionQueue;
   private final ActionManager actionManager;
   private HeartbeatMonitor heartbeatMonitor;
+  private HeartbeatProcessor heartbeatProcessor;
 
   @Inject
   private Injector injector;
@@ -137,38 +138,11 @@ public class HeartBeatHandler {
   private AmbariMetaInfo ambariMetaInfo;
 
   @Inject
-  private ActionMetadata actionMetadata;
-
-  @Inject
-  private Gson gson;
-
-  @Inject
   private ConfigHelper configHelper;
 
   @Inject
-  private HostDAO hostDAO;
-
-  @Inject
   private AlertDefinitionHash alertDefinitionHash;
 
-  /**
-   * Publishes {@link AlertEvent} instances.
-   */
-  @Inject
-  private AlertEventPublisher alertEventPublisher;
-
-  @Inject
-  private AmbariEventPublisher ambariEventPublisher;
-
-  @Inject
-  private VersionEventPublisher versionEventPublisher;
-
-
-  /**
-   * KerberosPrincipalHostDAO used to set and get Kerberos principal details
-   */
-  @Inject
-  private KerberosPrincipalHostDAO kerberosPrincipalHostDAO;
 
   /**
    * KerberosIdentityDataFileReaderFactory used to create KerberosIdentityDataFileReader instances
@@ -187,10 +161,12 @@ public class HeartBeatHandler {
     actionQueue = aq;
     actionManager = am;
     heartbeatMonitor = new HeartbeatMonitor(fsm, aq, am, 60000, injector);
+    heartbeatProcessor = new HeartbeatProcessor(fsm, am, heartbeatMonitor, injector); //TODO modify to match pattern
     injector.injectMembers(this);
   }
 
   public void start() {
+    heartbeatProcessor.startAsync();
     heartbeatMonitor.start();
   }
 
@@ -198,6 +174,14 @@ public class HeartBeatHandler {
     this.heartbeatMonitor = heartbeatMonitor;
   }
 
+  public void setHeartbeatProcessor(HeartbeatProcessor heartbeatProcessor) {
+    this.heartbeatProcessor = heartbeatProcessor;
+  }
+
+  public HeartbeatProcessor getHeartbeatProcessor() {
+    return heartbeatProcessor;
+  }
+
   public HeartBeatResponse handleHeartBeat(HeartBeat heartbeat)
       throws AmbariException {
     long now = System.currentTimeMillis();
@@ -283,18 +267,7 @@ public class HeartBeatHandler {
       return createRegisterCommand();
     }
 
-    // Examine heartbeat for command reports
-    processCommandReports(heartbeat, hostname, clusterFsm, now);
-
-    // Examine heartbeat for component live status reports
-    processStatusReports(heartbeat, hostname, clusterFsm);
-
-    // Calculate host status
-    // NOTE: This step must be after processing command/status reports
-    processHostStatus(heartbeat, hostname);
-
-    // Example heartbeat for alerts from the host or its components
-    processAlerts(heartbeat, hostname);
+    heartbeatProcessor.addHeartbeat(heartbeat);
 
     // Send commands if node is active
     if (hostObject.getState().equals(HostState.HEALTHY)) {
@@ -305,33 +278,7 @@ public class HeartBeatHandler {
     return response;
   }
 
-  /**
-   * Extracts all of the {@link Alert}s from the heartbeat and fires
-   * {@link AlertEvent}s for each one. If there is a problem looking up the
-   * cluster, then alerts will not be processed.
-   *
-   * @param heartbeat
-   *          the heartbeat to process.
-   * @param hostname
-   *          the host that the heartbeat is for.
-   */
-  protected void processAlerts(HeartBeat heartbeat, String hostname) {
-
-    if (null == hostname || null == heartbeat) {
-      return;
-    }
 
-    if (null != heartbeat.getAlerts()) {
-      AlertEvent event = new AlertReceivedEvent(heartbeat.getAlerts());
-      for (Alert alert : event.getAlerts()) {
-        if (alert.getHostName() == null) {
-          alert.setHostName(hostname);
-        }
-      }
-      alertEventPublisher.publish(event);
-
-    }
-  }
 
   protected void processRecoveryReport(RecoveryReport recoveryReport, String hostname) throws AmbariException {
     LOG.debug("Received recovery report: " + recoveryReport.toString());
@@ -339,480 +286,6 @@ public class HeartBeatHandler {
     host.setRecoveryReport(recoveryReport);
   }
 
-  protected void processHostStatus(HeartBeat heartbeat, String hostname) throws AmbariException {
-
-    Host host = clusterFsm.getHost(hostname);
-    HealthStatus healthStatus = host.getHealthStatus().getHealthStatus();
-
-    if (!healthStatus.equals(HostHealthStatus.HealthStatus.UNKNOWN)) {
-
-      List<ComponentStatus> componentStatuses = heartbeat.getComponentStatus();
-      //Host status info could be calculated only if agent returned statuses in heartbeat
-      //Or, if a command is executed that can change component status
-      boolean calculateHostStatus = false;
-      String clusterName = null;
-      if (componentStatuses.size() > 0) {
-        calculateHostStatus = true;
-        for (ComponentStatus componentStatus : componentStatuses) {
-          clusterName = componentStatus.getClusterName();
-          break;
-        }
-      }
-
-      if (!calculateHostStatus) {
-        List<CommandReport> reports = heartbeat.getReports();
-        for (CommandReport report : reports) {
-          if (RoleCommand.ACTIONEXECUTE.toString().equals(report.getRoleCommand())) {
-            continue;
-          }
-
-          String service = report.getServiceName();
-          if (actionMetadata.getActions(service.toLowerCase()).contains(report.getRole())) {
-            continue;
-          }
-          if (report.getStatus().equals("COMPLETED")) {
-            calculateHostStatus = true;
-            clusterName = report.getClusterName();
-            break;
-          }
-        }
-      }
-
-      if (calculateHostStatus) {
-        //Use actual component status to compute the host status
-        int masterCount = 0;
-        int mastersRunning = 0;
-        int slaveCount = 0;
-        int slavesRunning = 0;
-
-        StackId stackId;
-        Cluster cluster = clusterFsm.getCluster(clusterName);
-        stackId = cluster.getDesiredStackVersion();
-
-        MaintenanceStateHelper psh = injector.getInstance(MaintenanceStateHelper.class);
-
-        List<ServiceComponentHost> scHosts = cluster.getServiceComponentHosts(heartbeat.getHostname());
-        for (ServiceComponentHost scHost : scHosts) {
-          ComponentInfo componentInfo =
-              ambariMetaInfo.getComponent(stackId.getStackName(),
-                  stackId.getStackVersion(), scHost.getServiceName(),
-                  scHost.getServiceComponentName());
-
-          String status = scHost.getState().name();
-
-          String category = componentInfo.getCategory();
-
-          if (MaintenanceState.OFF == psh.getEffectiveState(scHost, host)) {
-            if (category.equals("MASTER")) {
-              ++masterCount;
-              if (status.equals("STARTED")) {
-                ++mastersRunning;
-              }
-            } else if (category.equals("SLAVE")) {
-              ++slaveCount;
-              if (status.equals("STARTED")) {
-                ++slavesRunning;
-              }
-            }
-          }
-        }
-
-        if (masterCount == mastersRunning && slaveCount == slavesRunning) {
-          healthStatus = HealthStatus.HEALTHY;
-        } else if (masterCount > 0 && mastersRunning < masterCount) {
-          healthStatus = HealthStatus.UNHEALTHY;
-        } else {
-          healthStatus = HealthStatus.ALERT;
-        }
-
-        host.setStatus(healthStatus.name());
-        host.persist();
-      }
-
-      //If host doesn't belong to any cluster
-      if ((clusterFsm.getClustersForHost(host.getHostName())).size() == 0) {
-        healthStatus = HealthStatus.HEALTHY;
-        host.setStatus(healthStatus.name());
-        host.persist();
-      }
-    }
-  }
-
-  protected void processCommandReports(
-      HeartBeat heartbeat, String hostname, Clusters clusterFsm, long now)
-      throws AmbariException {
-    List<CommandReport> reports = heartbeat.getReports();
-
-    // Cache HostRoleCommand entities because we will need them few times
-    List<Long> taskIds = new ArrayList<Long>();
-    for (CommandReport report : reports) {
-      taskIds.add(report.getTaskId());
-    }
-    Collection<HostRoleCommand> commands = actionManager.getTasks(taskIds);
-
-    Iterator<HostRoleCommand> hostRoleCommandIterator = commands.iterator();
-    for (CommandReport report : reports) {
-
-      Long clusterId = null;
-      if (report.getClusterName() != null) {
-        try {
-          Cluster cluster = clusterFsm.getCluster(report.getClusterName());
-          clusterId = Long.valueOf(cluster.getClusterId());
-        } catch (AmbariException e) {
-        }
-      }
-
-      LOG.debug("Received command report: " + report);
-      // Fetch HostRoleCommand that corresponds to a given task ID
-      HostRoleCommand hostRoleCommand = hostRoleCommandIterator.next();
-      HostEntity hostEntity = hostDAO.findByName(hostname);
-      if (hostEntity == null) {
-        LOG.error("Received a command report and was unable to retrieve HostEntity for hostname = " + hostname);
-        continue;
-      }
-
-      // Send event for final command reports for actions
-      if (RoleCommand.valueOf(report.getRoleCommand()) == RoleCommand.ACTIONEXECUTE &&
-          HostRoleStatus.valueOf(report.getStatus()).isCompletedState()) {
-        ActionFinalReportReceivedEvent event = new ActionFinalReportReceivedEvent(
-                clusterId, hostname, report, false);
-        ambariEventPublisher.publish(event);
-      }
-
-      // Skip sending events for command reports for ABORTed commands
-      if (hostRoleCommand.getStatus() == HostRoleStatus.ABORTED) {
-        continue;
-      }
-      if (hostRoleCommand.getStatus() == HostRoleStatus.QUEUED &&
-              report.getStatus().equals("IN_PROGRESS")) {
-        hostRoleCommand.setStartTime(now);
-      }
-
-      // If the report indicates the keytab file was successfully transferred to a host or removed
-      // from a host, record this for future reference
-      if (Service.Type.KERBEROS.name().equalsIgnoreCase(report.getServiceName()) &&
-          Role.KERBEROS_CLIENT.name().equalsIgnoreCase(report.getRole()) &&
-          RoleCommand.CUSTOM_COMMAND.name().equalsIgnoreCase(report.getRoleCommand()) &&
-          RequestExecution.Status.COMPLETED.name().equalsIgnoreCase(report.getStatus())) {
-
-        String customCommand = report.getCustomCommand();
-
-        boolean adding = "SET_KEYTAB".equalsIgnoreCase(customCommand);
-        if (adding || "REMOVE_KEYTAB".equalsIgnoreCase(customCommand)) {
-          WriteKeytabsStructuredOut writeKeytabsStructuredOut;
-          try {
-            writeKeytabsStructuredOut = gson.fromJson(report.getStructuredOut(), WriteKeytabsStructuredOut.class);
-          } catch (JsonSyntaxException ex) {
-            //Json structure was incorrect do nothing, pass this data further for processing
-            writeKeytabsStructuredOut = null;
-          }
-
-          if (writeKeytabsStructuredOut != null) {
-            Map<String, String> keytabs = writeKeytabsStructuredOut.getKeytabs();
-            if (keytabs != null) {
-              for (Map.Entry<String, String> entry : keytabs.entrySet()) {
-                String principal = entry.getKey();
-                if (!kerberosPrincipalHostDAO.exists(principal, hostEntity.getHostId())) {
-                  if (adding) {
-                    kerberosPrincipalHostDAO.create(principal, hostEntity.getHostId());
-                  } else if ("_REMOVED_".equalsIgnoreCase(entry.getValue())) {
-                    kerberosPrincipalHostDAO.remove(principal, hostEntity.getHostId());
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-
-      //pass custom START, STOP and RESTART
-      if (RoleCommand.ACTIONEXECUTE.toString().equals(report.getRoleCommand()) ||
-         (RoleCommand.CUSTOM_COMMAND.toString().equals(report.getRoleCommand()) &&
-         !("RESTART".equals(report.getCustomCommand()) ||
-         "START".equals(report.getCustomCommand()) ||
-         "STOP".equals(report.getCustomCommand())))) {
-        continue;
-      }
-
-      Cluster cl = clusterFsm.getCluster(report.getClusterName());
-      String service = report.getServiceName();
-      if (service == null || service.isEmpty()) {
-        throw new AmbariException("Invalid command report, service: " + service);
-      }
-      if (actionMetadata.getActions(service.toLowerCase()).contains(report.getRole())) {
-        LOG.debug(report.getRole() + " is an action - skip component lookup");
-      } else {
-        try {
-          Service svc = cl.getService(service);
-          ServiceComponent svcComp = svc.getServiceComponent(report.getRole());
-          ServiceComponentHost scHost = svcComp.getServiceComponentHost(hostname);
-          String schName = scHost.getServiceComponentName();
-
-          if (report.getStatus().equals(HostRoleStatus.COMPLETED.toString())) {
-
-            // Reading component version if it is present
-            if (StringUtils.isNotBlank(report.getStructuredOut())) {
-              ComponentVersionStructuredOut structuredOutput = null;
-              try {
-                structuredOutput = gson.fromJson(report.getStructuredOut(), ComponentVersionStructuredOut.class);
-              } catch (JsonSyntaxException ex) {
-                //Json structure for component version was incorrect
-                //do nothing, pass this data further for processing
-              }
-
-              String newVersion = structuredOutput == null ? null : structuredOutput.version;
-
-              // Pass true to always publish a version event.  It is safer to recalculate the version even if we don't
-              // detect a difference in the value.  This is useful in case that a manual database edit is done while
-              // ambari-server is stopped.
-              handleComponentVersionReceived(cl, scHost, newVersion, true);
-            }
-
-            // Updating stack version, if needed (this is not actually for express/rolling upgrades!)
-            if (scHost.getState().equals(State.UPGRADING)) {
-              scHost.setStackVersion(scHost.getDesiredStackVersion());
-            } else if ((report.getRoleCommand().equals(RoleCommand.START.toString()) ||
-                (report.getRoleCommand().equals(RoleCommand.CUSTOM_COMMAND.toString()) &&
-                    ("START".equals(report.getCustomCommand()) ||
-                    "RESTART".equals(report.getCustomCommand()))))
-                && null != report.getConfigurationTags()
-                && !report.getConfigurationTags().isEmpty()) {
-              LOG.info("Updating applied config on service " + scHost.getServiceName() +
-                ", component " + scHost.getServiceComponentName() + ", host " + scHost.getHostName());
-              scHost.updateActualConfigs(report.getConfigurationTags());
-              scHost.setRestartRequired(false);
-            }
-            // Necessary for resetting clients stale configs after starting service
-            if ((RoleCommand.INSTALL.toString().equals(report.getRoleCommand()) ||
-                (RoleCommand.CUSTOM_COMMAND.toString().equals(report.getRoleCommand()) &&
-                "INSTALL".equals(report.getCustomCommand()))) && svcComp.isClientComponent()){
-              scHost.updateActualConfigs(report.getConfigurationTags());
-              scHost.setRestartRequired(false);
-            }
-            if (RoleCommand.CUSTOM_COMMAND.toString().equals(report.getRoleCommand()) &&
-                !("START".equals(report.getCustomCommand()) ||
-                 "STOP".equals(report.getCustomCommand()))) {
-              //do not affect states for custom commands except START and STOP
-              //lets status commands to be responsible for this
-              continue;
-            }
-
-            if (RoleCommand.START.toString().equals(report.getRoleCommand()) ||
-                (RoleCommand.CUSTOM_COMMAND.toString().equals(report.getRoleCommand()) &&
-                    "START".equals(report.getCustomCommand()))) {
-              scHost.handleEvent(new ServiceComponentHostStartedEvent(schName,
-                  hostname, now));
-              scHost.setRestartRequired(false);
-            } else if (RoleCommand.STOP.toString().equals(report.getRoleCommand()) ||
-                (RoleCommand.CUSTOM_COMMAND.toString().equals(report.getRoleCommand()) &&
-                    "STOP".equals(report.getCustomCommand()))) {
-              scHost.handleEvent(new ServiceComponentHostStoppedEvent(schName,
-                  hostname, now));
-            } else {
-              scHost.handleEvent(new ServiceComponentHostOpSucceededEvent(schName,
-                  hostname, now));
-            }
-          } else if (report.getStatus().equals("FAILED")) {
-
-            if (StringUtils.isNotBlank(report.getStructuredOut())) {
-              try {
-                ComponentVersionStructuredOut structuredOutput = gson.fromJson(report.getStructuredOut(), ComponentVersionStructuredOut.class);
-
-                if (null != structuredOutput.upgradeDirection && structuredOutput.upgradeDirection.isUpgrade()) {
-                  scHost.setUpgradeState(UpgradeState.FAILED);
-                }
-              } catch (JsonSyntaxException ex) {
-                LOG.warn("Structured output was found, but not parseable: {}", report.getStructuredOut());
-              }
-            }
-
-            LOG.warn("Operation failed - may be retried. Service component host: "
-                + schName + ", host: " + hostname + " Action id" + report.getActionId());
-            if (actionManager.isInProgressCommand(report)) {
-              scHost.handleEvent(new ServiceComponentHostOpFailedEvent
-                (schName, hostname, now));
-            } else {
-              LOG.info("Received report for a command that is no longer active. " + report);
-            }
-          } else if (report.getStatus().equals("IN_PROGRESS")) {
-            scHost.handleEvent(new ServiceComponentHostOpInProgressEvent(schName,
-                hostname, now));
-          }
-        } catch (ServiceComponentNotFoundException scnex) {
-          LOG.warn("Service component not found ", scnex);
-        } catch (InvalidStateTransitionException ex) {
-          if (LOG.isDebugEnabled()) {
-            LOG.warn("State machine exception.", ex);
-          } else {
-            LOG.warn("State machine exception. " + ex.getMessage());
-          }
-        }
-      }
-    }
-
-    //Update state machines from reports
-    actionManager.processTaskResponse(hostname, reports, commands);
-  }
-
-  protected void processStatusReports(HeartBeat heartbeat,
-                                      String hostname,
-                                      Clusters clusterFsm)
-      throws AmbariException {
-    Set<Cluster> clusters = clusterFsm.getClustersForHost(hostname);
-    for (Cluster cl : clusters) {
-      for (ComponentStatus status : heartbeat.componentStatus) {
-        if (status.getClusterName().equals(cl.getClusterName())) {
-          try {
-            Service svc = cl.getService(status.getServiceName());
-
-            String componentName = status.getComponentName();
-            if (svc.getServiceComponents().containsKey(componentName)) {
-              ServiceComponent svcComp = svc.getServiceComponent(
-                  componentName);
-              ServiceComponentHost scHost = svcComp.getServiceComponentHost(
-                  hostname);
-              State prevState = scHost.getState();
-              State liveState = State.valueOf(State.class, status.getStatus());
-              if (prevState.equals(State.INSTALLED)
-                  || prevState.equals(State.STARTED)
-                  || prevState.equals(State.STARTING)
-                  || prevState.equals(State.STOPPING)
-                  || prevState.equals(State.UNKNOWN)) {
-                scHost.setState(liveState); //TODO direct status set breaks state machine sometimes !!!
-                if (!prevState.equals(liveState)) {
-                  LOG.info("State of service component " + componentName
-                      + " of service " + status.getServiceName()
-                      + " of cluster " + status.getClusterName()
-                      + " has changed from " + prevState + " to " + liveState
-                      + " at host " + hostname);
-                }
-              }
-
-              SecurityState prevSecurityState = scHost.getSecurityState();
-              SecurityState currentSecurityState = SecurityState.valueOf(status.getSecurityState());
-              if((prevSecurityState != currentSecurityState)) {
-                if(prevSecurityState.isEndpoint()) {
-                  scHost.setSecurityState(currentSecurityState);
-                  LOG.info(String.format("Security of service component %s of service %s of cluster %s " +
-                          "has changed from %s to %s on host %s",
-                      componentName, status.getServiceName(), status.getClusterName(), prevSecurityState,
-                      currentSecurityState, hostname));
-                }
-                else {
-                  LOG.debug(String.format("Security of service component %s of service %s of cluster %s " +
-                          "has changed from %s to %s on host %s but will be ignored since %s is a " +
-                          "transitional state",
-                      componentName, status.getServiceName(), status.getClusterName(),
-                      prevSecurityState, currentSecurityState, hostname, prevSecurityState));
-                }
-              }
-
-              if (null != status.getStackVersion() && !status.getStackVersion().isEmpty()) {
-                scHost.setStackVersion(gson.fromJson(status.getStackVersion(), StackId.class));
-              }
-
-              if (null != status.getConfigTags()) {
-                scHost.updateActualConfigs(status.getConfigTags());
-              }
-
-              Map<String, Object> extra = status.getExtra();
-              if (null != extra && !extra.isEmpty()) {
-                try {
-                  if (extra.containsKey("processes")) {
-                    @SuppressWarnings("unchecked")
-                    List<Map<String, String>> list = (List<Map<String, String>>) extra.get("processes");
-                    scHost.setProcesses(list);
-                  }
-                  if (extra.containsKey("version")) {
-                    String version = extra.get("version").toString();
-
-                    handleComponentVersionReceived(cl, scHost, version, false);
-                  }
-
-                } catch (Exception e) {
-                  LOG.error("Could not access extra JSON for " +
-                      scHost.getServiceComponentName() + " from " +
-                      scHost.getHostName() + ": " + status.getExtra() +
-                      " (" + e.getMessage() + ")");
-                }
-              }
-
-              this.heartbeatMonitor.getAgentRequests()
-                  .setExecutionDetailsRequest(hostname, componentName, status.getSendExecCmdDet());
-            } else {
-              // TODO: What should be done otherwise?
-            }
-          } catch (ServiceNotFoundException e) {
-            LOG.warn("Received a live status update for a non-initialized"
-                + " service"
-                + ", clusterName=" + status.getClusterName()
-                + ", serviceName=" + status.getServiceName());
-            // FIXME ignore invalid live update and continue for now?
-            continue;
-          } catch (ServiceComponentNotFoundException e) {
-            LOG.warn("Received a live status update for a non-initialized"
-                + " servicecomponent"
-                + ", clusterName=" + status.getClusterName()
-                + ", serviceName=" + status.getServiceName()
-                + ", componentName=" + status.getComponentName());
-            // FIXME ignore invalid live update and continue for now?
-            continue;
-          } catch (ServiceComponentHostNotFoundException e) {
-            LOG.warn("Received a live status update for a non-initialized"
-                + " service"
-                + ", clusterName=" + status.getClusterName()
-                + ", serviceName=" + status.getServiceName()
-                + ", componentName=" + status.getComponentName()
-                + ", hostname=" + hostname);
-            // FIXME ignore invalid live update and continue for now?
-            continue;
-          } catch (RuntimeException e) {
-            LOG.warn("Received a live status with invalid payload"
-                + " service"
-                + ", clusterName=" + status.getClusterName()
-                + ", serviceName=" + status.getServiceName()
-                + ", componentName=" + status.getComponentName()
-                + ", hostname=" + hostname
-                + ", error=" + e.getMessage());
-            continue;
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Updates the version of the given service component, sets the upgrade state (if needed)
-   * and publishes a version event through the version event publisher.
-   *
-   * @param cluster        the cluster
-   * @param scHost         service component host
-   * @param newVersion     new version of service component
-   * @param alwaysPublish  if true, always publish a version event; if false,
-   *                       only publish if the component version was updated
-   */
-  private void handleComponentVersionReceived(Cluster cluster, ServiceComponentHost scHost,
-                                              String newVersion, boolean alwaysPublish) {
-
-    boolean updated = false;
-
-    if (StringUtils.isNotBlank(newVersion)) {
-      final String previousVersion = scHost.getVersion();
-      if (!StringUtils.equals(previousVersion, newVersion)) {
-        scHost.setVersion(newVersion);
-        scHost.setStackVersion(cluster.getDesiredStackVersion());
-        if (previousVersion != null && !previousVersion.equalsIgnoreCase(State.UNKNOWN.toString())) {
-          scHost.setUpgradeState(UpgradeState.COMPLETE);
-        }
-        updated = true;
-      }
-    }
-
-    if (updated || alwaysPublish) {
-      HostComponentVersionEvent event = new HostComponentVersionEvent(cluster, scHost);
-      versionEventPublisher.publish(event);
-    }
-  }
 
   /**
    * Adds commands from action queue to a heartbeat response.
@@ -1229,35 +702,4 @@ public class HeartBeatHandler {
     }
   }
 
-  /**
-   * This class is used for mapping json of structured output for component START action.
-   */
-  private static class ComponentVersionStructuredOut {
-    @SerializedName("version")
-    private String version;
-
-    @SerializedName("upgrade_type")
-    private UpgradeType upgradeType = null;
-
-    @SerializedName("direction")
-    private Direction upgradeDirection = null;
-
-  }
-
-  /**
-   * This class is used for mapping json of structured output for keytab distribution actions.
-   */
-  private static class WriteKeytabsStructuredOut {
-    @SerializedName("keytabs")
-    private Map<String,String> keytabs;
-
-    public Map<String, String> getKeytabs() {
-      return keytabs;
-    }
-
-    public void setKeytabs(Map<String, String> keytabs) {
-      this.keytabs = keytabs;
-    }
-  }
-
 }
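
Taken together, the HeartBeatHandler changes above turn heartbeat handling into a producer/consumer pipeline: handleHeartBeat() now only enqueues the heartbeat via heartbeatProcessor.addHeartbeat(), and the new HeartbeatProcessor (added further down in this commit) drains the queue from a scheduled background task. A minimal, self-contained sketch of that queue-and-poll pattern, with illustrative names rather than the actual Ambari types (only the 5000 ms delay and 1000 ms period mirror the values in HeartbeatProcessor):

    import java.util.concurrent.ConcurrentLinkedQueue;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class QueueAndPollSketch {
      private final ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<>();
      private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);

      // Handler side: enqueue and return immediately, keeping the agent request cheap.
      void addHeartbeat(String heartbeat) {
        queue.add(heartbeat);
      }

      // Processor side: periodically drain whatever has accumulated since the last run.
      void start() {
        executor.scheduleAtFixedRate(() -> {
          String hb;
          while ((hb = queue.poll()) != null) {
            System.out.println("processing " + hb); // command/status/alert handling would happen here
          }
        }, 5000, 1000, TimeUnit.MILLISECONDS);
      }

      void stop() {
        executor.shutdown();
      }

      public static void main(String[] args) throws InterruptedException {
        QueueAndPollSketch sketch = new QueueAndPollSketch();
        sketch.start();
        sketch.addHeartbeat("heartbeat from host-1");
        Thread.sleep(7000); // wait past the initial delay so the task runs at least once
        sketch.stop();
      }
    }

Moving the report processing off the request thread is what keeps a slow batch of command/status reports from delaying the HeartBeatResponse to agents, which is the heartbeat-lost scenario described in the commit message.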

http://git-wip-us.apache.org/repos/asf/ambari/blob/083ac6da/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
index efc717d..378e123 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
@@ -140,6 +140,10 @@ public class HeartbeatMonitor implements Runnable {
     List<Host> allHosts = clusters.getHosts();
     long now = System.currentTimeMillis();
     for (Host hostObj : allHosts) {
+      if (hostObj.getState() == HostState.HEARTBEAT_LOST) {
+        // do not check the host if it is already known to be lost
+        continue;
+      }
       String host = hostObj.getHostName();
       HostState hostState = hostObj.getState();
       String hostname = hostObj.getHostName();
@@ -212,6 +216,8 @@ public class HeartbeatMonitor implements Runnable {
         switch (sch.getState()) {
           case INIT:
           case INSTALLING:
+          case STARTING:
+          case STOPPING:
             //don't send commands until component is installed at least
             continue;
           default:

http://git-wip-us.apache.org/repos/asf/ambari/blob/083ac6da/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
new file mode 100644
index 0000000..2188a77
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
@@ -0,0 +1,773 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.agent;
+
+
+import com.google.common.util.concurrent.AbstractScheduledService;
+import com.google.common.util.concurrent.AbstractService;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import com.google.gson.Gson;
+import com.google.gson.JsonSyntaxException;
+import com.google.gson.annotations.SerializedName;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.Role;
+import org.apache.ambari.server.RoleCommand;
+import org.apache.ambari.server.ServiceComponentHostNotFoundException;
+import org.apache.ambari.server.ServiceComponentNotFoundException;
+import org.apache.ambari.server.ServiceNotFoundException;
+import org.apache.ambari.server.actionmanager.ActionManager;
+import org.apache.ambari.server.actionmanager.HostRoleCommand;
+import org.apache.ambari.server.actionmanager.HostRoleStatus;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.controller.MaintenanceStateHelper;
+import org.apache.ambari.server.events.ActionFinalReportReceivedEvent;
+import org.apache.ambari.server.events.AlertEvent;
+import org.apache.ambari.server.events.AlertReceivedEvent;
+import org.apache.ambari.server.events.HostComponentVersionEvent;
+import org.apache.ambari.server.events.publishers.AlertEventPublisher;
+import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
+import org.apache.ambari.server.events.publishers.VersionEventPublisher;
+import org.apache.ambari.server.metadata.ActionMetadata;
+import org.apache.ambari.server.orm.dao.KerberosPrincipalHostDAO;
+import org.apache.ambari.server.state.Alert;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.HostHealthStatus;
+import org.apache.ambari.server.state.MaintenanceState;
+import org.apache.ambari.server.state.SecurityState;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.State;
+import org.apache.ambari.server.state.UpgradeState;
+import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
+import org.apache.ambari.server.state.scheduler.RequestExecution;
+import org.apache.ambari.server.state.stack.upgrade.Direction;
+import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
+import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpFailedEvent;
+import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpInProgressEvent;
+import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpSucceededEvent;
+import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartedEvent;
+import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStoppedEvent;
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * HeartbeatProcessor is used for bulk processing of data retrieved from agents in the background.
+ *
+ */
+public class HeartbeatProcessor extends AbstractService {
+  private static final Logger LOG = LoggerFactory.getLogger(HeartbeatProcessor.class);
+
+  private ScheduledExecutorService executor;
+
+  private ConcurrentLinkedQueue<HeartBeat> heartBeatsQueue = new ConcurrentLinkedQueue<>();
+
+  private volatile boolean shouldRun = true;
+
+  // TODO: rewrite to correlate with the heartbeat frequency, which is currently hardcoded in the agent
+  private long delay = 5000;
+  private long period = 1000;
+
+  private int poolSize = 1;
+
+  private Clusters clusterFsm;
+  private HeartbeatMonitor heartbeatMonitor;
+  private Injector injector;
+  private ActionManager actionManager;
+
+  /**
+   * Publishes {@link AlertEvent} instances.
+   */
+  @Inject
+  AlertEventPublisher alertEventPublisher;
+
+  @Inject
+  AmbariEventPublisher ambariEventPublisher;
+
+  @Inject
+  VersionEventPublisher versionEventPublisher;
+
+  @Inject
+  ActionMetadata actionMetadata;
+
+  @Inject
+  MaintenanceStateHelper maintenanceStateHelper;
+
+  @Inject
+  AmbariMetaInfo ambariMetaInfo;
+
+  @Inject
+  KerberosPrincipalHostDAO kerberosPrincipalHostDAO;
+
+  @Inject
+  Gson gson;
+
+  @Inject
+  public HeartbeatProcessor(Clusters clusterFsm, ActionManager am, HeartbeatMonitor heartbeatMonitor,
+                            Injector injector) {
+    injector.injectMembers(this);
+
+    this.injector = injector;
+    this.heartbeatMonitor = heartbeatMonitor;
+    this.clusterFsm = clusterFsm;
+    actionManager = am;
+    ThreadFactory threadFactory = new ThreadFactoryBuilder().setNameFormat("ambari-heartbeat-processor-%d").build();
+    executor = Executors.newScheduledThreadPool(poolSize, threadFactory);
+  }
+
+  @Override
+  protected void doStart() {
+    LOG.info("**** Starting heartbeat processing threads ****");
+    for (int i=0; i< poolSize; i++) {
+      executor.scheduleAtFixedRate(new HeartbeatProcessingTask(), delay, period, TimeUnit.MILLISECONDS);
+    }
+  }
+
+  @Override
+  protected void doStop() {
+    LOG.info("**** Stopping heartbeat processing threads ****");
+    shouldRun = false;
+    executor.shutdown();
+  }
+
+  public void addHeartbeat(HeartBeat heartBeat) {
+    heartBeatsQueue.add(heartBeat);
+  }
+
+  private HeartBeat pollHeartbeat() {
+    return heartBeatsQueue.poll();
+  }
+
+  /**
+   * Processing task to be scheduled for execution
+   */
+  private class HeartbeatProcessingTask implements Runnable {
+
+    @Override
+    public void run() {
+      while (shouldRun) {
+        try {
+          HeartBeat heartbeat = pollHeartbeat();
+          if (heartbeat == null) {
+            break;
+          }
+          processHeartbeat(heartbeat);
+        } catch (Exception e) {
+          LOG.error("Exception received while processing heartbeat", e);
+        } catch (Throwable throwable) {
+          // catch everything else so one failure does not suppress later scheduled runs
+          LOG.error("ERROR: ", throwable);
+        }
+
+
+      }
+    }
+  }
+
+  /**
+   * Encapsulates the logic for processing data from an agent heartbeat
+   * @param heartbeat Agent heartbeat object
+   * @throws AmbariException
+   */
+  public void processHeartbeat(HeartBeat heartbeat) throws AmbariException {
+    long now = System.currentTimeMillis();
+
+    processAlerts(heartbeat);
+
+    processCommandReports(heartbeat, now);
+    processStatusReports(heartbeat);
+    // host status calculation is based on task and status reports, so it should be performed last
+    processHostStatus(heartbeat);
+  }
+
+
+
+  /**
+   * Extracts all of the {@link Alert}s from the heartbeat and fires
+   * {@link AlertEvent}s for each one. If there is a problem looking up the
+   * cluster, then alerts will not be processed.
+   *
+   * @param heartbeat
+   *          the heartbeat to process.
+   */
+  protected void processAlerts(HeartBeat heartbeat) {
+    if (heartbeat == null) {
+      return;
+    }
+
+    String hostname = heartbeat.getHostname();
+
+    if (null != heartbeat.getAlerts()) {
+      AlertEvent event = new AlertReceivedEvent(heartbeat.getAlerts());
+      for (Alert alert : event.getAlerts()) {
+        if (alert.getHostName() == null) {
+          alert.setHostName(hostname);
+        }
+      }
+      alertEventPublisher.publish(event);
+
+    }
+  }
+
+  /**
+   * Update the host status based on component statuses
+   * @param heartbeat heartbeat to process
+   * @throws AmbariException
+   */
+  protected void processHostStatus(HeartBeat heartbeat) throws AmbariException {
+
+    String hostname = heartbeat.getHostname();
+    Host host = clusterFsm.getHost(hostname);
+    HostHealthStatus.HealthStatus healthStatus = host.getHealthStatus().getHealthStatus();
+
+    if (!healthStatus.equals(HostHealthStatus.HealthStatus.UNKNOWN)) {
+
+      List<ComponentStatus> componentStatuses = heartbeat.getComponentStatus();
+      // Host status can be calculated only if the agent returned component statuses in the heartbeat,
+      // or if a command was executed that can change a component's status
+      boolean calculateHostStatus = false;
+      String clusterName = null;
+      if (componentStatuses.size() > 0) {
+        calculateHostStatus = true;
+        for (ComponentStatus componentStatus : componentStatuses) {
+          clusterName = componentStatus.getClusterName();
+          break;
+        }
+      }
+
+      if (!calculateHostStatus) {
+        List<CommandReport> reports = heartbeat.getReports();
+        for (CommandReport report : reports) {
+          if (RoleCommand.ACTIONEXECUTE.toString().equals(report.getRoleCommand())) {
+            continue;
+          }
+
+          String service = report.getServiceName();
+          if (actionMetadata.getActions(service.toLowerCase()).contains(report.getRole())) {
+            continue;
+          }
+          if (report.getStatus().equals("COMPLETED")) {
+            calculateHostStatus = true;
+            clusterName = report.getClusterName();
+            break;
+          }
+        }
+      }
+
+      if (calculateHostStatus) {
+        //Use actual component status to compute the host status
+        int masterCount = 0;
+        int mastersRunning = 0;
+        int slaveCount = 0;
+        int slavesRunning = 0;
+
+        StackId stackId;
+        Cluster cluster = clusterFsm.getCluster(clusterName);
+        stackId = cluster.getDesiredStackVersion();
+
+
+        List<ServiceComponentHost> scHosts = cluster.getServiceComponentHosts(heartbeat.getHostname());
+        for (ServiceComponentHost scHost : scHosts) {
+          ComponentInfo componentInfo =
+              ambariMetaInfo.getComponent(stackId.getStackName(),
+                  stackId.getStackVersion(), scHost.getServiceName(),
+                  scHost.getServiceComponentName());
+
+          String status = scHost.getState().name();
+
+          String category = componentInfo.getCategory();
+
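+          // only components not in maintenance mode count toward host health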
+          if (MaintenanceState.OFF == maintenanceStateHelper.getEffectiveState(scHost, host)) {
+            if (category.equals("MASTER")) {
+              ++masterCount;
+              if (status.equals("STARTED")) {
+                ++mastersRunning;
+              }
+            } else if (category.equals("SLAVE")) {
+              ++slaveCount;
+              if (status.equals("STARTED")) {
+                ++slavesRunning;
+              }
+            }
+          }
+        }
+
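+        // all masters and slaves running -> HEALTHY; any master down -> UNHEALTHY; otherwise (only slaves down) -> ALERT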
+        if (masterCount == mastersRunning && slaveCount == slavesRunning) {
+          healthStatus = HostHealthStatus.HealthStatus.HEALTHY;
+        } else if (masterCount > 0 && mastersRunning < masterCount) {
+          healthStatus = HostHealthStatus.HealthStatus.UNHEALTHY;
+        } else {
+          healthStatus = HostHealthStatus.HealthStatus.ALERT;
+        }
+
+        host.setStatus(healthStatus.name());
+        host.persist();
+      }
+
+      // If the host doesn't belong to any cluster, consider it healthy
+      if ((clusterFsm.getClustersForHost(host.getHostName())).size() == 0) {
+        healthStatus = HostHealthStatus.HealthStatus.HEALTHY;
+        host.setStatus(healthStatus.name());
+        host.persist();
+      }
+    }
+  }
+
+  /**
+   * Process reports of tasks executed on agents
+   * @param heartbeat heartbeat to process
+   * @param now cached current time
+   * @throws AmbariException
+   */
+  protected void processCommandReports(
+      HeartBeat heartbeat, long now)
+      throws AmbariException {
+    String hostname = heartbeat.getHostname();
+    List<CommandReport> reports = heartbeat.getReports();
+
+    // Cache HostRoleCommand entities because we will need them a few times
+    List<Long> taskIds = new ArrayList<Long>();
+    for (CommandReport report : reports) {
+      taskIds.add(report.getTaskId());
+    }
+    Collection<HostRoleCommand> commands = actionManager.getTasks(taskIds);
+
+    Iterator<HostRoleCommand> hostRoleCommandIterator = commands.iterator();
+    for (CommandReport report : reports) {
+
+      Long clusterId = null;
+      if (report.getClusterName() != null) {
+        try {
+          Cluster cluster = clusterFsm.getCluster(report.getClusterName());
+          clusterId = cluster.getClusterId();
+        } catch (AmbariException e) {
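+          // cluster not found for this report; leave clusterId as null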
+        }
+      }
+
+      LOG.debug("Received command report: " + report);
+      // Fetch HostRoleCommand that corresponds to a given task ID
+      HostRoleCommand hostRoleCommand = hostRoleCommandIterator.next();
+      Host host = clusterFsm.getHost(hostname);
+//      HostEntity hostEntity = hostDAO.findByName(hostname); //don't touch database
+      if (host == null) {
+        LOG.error("Received a command report and was unable to retrieve Host for hostname = " + hostname);
+        continue;
+      }
+
+      // Send event for final command reports for actions
+      if (RoleCommand.valueOf(report.getRoleCommand()) == RoleCommand.ACTIONEXECUTE &&
+          HostRoleStatus.valueOf(report.getStatus()).isCompletedState()) {
+        ActionFinalReportReceivedEvent event = new ActionFinalReportReceivedEvent(
+            clusterId, hostname, report, false);
+        ambariEventPublisher.publish(event);
+      }
+
+      // Skip sending events for command reports for ABORTed commands
+      if (hostRoleCommand.getStatus() == HostRoleStatus.ABORTED) {
+        continue;
+      }
+      if (hostRoleCommand.getStatus() == HostRoleStatus.QUEUED &&
+          report.getStatus().equals("IN_PROGRESS")) {
+        hostRoleCommand.setStartTime(now);
+      }
+
+      // If the report indicates the keytab file was successfully transferred to a host or removed
+      // from a host, record this for future reference
+      if (Service.Type.KERBEROS.name().equalsIgnoreCase(report.getServiceName()) &&
+          Role.KERBEROS_CLIENT.name().equalsIgnoreCase(report.getRole()) &&
+          RoleCommand.CUSTOM_COMMAND.name().equalsIgnoreCase(report.getRoleCommand()) &&
+          RequestExecution.Status.COMPLETED.name().equalsIgnoreCase(report.getStatus())) {
+
+        String customCommand = report.getCustomCommand();
+
+        boolean adding = "SET_KEYTAB".equalsIgnoreCase(customCommand);
+        if (adding || "REMOVE_KEYTAB".equalsIgnoreCase(customCommand)) {
+          WriteKeytabsStructuredOut writeKeytabsStructuredOut;
+          try {
+            writeKeytabsStructuredOut = gson.fromJson(report.getStructuredOut(), WriteKeytabsStructuredOut.class);
+          } catch (JsonSyntaxException ex) {
+            // JSON structure was incorrect; do nothing and pass this data further for processing
+            writeKeytabsStructuredOut = null;
+          }
+
+          if (writeKeytabsStructuredOut != null) {
+            Map<String, String> keytabs = writeKeytabsStructuredOut.getKeytabs();
+            if (keytabs != null) {
+              for (Map.Entry<String, String> entry : keytabs.entrySet()) {
+                String principal = entry.getKey();
+                if (!kerberosPrincipalHostDAO.exists(principal, host.getHostId())) {
+                  if (adding) {
+                    kerberosPrincipalHostDAO.create(principal, host.getHostId());
+                  } else if ("_REMOVED_".equalsIgnoreCase(entry.getValue())) {
+                    kerberosPrincipalHostDAO.remove(principal, host.getHostId());
+                  }
+                }
+              }
+            }
+          }
+        }
+      }
+
+      // skip ACTIONEXECUTE reports and custom commands other than START, STOP and RESTART
+      if (RoleCommand.ACTIONEXECUTE.toString().equals(report.getRoleCommand()) ||
+          (RoleCommand.CUSTOM_COMMAND.toString().equals(report.getRoleCommand()) &&
+              !("RESTART".equals(report.getCustomCommand()) ||
+                  "START".equals(report.getCustomCommand()) ||
+                  "STOP".equals(report.getCustomCommand())))) {
+        continue;
+      }
+
+      Cluster cl = clusterFsm.getCluster(report.getClusterName());
+      String service = report.getServiceName();
+      if (service == null || service.isEmpty()) {
+        throw new AmbariException("Invalid command report, service: " + service);
+      }
+      if (actionMetadata.getActions(service.toLowerCase()).contains(report.getRole())) {
+        LOG.debug(report.getRole() + " is an action - skip component lookup");
+      } else {
+        try {
+          Service svc = cl.getService(service);
+          ServiceComponent svcComp = svc.getServiceComponent(report.getRole());
+          ServiceComponentHost scHost = svcComp.getServiceComponentHost(hostname);
+          String schName = scHost.getServiceComponentName();
+
+          if (report.getStatus().equals(HostRoleStatus.COMPLETED.toString())) {
+
+            // Reading component version if it is present
+            if (StringUtils.isNotBlank(report.getStructuredOut())) {
+              ComponentVersionStructuredOut structuredOutput = null;
+              try {
+                structuredOutput = gson.fromJson(report.getStructuredOut(), ComponentVersionStructuredOut.class);
+              } catch (JsonSyntaxException ex) {
+                // JSON structure for the component version was incorrect;
+                // do nothing and pass this data further for processing
+              }
+
+              String newVersion = structuredOutput == null ? null : structuredOutput.version;
+
+              // Pass true to always publish a version event.  It is safer to recalculate the version even if we don't
+              // detect a difference in the value.  This is useful in case a manual database edit is done while
+              // ambari-server is stopped.
+              handleComponentVersionReceived(cl, scHost, newVersion, true);
+            }
+
+            // Updating stack version, if needed (this is not actually for express/rolling upgrades!)
+            if (scHost.getState().equals(org.apache.ambari.server.state.State.UPGRADING)) {
+              scHost.setStackVersion(scHost.getDesiredStackVersion());
+            } else if ((report.getRoleCommand().equals(RoleCommand.START.toString()) ||
+                (report.getRoleCommand().equals(RoleCommand.CUSTOM_COMMAND.toString()) &&
+                    ("START".equals(report.getCustomCommand()) ||
+                        "RESTART".equals(report.getCustomCommand()))))
+                && null != report.getConfigurationTags()
+                && !report.getConfigurationTags().isEmpty()) {
+              LOG.info("Updating applied config on service " + scHost.getServiceName() +
+                  ", component " + scHost.getServiceComponentName() + ", host " + scHost.getHostName());
+              scHost.updateActualConfigs(report.getConfigurationTags());
+              scHost.setRestartRequired(false);
+            }
+            // Necessary for resetting clients' stale configs after starting a service
+            if ((RoleCommand.INSTALL.toString().equals(report.getRoleCommand()) ||
+                (RoleCommand.CUSTOM_COMMAND.toString().equals(report.getRoleCommand()) &&
+                    "INSTALL".equals(report.getCustomCommand()))) && svcComp.isClientComponent()){
+              scHost.updateActualConfigs(report.getConfigurationTags());
+              scHost.setRestartRequired(false);
+            }
+            if (RoleCommand.CUSTOM_COMMAND.toString().equals(report.getRoleCommand()) &&
+                !("START".equals(report.getCustomCommand()) ||
+                    "STOP".equals(report.getCustomCommand()))) {
+              // do not affect states for custom commands except START and STOP;
+              // let status commands be responsible for this
+              continue;
+            }
+
+            if (RoleCommand.START.toString().equals(report.getRoleCommand()) ||
+                (RoleCommand.CUSTOM_COMMAND.toString().equals(report.getRoleCommand()) &&
+                    "START".equals(report.getCustomCommand()))) {
+              scHost.handleEvent(new ServiceComponentHostStartedEvent(schName,
+                  hostname, now));
+              scHost.setRestartRequired(false);
+            } else if (RoleCommand.STOP.toString().equals(report.getRoleCommand()) ||
+                (RoleCommand.CUSTOM_COMMAND.toString().equals(report.getRoleCommand()) &&
+                    "STOP".equals(report.getCustomCommand()))) {
+              scHost.handleEvent(new ServiceComponentHostStoppedEvent(schName,
+                  hostname, now));
+            } else {
+              scHost.handleEvent(new ServiceComponentHostOpSucceededEvent(schName,
+                  hostname, now));
+            }
+          } else if (report.getStatus().equals("FAILED")) {
+
+            if (StringUtils.isNotBlank(report.getStructuredOut())) {
+              try {
+                ComponentVersionStructuredOut structuredOutput = gson.fromJson(report.getStructuredOut(), ComponentVersionStructuredOut.class);
+
+                if (null != structuredOutput.upgradeDirection && structuredOutput.upgradeDirection.isUpgrade()) {
+                  scHost.setUpgradeState(UpgradeState.FAILED);
+                }
+              } catch (JsonSyntaxException ex) {
+                LOG.warn("Structured output was found, but not parseable: {}", report.getStructuredOut());
+              }
+            }
+
+            LOG.warn("Operation failed - may be retried. Service component host: "
+                + schName + ", host: " + hostname + ", action id: " + report.getActionId());
+            if (actionManager.isInProgressCommand(report)) {
+              scHost.handleEvent(new ServiceComponentHostOpFailedEvent
+                  (schName, hostname, now));
+            } else {
+              LOG.info("Received report for a command that is no longer active. " + report);
+            }
+          } else if (report.getStatus().equals("IN_PROGRESS")) {
+            scHost.handleEvent(new ServiceComponentHostOpInProgressEvent(schName,
+                hostname, now));
+          }
+        } catch (ServiceComponentNotFoundException scnex) {
+          LOG.warn("Service component not found ", scnex);
+        } catch (InvalidStateTransitionException ex) {
+          if (LOG.isDebugEnabled()) {
+            LOG.warn("State machine exception.", ex);
+          } else {
+            LOG.warn("State machine exception. " + ex.getMessage());
+          }
+        }
+      }
+    }
+
+    //Update state machines from reports
+    actionManager.processTaskResponse(hostname, reports, commands);
+  }
+
+  /**
+   * Process reports of status commands
+   * @param heartbeat heartbeat to process
+   * @throws AmbariException
+   */
+  protected void processStatusReports(HeartBeat heartbeat) throws AmbariException {
+    String hostname = heartbeat.getHostname();
+    Set<Cluster> clusters = clusterFsm.getClustersForHost(hostname);
+    for (Cluster cl : clusters) {
+      for (ComponentStatus status : heartbeat.componentStatus) {
+        if (status.getClusterName().equals(cl.getClusterName())) {
+          try {
+            Service svc = cl.getService(status.getServiceName());
+
+            String componentName = status.getComponentName();
+            if (svc.getServiceComponents().containsKey(componentName)) {
+              ServiceComponent svcComp = svc.getServiceComponent(
+                  componentName);
+              ServiceComponentHost scHost = svcComp.getServiceComponentHost(
+                  hostname);
+              org.apache.ambari.server.state.State prevState = scHost.getState();
+              org.apache.ambari.server.state.State liveState =
+                  org.apache.ambari.server.state.State.valueOf(org.apache.ambari.server.state.State.class,
+                      status.getStatus());
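+              // apply the agent-reported live state only for previous states where a direct update is allowed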
+              if (prevState.equals(org.apache.ambari.server.state.State.INSTALLED)
+                  || prevState.equals(org.apache.ambari.server.state.State.STARTED)
+                  || prevState.equals(org.apache.ambari.server.state.State.STARTING)
+                  || prevState.equals(org.apache.ambari.server.state.State.STOPPING)
+                  || prevState.equals(org.apache.ambari.server.state.State.UNKNOWN)) {
+                scHost.setState(liveState); //TODO direct status set breaks state machine sometimes !!!
+                if (!prevState.equals(liveState)) {
+                  LOG.info("State of service component " + componentName
+                      + " of service " + status.getServiceName()
+                      + " of cluster " + status.getClusterName()
+                      + " has changed from " + prevState + " to " + liveState
+                      + " at host " + hostname);
+                }
+              }
+
+              SecurityState prevSecurityState = scHost.getSecurityState();
+              SecurityState currentSecurityState = SecurityState.valueOf(status.getSecurityState());
+              if((prevSecurityState != currentSecurityState)) {
+                if(prevSecurityState.isEndpoint()) {
+                  scHost.setSecurityState(currentSecurityState);
+                  LOG.info(String.format("Security of service component %s of service %s of cluster %s " +
+                          "has changed from %s to %s on host %s",
+                      componentName, status.getServiceName(), status.getClusterName(), prevSecurityState,
+                      currentSecurityState, hostname));
+                }
+                else {
+                  LOG.debug(String.format("Security of service component %s of service %s of cluster %s " +
+                          "has changed from %s to %s on host %s but will be ignored since %s is a " +
+                          "transitional state",
+                      componentName, status.getServiceName(), status.getClusterName(),
+                      prevSecurityState, currentSecurityState, hostname, prevSecurityState));
+                }
+              }
+
+              if (null != status.getStackVersion() && !status.getStackVersion().isEmpty()) {
+                scHost.setStackVersion(gson.fromJson(status.getStackVersion(), StackId.class));
+              }
+
+              if (null != status.getConfigTags()) {
+                scHost.updateActualConfigs(status.getConfigTags());
+              }
+
+              Map<String, Object> extra = status.getExtra();
+              if (null != extra && !extra.isEmpty()) {
+                try {
+                  if (extra.containsKey("processes")) {
+                    @SuppressWarnings("unchecked")
+                    List<Map<String, String>> list = (List<Map<String, String>>) extra.get("processes");
+                    scHost.setProcesses(list);
+                  }
+                  if (extra.containsKey("version")) {
+                    String version = extra.get("version").toString();
+
+                    handleComponentVersionReceived(cl, scHost, version, false);
+                  }
+
+                } catch (Exception e) {
+                  LOG.error("Could not access extra JSON for " +
+                      scHost.getServiceComponentName() + " from " +
+                      scHost.getHostName() + ": " + status.getExtra() +
+                      " (" + e.getMessage() + ")");
+                }
+              }
+
+              this.heartbeatMonitor.getAgentRequests()
+                  .setExecutionDetailsRequest(hostname, componentName, status.getSendExecCmdDet());
+            } else {
+              // TODO: What should be done otherwise?
+            }
+          } catch (ServiceNotFoundException e) {
+            LOG.warn("Received a live status update for a non-initialized"
+                + " service"
+                + ", clusterName=" + status.getClusterName()
+                + ", serviceName=" + status.getServiceName());
+            // FIXME ignore invalid live update and continue for now?
+            continue;
+          } catch (ServiceComponentNotFoundException e) {
+            LOG.warn("Received a live status update for a non-initialized"
+                + " servicecomponent"
+                + ", clusterName=" + status.getClusterName()
+                + ", serviceName=" + status.getServiceName()
+                + ", componentName=" + status.getComponentName());
+            // FIXME ignore invalid live update and continue for now?
+            continue;
+          } catch (ServiceComponentHostNotFoundException e) {
+            LOG.warn("Received a live status update for a non-initialized"
+                + " service"
+                + ", clusterName=" + status.getClusterName()
+                + ", serviceName=" + status.getServiceName()
+                + ", componentName=" + status.getComponentName()
+                + ", hostname=" + hostname);
+            // FIXME ignore invalid live update and continue for now?
+            continue;
+          } catch (RuntimeException e) {
+            LOG.warn("Received a live status with invalid payload"
+                + " service"
+                + ", clusterName=" + status.getClusterName()
+                + ", serviceName=" + status.getServiceName()
+                + ", componentName=" + status.getComponentName()
+                + ", hostname=" + hostname
+                + ", error=" + e.getMessage());
+            continue;
+          }
+        }
+      }
+    }
+  }
+
+
+
+  /**
+   * Updates the version of the given service component, sets the upgrade state (if needed)
+   * and publishes a version event through the version event publisher.
+   *
+   * @param cluster        the cluster
+   * @param scHost         service component host
+   * @param newVersion     new version of service component
+   * @param alwaysPublish  if true, always publish a version event; if false,
+   *                       only publish if the component version was updated
+   */
+  private void handleComponentVersionReceived(Cluster cluster, ServiceComponentHost scHost,
+                                              String newVersion, boolean alwaysPublish) {
+
+    boolean updated = false;
+
+    if (StringUtils.isNotBlank(newVersion)) {
+      final String previousVersion = scHost.getVersion();
+      if (!StringUtils.equals(previousVersion, newVersion)) {
+        scHost.setVersion(newVersion);
+        scHost.setStackVersion(cluster.getDesiredStackVersion());
+        if (previousVersion != null && !previousVersion.equalsIgnoreCase(
+            org.apache.ambari.server.state.State.UNKNOWN.toString())) {
+          scHost.setUpgradeState(UpgradeState.COMPLETE);
+        }
+        updated = true;
+      }
+    }
+
+    if (updated || alwaysPublish) {
+      HostComponentVersionEvent event = new HostComponentVersionEvent(cluster, scHost);
+      versionEventPublisher.publish(event);
+    }
+  }
+
+  /**
+   * This class is used for mapping the JSON structured output of keytab distribution actions.
+   */
+  private static class WriteKeytabsStructuredOut {
+    @SerializedName("keytabs")
+    private Map<String,String> keytabs;
+
+    public Map<String, String> getKeytabs() {
+      return keytabs;
+    }
+
+    public void setKeytabs(Map<String, String> keytabs) {
+      this.keytabs = keytabs;
+    }
+  }
+
+
+  /**
+   * This class is used for mapping the JSON structured output of the component START action.
+   */
+  private static class ComponentVersionStructuredOut {
+    @SerializedName("version")
+    private String version;
+
+    @SerializedName("upgrade_type")
+    private UpgradeType upgradeType = null;
+
+    @SerializedName("direction")
+    private Direction upgradeDirection = null;
+
+  }
+}
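
Illustrative sketch (not part of this commit): the processing model above is a bounded pool of
scheduled tasks draining a shared queue. A minimal, self-contained version of that pattern, with
hypothetical names, might look like this:

import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class DrainingProcessorSketch {
  private final ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<>();
  private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
  private volatile boolean shouldRun = true;

  public void start() {
    // initial delay of 5s, then a run every 1s, mirroring the hardcoded values above
    executor.scheduleAtFixedRate(this::drain, 5000, 1000, TimeUnit.MILLISECONDS);
  }

  public void stop() {
    shouldRun = false;
    executor.shutdown();
  }

  public void add(String item) {
    queue.add(item);
  }

  private void drain() {
    // process until the queue is empty, then yield until the next scheduled run
    while (shouldRun) {
      String item = queue.poll();
      if (item == null) {
        break;
      }
      try {
        System.out.println("processing " + item);
      } catch (RuntimeException e) {
        // swallow so one bad item does not suppress later scheduled runs
      }
    }
  }
}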

http://git-wip-us.apache.org/repos/asf/ambari/blob/083ac6da/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostVersionDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostVersionDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostVersionDAO.java
index 040876a..e28f9ef 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostVersionDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostVersionDAO.java
@@ -131,8 +131,8 @@ public class HostVersionDAO extends CrudDAO<HostVersionEntity, Long> {
   }
 
   /**
-   * Retrieve all of the host versions for the given cluster name, host name, and state.
-   *
+   * Retrieve all of the host versions for the given cluster name, host name, and state. <br/>
+   * Consider using the faster method {@link HostVersionDAO#findByClusterHostAndState(long, long, org.apache.ambari.server.state.RepositoryVersionState)}.
    * @param clusterName Cluster name
    * @param hostName FQDN of host
    * @param state repository version state
@@ -150,8 +150,29 @@ public class HostVersionDAO extends CrudDAO<HostVersionEntity, Long> {
   }
 
   /**
+   * Faster version of {@link HostVersionDAO#findByClusterHostAndState(java.lang.String, java.lang.String, org.apache.ambari.server.state.RepositoryVersionState)}
+   *
+   * @param clusterId Cluster ID
+   * @param hostId Host ID
+   * @param state repository version state
+   * @return Return all of the host versions that match the criteria.
+   */
+  @RequiresSession
+  public List<HostVersionEntity> findByClusterHostAndState(long clusterId, long hostId, RepositoryVersionState state) {
+    TypedQuery<HostVersionEntity> query =
+        entityManagerProvider.get().createNamedQuery("hostVersionByClusterHostIdAndState", HostVersionEntity.class);
+
+    query.setParameter("clusterId", clusterId);
+    query.setParameter("hostId", hostId);
+    query.setParameter("state", state);
+
+    return daoUtils.selectList(query);
+  }
+
+  /**
    * Retrieve the single host version whose state is {@link org.apache.ambari.server.state.RepositoryVersionState#CURRENT}, of which there should be exactly one at all times
    * for the given host.
+   * Consider using the faster method {@link HostVersionDAO#findByHostAndStateCurrent(long, long)}.
    *
    * @param clusterName Cluster name
    * @param hostName Host name
@@ -175,8 +196,36 @@ public class HostVersionDAO extends CrudDAO<HostVersionEntity, Long> {
   }
 
   /**
+   * Retrieve the single host version whose state is {@link org.apache.ambari.server.state.RepositoryVersionState#CURRENT}, of which there should be exactly one at all times
+   * for the given host.
+   * Faster version of {@link HostVersionDAO#findByHostAndStateCurrent(java.lang.String, java.lang.String)}
+   * @param clusterId Cluster ID
+   * @param hostId host ID
+   * @return Returns the single host version for this host whose state is {@link org.apache.ambari.server.state.RepositoryVersionState#CURRENT}, or {@code null} otherwise.
+   */
+  @RequiresSession
+  public HostVersionEntity findByHostAndStateCurrent(long clusterId, long hostId) {
+    try {
+      List<?> results = findByClusterHostAndState(clusterId, hostId, RepositoryVersionState.CURRENT);
+      if (results.isEmpty()) {
+        return null;
+      } else {
+        if (results.size() == 1) {
+          return (HostVersionEntity) results.get(0);
+        }
+      }
+      throw new NonUniqueResultException();
+    } catch (NoResultException ignored) {
+      return null;
+    }
+  }
+
+  /**
    * Retrieve the single host version for the given cluster, stack name, stack
-   * version, and host name.
+   * version, and host name. <br/>
+   * This query is slow and not suitable for frequent use. <br/>
+   * Please use {@link HostVersionDAO#findByClusterStackVersionAndHost(long, org.apache.ambari.server.state.StackId, java.lang.String, long)} instead; <br/>
+   * it is roughly 50 times faster.
    *
    * @param clusterName
    *          Cluster name
@@ -203,6 +252,29 @@ public class HostVersionDAO extends CrudDAO<HostVersionEntity, Long> {
     return daoUtils.selectSingle(query);
   }
 
+  /**
+   * Optimized version of {@link HostVersionDAO#findByClusterStackVersionAndHost(java.lang.String, org.apache.ambari.server.state.StackId, java.lang.String, java.lang.String)}
+   * @param clusterId ID of the cluster
+   * @param stackId Stack ID (e.g., HDP-2.2)
+   * @param version Stack version (e.g., 2.2.0.1-995)
+   * @param hostId ID of the host
+   * @return Returns the single host version that matches the criteria.
+   */
+  @RequiresSession
+  public HostVersionEntity findByClusterStackVersionAndHost(long clusterId, StackId stackId, String version,
+                                                            long hostId) {
+    TypedQuery<HostVersionEntity> query = entityManagerProvider.get()
+        .createNamedQuery("hostVersionByClusterStackVersionAndHostId", HostVersionEntity.class);
+
+    query.setParameter("clusterId", clusterId);
+    query.setParameter("stackName", stackId.getStackName());
+    query.setParameter("stackVersion", stackId.getStackVersion());
+    query.setParameter("version", version);
+    query.setParameter("hostId", hostId);
+
+    return daoUtils.selectSingle(query);
+  }
+
   @Transactional
   public void removeByHostName(String hostName) {
     Collection<HostVersionEntity> hostVersions = this.findByHost(hostName);
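
Usage sketch (an assumption, not taken from the patch): callers that already hold Cluster and Host
objects can switch from the name-based lookups to the new ID-based overloads, for example:

// name-based lookup: resolves cluster and host by name inside the JPQL joins
HostVersionEntity current =
    hostVersionDAO.findByHostAndStateCurrent(cluster.getClusterName(), host.getHostName());

// ID-based lookup added here: joins directly on the numeric keys the caller already has
HostVersionEntity currentById =
    hostVersionDAO.findByHostAndStateCurrent(cluster.getClusterId(), host.getHostId());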

http://git-wip-us.apache.org/repos/asf/ambari/blob/083ac6da/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostVersionEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostVersionEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostVersionEntity.java
index b69518b..6be4b50 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostVersionEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostVersionEntity.java
@@ -62,6 +62,15 @@ import org.apache.ambari.server.state.RepositoryVersionState;
         "SELECT hostVersion FROM HostVersionEntity hostVersion JOIN hostVersion.hostEntity host JOIN host.clusterEntities clusters " +
             "WHERE clusters.clusterName=:clusterName AND hostVersion.repositoryVersion.stack.stackName=:stackName AND hostVersion.repositoryVersion.stack.stackVersion=:stackVersion AND hostVersion.repositoryVersion.version=:version AND " +
             "hostVersion.hostEntity.hostName=:hostName"),
+
+    @NamedQuery(name = "hostVersionByClusterHostIdAndState", query =
+        "SELECT hostVersion FROM HostVersionEntity hostVersion JOIN hostVersion.hostEntity host JOIN host.clusterEntities clusters " +
+            "WHERE clusters.clusterId=:clusterId AND hostVersion.hostId=:hostId AND hostVersion.state=:state"),
+
+    @NamedQuery(name = "hostVersionByClusterStackVersionAndHostId", query =
+        "SELECT hostVersion FROM HostVersionEntity hostVersion JOIN hostVersion.hostEntity host JOIN host.clusterEntities clusters " +
+        "WHERE hostVersion.hostId=:hostId AND clusters.clusterId=:clusterId AND hostVersion.repositoryVersion.stack.stackName=:stackName " +
+        "AND hostVersion.repositoryVersion.stack.stackVersion=:stackVersion AND hostVersion.repositoryVersion.version=:version")
 })
 public class HostVersionEntity {
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/083ac6da/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index 4212975..c6d01e8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -1560,8 +1560,8 @@ public class ClusterImpl implements Cluster {
     StackId repoVersionStackId = new StackId(repoVersionStackEntity);
 
     HostVersionEntity hostVersionEntity = hostVersionDAO.findByClusterStackVersionAndHost(
-      getClusterName(), repoVersionStackId, repositoryVersion.getVersion(),
-      host.getHostName());
+      getClusterId(), repoVersionStackId, repositoryVersion.getVersion(),
+      host.getHostId());
 
     hostTransitionStateWriteLock.lock();
     try {
@@ -1576,7 +1576,7 @@ public class ClusterImpl implements Cluster {
         hostVersionDAO.create(hostVersionEntity);
       }
 
-      HostVersionEntity currentVersionEntity = hostVersionDAO.findByHostAndStateCurrent(getClusterName(), host.getHostName());
+      HostVersionEntity currentVersionEntity = hostVersionDAO.findByHostAndStateCurrent(getClusterId(), host.getHostId());
       boolean isCurrentPresent = (currentVersionEntity != null);
       final ServiceComponentHostSummary hostSummary = new ServiceComponentHostSummary(ambariMetaInfo, host, stack);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/083ac6da/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
index bfb6214..1bd60a8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
@@ -1319,43 +1319,48 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
   @Override
   public ServiceComponentHostResponse convertToResponse() {
-    readLock.lock();
+    clusterGlobalLock.readLock().lock();
     try {
-      HostComponentStateEntity hostComponentStateEntity = getStateEntity();
-      if (null == hostComponentStateEntity) {
-        LOG.warn("Could not convert ServiceComponentHostResponse to a response. It's possible that Host " + getHostName() + " was deleted.");
-        return null;
-      }
+      readLock.lock();
+      try {
+        HostComponentStateEntity hostComponentStateEntity = getStateEntity();
+        if (null == hostComponentStateEntity) {
+          LOG.warn("Could not convert ServiceComponentHostResponse to a response. It's possible that Host " + getHostName() + " was deleted.");
+          return null;
+        }
 
-      String clusterName = serviceComponent.getClusterName();
-      String serviceName = serviceComponent.getServiceName();
-      String serviceComponentName = serviceComponent.getName();
-      String hostName = getHostName();
-      String state = getState().toString();
-      String stackId = getStackVersion().getStackId();
-      String desiredState = getDesiredState().toString();
-      String desiredStackId = getDesiredStackVersion().getStackId();
-      HostComponentAdminState componentAdminState = getComponentAdminState();
-      UpgradeState upgradeState = hostComponentStateEntity.getUpgradeState();
-
-      ServiceComponentHostResponse r = new ServiceComponentHostResponse(
-          clusterName, serviceName,
-          serviceComponentName, hostName, state,
-          stackId, desiredState,
-          desiredStackId, componentAdminState);
-
-      r.setActualConfigs(actualConfigs);
-      r.setUpgradeState(upgradeState);
+        String clusterName = serviceComponent.getClusterName();
+        String serviceName = serviceComponent.getServiceName();
+        String serviceComponentName = serviceComponent.getName();
+        String hostName = getHostName();
+        String state = getState().toString();
+        String stackId = getStackVersion().getStackId();
+        String desiredState = getDesiredState().toString();
+        String desiredStackId = getDesiredStackVersion().getStackId();
+        HostComponentAdminState componentAdminState = getComponentAdminState();
+        UpgradeState upgradeState = hostComponentStateEntity.getUpgradeState();
+
+        ServiceComponentHostResponse r = new ServiceComponentHostResponse(
+            clusterName, serviceName,
+            serviceComponentName, hostName, state,
+            stackId, desiredState,
+            desiredStackId, componentAdminState);
+
+        r.setActualConfigs(actualConfigs);
+        r.setUpgradeState(upgradeState);
 
-      try {
-        r.setStaleConfig(helper.isStaleConfigs(this));
-      } catch (Exception e) {
-        LOG.error("Could not determine stale config", e);
-      }
+        try {
+          r.setStaleConfig(helper.isStaleConfigs(this));
+        } catch (Exception e) {
+          LOG.error("Could not determine stale config", e);
+        }
 
-      return r;
+        return r;
+      } finally {
+        readLock.unlock();
+      }
     } finally {
-      readLock.unlock();
+      clusterGlobalLock.readLock().unlock();
     }
   }
 
@@ -1797,6 +1802,7 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     }
 
     final String hostName = getHostName();
+    final long hostId = getHost().getHostId();
     final Set<Cluster> clustersForHost = clusters.getClustersForHost(hostName);
     if (clustersForHost.size() != 1) {
       throw new AmbariException("Host " + hostName + " should be assigned only to one cluster");
@@ -1815,7 +1821,7 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
         repositoryVersion = createRepositoryVersion(version, stackId, stackInfo);
       }
 
-      final HostEntity host = hostDAO.findByName(hostName);
+      final HostEntity host = hostDAO.findById(hostId);
       cluster.transitionHostVersionState(host, repositoryVersion, stackId);
     } finally {
       writeLock.unlock();
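
Illustrative sketch (not part of this commit): the change above now takes the cluster-wide read
lock before the per-component read lock, presumably to keep a consistent lock order across code
paths. A minimal standalone version of that nesting pattern, with hypothetical names:

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class LockOrderingSketch {
  // coarse, shared lock: always taken first
  private final ReadWriteLock clusterGlobalLock = new ReentrantReadWriteLock();
  // fine-grained, per-object lock: always taken second
  private final Lock readLock = new ReentrantReadWriteLock().readLock();

  public String convertToResponse() {
    clusterGlobalLock.readLock().lock();
    try {
      readLock.lock();
      try {
        return "response built under both locks";
      } finally {
        // release in the reverse order of acquisition
        readLock.unlock();
      }
    } finally {
      clusterGlobalLock.readLock().unlock();
    }
  }
}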


[09/11] ambari git commit: AMBARI-15141. Start all services request aborts in the middle and hosts go into heartbeat-lost state. (mpapirkovskyy)

Posted by nc...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/083ac6da/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
new file mode 100644
index 0000000..eb99142
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
@@ -0,0 +1,1290 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.agent;
+
+import com.google.gson.JsonObject;
+import com.google.inject.Guice;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.persist.PersistService;
+import com.google.inject.persist.UnitOfWork;
+import junit.framework.Assert;
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.Role;
+import org.apache.ambari.server.RoleCommand;
+import org.apache.ambari.server.actionmanager.ActionDBAccessor;
+import org.apache.ambari.server.actionmanager.ActionDBAccessorImpl;
+import org.apache.ambari.server.actionmanager.ActionManager;
+import org.apache.ambari.server.actionmanager.HostRoleCommand;
+import org.apache.ambari.server.actionmanager.HostRoleCommandFactory;
+import org.apache.ambari.server.actionmanager.HostRoleStatus;
+import org.apache.ambari.server.actionmanager.Request;
+import org.apache.ambari.server.actionmanager.RequestFactory;
+import org.apache.ambari.server.actionmanager.Stage;
+import org.apache.ambari.server.actionmanager.StageFactory;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.HostsMap;
+import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.dao.HostDAO;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.entities.HostEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.state.Alert;
+import org.apache.ambari.server.state.AlertState;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.HostState;
+import org.apache.ambari.server.state.SecurityState;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.State;
+import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
+import org.apache.ambari.server.state.svccomphost.ServiceComponentHostInstallEvent;
+import org.apache.ambari.server.state.svccomphost.ServiceComponentHostUpgradeEvent;
+import org.apache.ambari.server.utils.EventBusSynchronizer;
+import org.apache.ambari.server.utils.StageUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DATANODE;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyCluster;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyHostStatus;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyHostname1;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyOSRelease;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyOs;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyOsType;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyStackId;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HBASE_MASTER;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HDFS;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HDFS_CLIENT;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.NAMENODE;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.SECONDARY_NAMENODE;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class HeartbeatProcessorTest {
+
+  private static final Logger log = LoggerFactory.getLogger(HeartbeatProcessorTest.class);
+  private Injector injector;
+  private Clusters clusters;
+  long requestId = 23;
+  long stageId = 31;
+  private UnitOfWork unitOfWork;
+
+  @Inject
+  Configuration config;
+
+  @Inject
+  ActionDBAccessor actionDBAccessor;
+
+  @Inject
+  HeartbeatTestHelper heartbeatTestHelper;
+
+  @Inject
+  private HostRoleCommandFactory hostRoleCommandFactory;
+
+  @Inject
+  private HostDAO hostDAO;
+
+  @Inject
+  private StageFactory stageFactory;
+
+  @Inject
+  private AmbariMetaInfo metaInfo;
+
+  private final static StackId HDP_22_STACK = new StackId("HDP", "2.2.0");
+
+  @Before
+  public void setup() throws Exception {
+    InMemoryDefaultTestModule module = HeartbeatTestHelper.getTestModule();
+    injector = Guice.createInjector(module);
+    injector.getInstance(GuiceJpaInitializer.class);
+    clusters = injector.getInstance(Clusters.class);
+    injector.injectMembers(this);
+    unitOfWork = injector.getInstance(UnitOfWork.class);
+  }
+
+  @After
+  public void teardown() throws AmbariException {
+    injector.getInstance(PersistService.class).stop();
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testHeartbeatWithConfigs() throws Exception {
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
+    Service hdfs = cluster.addService(HDFS);
+    hdfs.persist();
+    hdfs.addServiceComponent(DATANODE).persist();
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(NAMENODE).persist();
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
+    hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+
+    ActionQueue aq = new ActionQueue();
+
+    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
+        getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
+    ServiceComponentHost serviceComponentHost2 = clusters.getCluster(DummyCluster).getService(HDFS).
+        getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1);
+    serviceComponentHost1.setState(State.INSTALLED);
+    serviceComponentHost2.setState(State.INSTALLED);
+
+    HeartBeat hb = new HeartBeat();
+    hb.setResponseId(0);
+    hb.setNodeStatus(new HostStatus(HostStatus.Status.HEALTHY, DummyHostStatus));
+    hb.setHostname(DummyHostname1);
+
+    List<CommandReport> reports = new ArrayList<CommandReport>();
+    CommandReport cr = new CommandReport();
+    cr.setActionId(StageUtils.getActionId(requestId, stageId));
+    cr.setServiceName(HDFS);
+    cr.setTaskId(1);
+    cr.setRole(DATANODE);
+    cr.setStatus("COMPLETED");
+    cr.setStdErr("");
+    cr.setStdOut("");
+    cr.setExitCode(215);
+    cr.setRoleCommand("START");
+    cr.setClusterName(DummyCluster);
+
+    cr.setConfigurationTags(new HashMap<String, Map<String, String>>() {{
+      put("global", new HashMap<String, String>() {{
+        put("tag", "version1");
+      }});
+    }});
+
+    reports.add(cr);
+    hb.setReports(reports);
+
+    HostEntity host1 = hostDAO.findByName(DummyHostname1);
+    Assert.assertNotNull(host1);
+    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
+        Role.DATANODE, null, null);
+
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
+    expect(am.getTasks(anyObject(List.class))).andReturn(
+        new ArrayList<HostRoleCommand>() {{
+          add(command);
+        }});
+    replay(am);
+
+    HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am, aq);
+    HeartbeatProcessor heartbeatProcessor = handler.getHeartbeatProcessor();
+    heartbeatProcessor.processHeartbeat(hb);
+
+    // the heartbeat test passed if actual configs are populated
+    Assert.assertNotNull(serviceComponentHost1.getActualConfigs());
+    Assert.assertEquals(serviceComponentHost1.getActualConfigs().size(), 1);
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testRestartRequiredAfterInstallClient() throws Exception {
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
+    Service hdfs = cluster.addService(HDFS);
+    hdfs.persist();
+    hdfs.addServiceComponent(HDFS_CLIENT).persist();
+    hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
+
+    ActionQueue aq = new ActionQueue();
+
+    ServiceComponentHost serviceComponentHost = clusters.getCluster(DummyCluster).getService(HDFS).
+        getServiceComponent(HDFS_CLIENT).getServiceComponentHost(DummyHostname1);
+
+    serviceComponentHost.setState(State.INSTALLED);
+    serviceComponentHost.setRestartRequired(true);
+
+    HeartBeat hb = new HeartBeat();
+    hb.setResponseId(0);
+    hb.setNodeStatus(new HostStatus(HostStatus.Status.HEALTHY, DummyHostStatus));
+    hb.setHostname(DummyHostname1);
+
+
+    List<CommandReport> reports = new ArrayList<CommandReport>();
+    CommandReport cr = new CommandReport();
+    cr.setActionId(StageUtils.getActionId(requestId, stageId));
+    cr.setServiceName(HDFS);
+    cr.setRoleCommand("INSTALL");
+    cr.setCustomCommand("EXECUTION_COMMAND");
+    cr.setTaskId(1);
+    cr.setRole(HDFS_CLIENT);
+    cr.setStatus("COMPLETED");
+    cr.setStdErr("");
+    cr.setStdOut("");
+    cr.setExitCode(215);
+    cr.setClusterName(DummyCluster);
+    cr.setConfigurationTags(new HashMap<String, Map<String, String>>() {{
+      put("global", new HashMap<String, String>() {{
+        put("tag", "version1");
+      }});
+    }});
+    reports.add(cr);
+    hb.setReports(reports);
+
+    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
+        Role.DATANODE, null, null);
+
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
+    expect(am.getTasks(anyObject(List.class))).andReturn(
+        new ArrayList<HostRoleCommand>() {{
+          add(command);
+          add(command);
+        }});
+    replay(am);
+
+    HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am, aq);
+    HeartbeatProcessor heartbeatProcessor = handler.getHeartbeatProcessor();
+    heartbeatProcessor.processHeartbeat(hb);
+
+    Assert.assertNotNull(serviceComponentHost.getActualConfigs());
+    Assert.assertFalse(serviceComponentHost.isRestartRequired());
+    Assert.assertEquals(serviceComponentHost.getActualConfigs().size(), 1);
+
+  }
+
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testHeartbeatCustomCommandWithConfigs() throws Exception {
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
+    Service hdfs = cluster.addService(HDFS);
+    hdfs.persist();
+    hdfs.addServiceComponent(DATANODE).persist();
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(NAMENODE).persist();
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
+    hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+
+    ActionQueue aq = new ActionQueue();
+
+    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
+        getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
+    ServiceComponentHost serviceComponentHost2 = clusters.getCluster(DummyCluster).getService(HDFS).
+        getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1);
+    serviceComponentHost1.setState(State.INSTALLED);
+    serviceComponentHost2.setState(State.INSTALLED);
+
+    HeartBeat hb = new HeartBeat();
+    hb.setResponseId(0);
+    hb.setNodeStatus(new HostStatus(HostStatus.Status.HEALTHY, DummyHostStatus));
+    hb.setHostname(DummyHostname1);
+
+    List<CommandReport> reports = new ArrayList<CommandReport>();
+    CommandReport cr = new CommandReport();
+    cr.setActionId(StageUtils.getActionId(requestId, stageId));
+    cr.setServiceName(HDFS);
+    cr.setRoleCommand("CUSTOM_COMMAND");
+    cr.setCustomCommand("RESTART");
+    cr.setTaskId(1);
+    cr.setRole(DATANODE);
+    cr.setStatus("COMPLETED");
+    cr.setStdErr("");
+    cr.setStdOut("");
+    cr.setExitCode(215);
+    cr.setClusterName(DummyCluster);
+    cr.setConfigurationTags(new HashMap<String, Map<String,String>>() {{
+      put("global", new HashMap<String,String>() {{ put("tag", "version1"); }});
+    }});
+    CommandReport crn = new CommandReport();
+    crn.setActionId(StageUtils.getActionId(requestId, stageId));
+    crn.setServiceName(HDFS);
+    crn.setRoleCommand("CUSTOM_COMMAND");
+    crn.setCustomCommand("START");
+    crn.setTaskId(1);
+    crn.setRole(NAMENODE);
+    crn.setStatus("COMPLETED");
+    crn.setStdErr("");
+    crn.setStdOut("");
+    crn.setExitCode(215);
+    crn.setClusterName(DummyCluster);
+    crn.setConfigurationTags(new HashMap<String, Map<String,String>>() {{
+      put("global", new HashMap<String,String>() {{ put("tag", "version1"); }});
+    }});
+
+    reports.add(cr);
+    reports.add(crn);
+    hb.setReports(reports);
+
+    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
+        Role.DATANODE, null, null);
+
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
+    expect(am.getTasks(anyObject(List.class))).andReturn(
+        new ArrayList<HostRoleCommand>() {{
+          add(command);
+          add(command);
+        }});
+    replay(am);
+
+    HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am, aq);
+    HeartbeatProcessor heartbeatProcessor = handler.getHeartbeatProcessor();
+    heartbeatProcessor.processHeartbeat(hb);
+
+    // the heartbeat test passes if actual configs are populated
+    Assert.assertNotNull(serviceComponentHost1.getActualConfigs());
+    Assert.assertEquals(1, serviceComponentHost1.getActualConfigs().size());
+    Assert.assertNotNull(serviceComponentHost2.getActualConfigs());
+    Assert.assertEquals(1, serviceComponentHost2.getActualConfigs().size());
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testHeartbeatCustomStartStop() throws Exception {
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
+    Service hdfs = cluster.addService(HDFS);
+    hdfs.persist();
+    hdfs.addServiceComponent(DATANODE).persist();
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(NAMENODE).persist();
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
+    hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+
+    ActionQueue aq = new ActionQueue();
+
+    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
+        getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
+    ServiceComponentHost serviceComponentHost2 = clusters.getCluster(DummyCluster).getService(HDFS).
+        getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1);
+    serviceComponentHost1.setState(State.INSTALLED);
+    serviceComponentHost2.setState(State.STARTED);
+    serviceComponentHost1.setRestartRequired(true);
+    serviceComponentHost2.setRestartRequired(true);
+
+    HeartBeat hb = new HeartBeat();
+    hb.setResponseId(0);
+    hb.setNodeStatus(new HostStatus(HostStatus.Status.HEALTHY, DummyHostStatus));
+    hb.setHostname(DummyHostname1);
+
+    List<CommandReport> reports = new ArrayList<CommandReport>();
+    CommandReport cr = new CommandReport();
+    cr.setActionId(StageUtils.getActionId(requestId, stageId));
+    cr.setServiceName(HDFS);
+    cr.setRoleCommand("CUSTOM_COMMAND");
+    cr.setCustomCommand("START");
+    cr.setTaskId(1);
+    cr.setRole(DATANODE);
+    cr.setStatus("COMPLETED");
+    cr.setStdErr("");
+    cr.setStdOut("");
+    cr.setExitCode(215);
+    cr.setClusterName(DummyCluster);
+    CommandReport crn = new CommandReport();
+    crn.setActionId(StageUtils.getActionId(requestId, stageId));
+    crn.setServiceName(HDFS);
+    crn.setRoleCommand("CUSTOM_COMMAND");
+    crn.setCustomCommand("STOP");
+    crn.setTaskId(1);
+    crn.setRole(NAMENODE);
+    crn.setStatus("COMPLETED");
+    crn.setStdErr("");
+    crn.setStdOut("");
+    crn.setExitCode(215);
+    crn.setClusterName(DummyCluster);
+
+    reports.add(cr);
+    reports.add(crn);
+    hb.setReports(reports);
+
+    assertTrue(serviceComponentHost1.isRestartRequired());
+
+    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
+        Role.DATANODE, null, null);
+
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
+    expect(am.getTasks(anyObject(List.class))).andReturn(
+        new ArrayList<HostRoleCommand>() {{
+          add(command);
+          add(command);
+        }});
+    replay(am);
+
+    HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am, aq);
+    HeartbeatProcessor heartbeatProcessor = handler.getHeartbeatProcessor();
+    heartbeatProcessor.processHeartbeat(hb);
+
+    // the START report should move the DATANODE to STARTED and clear its restart flag,
+    // while the STOP report should move the NAMENODE to INSTALLED and leave its flag set
+    State componentState1 = serviceComponentHost1.getState();
+    assertEquals(State.STARTED, componentState1);
+    assertFalse(serviceComponentHost1.isRestartRequired());
+    State componentState2 = serviceComponentHost2.getState();
+    assertEquals(State.INSTALLED, componentState2);
+    assertTrue(serviceComponentHost2.isRestartRequired());
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testStatusHeartbeat() throws Exception {
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
+    Service hdfs = cluster.addService(HDFS);
+    hdfs.persist();
+    hdfs.addServiceComponent(DATANODE).persist();
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(NAMENODE).persist();
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
+    hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+
+    ActionQueue aq = new ActionQueue();
+
+    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
+        getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
+    ServiceComponentHost serviceComponentHost2 = clusters.getCluster(DummyCluster).getService(HDFS).
+        getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1);
+    ServiceComponentHost serviceComponentHost3 = clusters.getCluster(DummyCluster).getService(HDFS).
+        getServiceComponent(SECONDARY_NAMENODE).getServiceComponentHost(DummyHostname1);
+    serviceComponentHost1.setState(State.INSTALLED);
+    serviceComponentHost1.setSecurityState(SecurityState.UNSECURED);
+    serviceComponentHost2.setState(State.INSTALLED);
+    serviceComponentHost2.setSecurityState(SecurityState.SECURING);
+    serviceComponentHost3.setState(State.STARTING);
+
+    HeartBeat hb = new HeartBeat();
+    hb.setTimestamp(System.currentTimeMillis());
+    hb.setResponseId(0);
+    hb.setHostname(DummyHostname1);
+    hb.setNodeStatus(new HostStatus(HostStatus.Status.HEALTHY, DummyHostStatus));
+    hb.setReports(new ArrayList<CommandReport>());
+    ArrayList<ComponentStatus> componentStatuses = new ArrayList<ComponentStatus>();
+    ComponentStatus componentStatus1 = new ComponentStatus();
+    componentStatus1.setClusterName(DummyCluster);
+    componentStatus1.setServiceName(HDFS);
+    componentStatus1.setMessage(DummyHostStatus);
+    componentStatus1.setStatus(State.STARTED.name());
+    componentStatus1.setSecurityState(SecurityState.SECURED_KERBEROS.name());
+    componentStatus1.setComponentName(DATANODE);
+    componentStatuses.add(componentStatus1);
+    ComponentStatus componentStatus2 = new ComponentStatus();
+    componentStatus2.setClusterName(DummyCluster);
+    componentStatus2.setServiceName(HDFS);
+    componentStatus2.setMessage(DummyHostStatus);
+    componentStatus2.setStatus(State.STARTED.name());
+    componentStatus2.setSecurityState(SecurityState.UNSECURED.name());
+    componentStatus2.setComponentName(SECONDARY_NAMENODE);
+    componentStatuses.add(componentStatus2);
+    hb.setComponentStatus(componentStatuses);
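+    // no status is reported for NAMENODE, so serviceComponentHost2 is expected to keep
+    // its INSTALLED state and SECURING security state (asserted below)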
+
+    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
+        Role.DATANODE, null, null);
+
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
+    expect(am.getTasks(anyObject(List.class))).andReturn(
+        new ArrayList<HostRoleCommand>() {{
+          add(command);
+          add(command);
+        }});
+    replay(am);
+
+    HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am, aq);
+    HeartbeatProcessor heartbeatProcessor = handler.getHeartbeatProcessor();
+    heartbeatProcessor.processHeartbeat(hb);
+    State componentState1 = serviceComponentHost1.getState();
+    State componentState2 = serviceComponentHost2.getState();
+    State componentState3 = serviceComponentHost3.getState();
+    assertEquals(State.STARTED, componentState1);
+    assertEquals(SecurityState.SECURED_KERBEROS, serviceComponentHost1.getSecurityState());
+    assertEquals(State.INSTALLED, componentState2);
+    assertEquals(SecurityState.SECURING, serviceComponentHost2.getSecurityState());
+    assertEquals(State.STARTED, componentState3);
+    assertEquals(SecurityState.UNSECURED, serviceComponentHost3.getSecurityState());
+  }
+
+
+  @Test
+  public void testCommandReport() throws AmbariException {
+    injector.injectMembers(this);
+    clusters.addHost(DummyHostname1);
+    clusters.getHost(DummyHostname1).persist();
+
+    StackId dummyStackId = new StackId(DummyStackId);
+    clusters.addCluster(DummyCluster, dummyStackId);
+
+    ActionDBAccessor db = injector.getInstance(ActionDBAccessorImpl.class);
+    ActionManager am = new ActionManager(5000, 1200000, new ActionQueue(), clusters, db,
+        new HostsMap((String) null), unitOfWork, injector.getInstance(RequestFactory.class), null, null);
+    heartbeatTestHelper.populateActionDB(db, DummyHostname1, requestId, stageId);
+    Stage stage = db.getAllStages(requestId).get(0);
+    Assert.assertEquals(stageId, stage.getStageId());
+    stage.setHostRoleStatus(DummyHostname1, HBASE_MASTER, HostRoleStatus.QUEUED);
+    db.hostRoleScheduled(stage, DummyHostname1, HBASE_MASTER);
+    List<CommandReport> reports = new ArrayList<CommandReport>();
+    CommandReport cr = new CommandReport();
+    cr.setActionId(StageUtils.getActionId(requestId, stageId));
+    cr.setTaskId(1);
+    cr.setRole(HBASE_MASTER);
+    cr.setStatus("COMPLETED");
+    cr.setStdErr("");
+    cr.setStdOut("");
+    cr.setExitCode(215);
+
+    cr.setConfigurationTags(new HashMap<String, Map<String,String>>() {{
+      put("global", new HashMap<String,String>() {{ put("tag", "version1"); }});
+    }});
+
+
+    reports.add(cr);
+    am.processTaskResponse(DummyHostname1, reports, stage.getOrderedHostRoleCommands());
+    assertEquals(215,
+        am.getAction(requestId, stageId).getExitCode(DummyHostname1, HBASE_MASTER));
+    assertEquals(HostRoleStatus.COMPLETED, am.getAction(requestId, stageId)
+        .getHostRoleStatus(DummyHostname1, HBASE_MASTER));
+    Stage s = db.getAllStages(requestId).get(0);
+    assertEquals(HostRoleStatus.COMPLETED,
+        s.getHostRoleStatus(DummyHostname1, HBASE_MASTER));
+    assertEquals(215,
+        s.getExitCode(DummyHostname1, HBASE_MASTER));
+  }
+
+  /**
+   * Tests that when START and STOP commands are in progress and a heartbeat
+   * forces the host component state to STARTED or INSTALLED, there are no
+   * undesired side effects.
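+   * It also verifies that command reports alone drive the STARTING -> STARTED and
+   * STOPPING -> INSTALLED transitions when the component is already in a transient state.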
+   * @throws AmbariException
+   * @throws InvalidStateTransitionException
+   */
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testCommandReportOnHeartbeatUpdatedState()
+      throws AmbariException, InvalidStateTransitionException {
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
+    Service hdfs = cluster.addService(HDFS);
+    hdfs.persist();
+    hdfs.addServiceComponent(DATANODE).persist();
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+
+    ActionQueue aq = new ActionQueue();
+
+    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
+        getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
+    serviceComponentHost1.setState(State.INSTALLED);
+
+    HeartBeat hb = new HeartBeat();
+    hb.setTimestamp(System.currentTimeMillis());
+    hb.setResponseId(0);
+    hb.setHostname(DummyHostname1);
+    hb.setNodeStatus(new HostStatus(HostStatus.Status.HEALTHY, DummyHostStatus));
+
+    List<CommandReport> reports = new ArrayList<CommandReport>();
+    CommandReport cr = new CommandReport();
+    cr.setActionId(StageUtils.getActionId(requestId, stageId));
+    cr.setTaskId(1);
+    cr.setClusterName(DummyCluster);
+    cr.setServiceName(HDFS);
+    cr.setRole(DATANODE);
+    cr.setStatus(HostRoleStatus.IN_PROGRESS.toString());
+    cr.setStdErr("none");
+    cr.setStdOut("dummy output");
+    cr.setExitCode(777);
+    cr.setRoleCommand("START");
+    reports.add(cr);
+    hb.setReports(reports);
+    hb.setComponentStatus(new ArrayList<ComponentStatus>());
+
+    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
+        Role.DATANODE, null, null);
+
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
+    expect(am.getTasks(anyObject(List.class))).andReturn(
+        new ArrayList<HostRoleCommand>() {{
+          add(command);
+        }}).anyTimes();
+    replay(am);
+
+    HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am, aq);
+    HeartbeatProcessor heartbeatProcessor = handler.getHeartbeatProcessor();
+    heartbeatProcessor.processHeartbeat(hb);
+
+    assertEquals("Host state should  be " + State.INSTALLED,
+        State.INSTALLED, serviceComponentHost1.getState());
+
+    hb.setTimestamp(System.currentTimeMillis());
+    hb.setResponseId(1);
+    cr.setStatus(HostRoleStatus.COMPLETED.toString());
+    cr.setExitCode(0);
+
+    heartbeatProcessor.processHeartbeat(hb);
+    assertEquals("Host state should be " + State.STARTED,
+        State.STARTED, serviceComponentHost1.getState());
+
+    hb.setTimestamp(System.currentTimeMillis());
+    hb.setResponseId(2);
+    cr.setStatus(HostRoleStatus.IN_PROGRESS.toString());
+    cr.setRoleCommand("STOP");
+    cr.setExitCode(777);
+
+    heartbeatProcessor.processHeartbeat(hb);
+    assertEquals("Host state should be " + State.STARTED,
+        State.STARTED, serviceComponentHost1.getState());
+
+    hb.setTimestamp(System.currentTimeMillis());
+    hb.setResponseId(3);
+    cr.setStatus(HostRoleStatus.COMPLETED.toString());
+    cr.setExitCode(0);
+
+    heartbeatProcessor.processHeartbeat(hb);
+    assertEquals("Host state should be " + State.INSTALLED,
+        State.INSTALLED, serviceComponentHost1.getState());
+
+    // validate the transitions when the component is already in a transient state
+    // (STARTING/STOPPING), as it would be before any heartbeat for the command arrives
+    serviceComponentHost1.setState(State.STARTING);
+    cr.setStatus(HostRoleStatus.IN_PROGRESS.toString());
+    cr.setExitCode(777);
+    cr.setRoleCommand("START");
+    hb.setResponseId(4);
+
+    heartbeatProcessor.processHeartbeat(hb);
+    assertEquals("Host state should be " + State.STARTING,
+        State.STARTING, serviceComponentHost1.getState());
+
+    cr.setStatus(HostRoleStatus.COMPLETED.toString());
+    cr.setExitCode(0);
+    hb.setResponseId(5);
+
+    heartbeatProcessor.processHeartbeat(hb);
+    assertEquals("Host state should be " + State.STARTED,
+        State.STARTED, serviceComponentHost1.getState());
+
+    serviceComponentHost1.setState(State.STOPPING);
+    cr.setStatus(HostRoleStatus.IN_PROGRESS.toString());
+    cr.setExitCode(777);
+    cr.setRoleCommand("STOP");
+    hb.setResponseId(6);
+
+    heartbeatProcessor.processHeartbeat(hb);
+    assertEquals("Host state should be " + State.STOPPING,
+        State.STOPPING, serviceComponentHost1.getState());
+
+    cr.setStatus(HostRoleStatus.COMPLETED.toString());
+    cr.setExitCode(0);
+    hb.setResponseId(7);
+
+    heartbeatProcessor.processHeartbeat(hb);
+    assertEquals("Host state should be " + State.INSTALLED,
+        State.INSTALLED, serviceComponentHost1.getState());
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testUpgradeSpecificHandling() throws AmbariException, InvalidStateTransitionException {
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
+    Service hdfs = cluster.addService(HDFS);
+    hdfs.persist();
+    hdfs.addServiceComponent(DATANODE).persist();
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+
+    ActionQueue aq = new ActionQueue();
+
+    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
+        getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
+    serviceComponentHost1.setState(State.UPGRADING);
+
+    HeartBeat hb = new HeartBeat();
+    hb.setTimestamp(System.currentTimeMillis());
+    hb.setResponseId(0);
+    hb.setHostname(DummyHostname1);
+    hb.setNodeStatus(new HostStatus(HostStatus.Status.HEALTHY, DummyHostStatus));
+
+    List<CommandReport> reports = new ArrayList<CommandReport>();
+    CommandReport cr = new CommandReport();
+    cr.setActionId(StageUtils.getActionId(requestId, stageId));
+    cr.setTaskId(1);
+    cr.setClusterName(DummyCluster);
+    cr.setServiceName(HDFS);
+    cr.setRole(DATANODE);
+    cr.setRoleCommand("INSTALL");
+    cr.setStatus(HostRoleStatus.IN_PROGRESS.toString());
+    cr.setStdErr("none");
+    cr.setStdOut("dummy output");
+    cr.setExitCode(777);
+    reports.add(cr);
+    hb.setReports(reports);
+    hb.setComponentStatus(new ArrayList<ComponentStatus>());
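+    // only a COMPLETED report is expected to move the component out of UPGRADING;
+    // FAILED, PENDING and QUEUED reports should leave the state untouched (asserted below)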
+
+    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
+        Role.DATANODE, null, null);
+
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
+    expect(am.getTasks(anyObject(List.class))).andReturn(
+        new ArrayList<HostRoleCommand>() {{
+          add(command);
+        }}).anyTimes();
+    replay(am);
+
+    HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am, aq);
+    HeartbeatProcessor heartbeatProcessor = handler.getHeartbeatProcessor();
+    heartbeatProcessor.processHeartbeat(hb);
+
+    assertEquals("Host state should  be " + State.UPGRADING,
+        State.UPGRADING, serviceComponentHost1.getState());
+
+    hb.setTimestamp(System.currentTimeMillis());
+    hb.setResponseId(1);
+    cr.setStatus(HostRoleStatus.COMPLETED.toString());
+    cr.setExitCode(0);
+
+    heartbeatProcessor.processHeartbeat(hb);
+    assertEquals("Host state should be " + State.INSTALLED,
+        State.INSTALLED, serviceComponentHost1.getState());
+
+    serviceComponentHost1.setState(State.UPGRADING);
+    hb.setTimestamp(System.currentTimeMillis());
+    hb.setResponseId(2);
+    cr.setStatus(HostRoleStatus.FAILED.toString());
+    cr.setExitCode(3);
+
+    heartbeatProcessor.processHeartbeat(hb);
+    assertEquals("Host state should be " + State.UPGRADING,
+        State.UPGRADING, serviceComponentHost1.getState());
+
+    serviceComponentHost1.setState(State.UPGRADING);
+    hb.setTimestamp(System.currentTimeMillis());
+    hb.setResponseId(3);
+    cr.setStatus(HostRoleStatus.PENDING.toString());
+    cr.setExitCode(55);
+
+    heartbeatProcessor.processHeartbeat(hb);
+    assertEquals("Host state should be " + State.UPGRADING,
+        State.UPGRADING, serviceComponentHost1.getState());
+
+    serviceComponentHost1.setState(State.UPGRADING);
+    hb.setTimestamp(System.currentTimeMillis());
+    hb.setResponseId(4);
+    cr.setStatus(HostRoleStatus.QUEUED.toString());
+    cr.setExitCode(55);
+
+    heartbeatProcessor.processHeartbeat(hb);
+    assertEquals("Host state should be " + State.UPGRADING,
+        State.UPGRADING, serviceComponentHost1.getState());
+  }
+
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testCommandStatusProcesses() throws Exception {
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
+    Service hdfs = cluster.addService(HDFS);
+    hdfs.persist();
+    hdfs.addServiceComponent(DATANODE).persist();
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
+
+    ActionQueue aq = new ActionQueue();
+
+    HeartBeat hb = new HeartBeat();
+    hb.setTimestamp(System.currentTimeMillis());
+    hb.setResponseId(0);
+    hb.setHostname(DummyHostname1);
+    hb.setNodeStatus(new HostStatus(HostStatus.Status.HEALTHY, DummyHostStatus));
+    hb.setReports(new ArrayList<CommandReport>());
+
+    List<Map<String, String>> procs = new ArrayList<Map<String, String>>();
+    Map<String, String> proc1info = new HashMap<String, String>();
+    proc1info.put("name", "a");
+    proc1info.put("status", "RUNNING");
+    procs.add(proc1info);
+
+    Map<String, String> proc2info = new HashMap<String, String>();
+    proc2info.put("name", "b");
+    proc2info.put("status", "NOT_RUNNING");
+    procs.add(proc2info);
+
+    Map<String, Object> extra = new HashMap<String, Object>();
+    extra.put("processes", procs);
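+    // the "processes" entry of the extra map is what the heartbeat processor reads to
+    // populate ServiceComponentHost.getProcesses() (asserted below)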
+
+    ArrayList<ComponentStatus> componentStatuses = new ArrayList<ComponentStatus>();
+    ComponentStatus componentStatus1 = new ComponentStatus();
+    componentStatus1.setClusterName(DummyCluster);
+    componentStatus1.setServiceName(HDFS);
+    componentStatus1.setMessage(DummyHostStatus);
+    componentStatus1.setStatus(State.STARTED.name());
+    componentStatus1.setSecurityState(SecurityState.UNSECURED.name());
+    componentStatus1.setComponentName(DATANODE);
+
+    componentStatus1.setExtra(extra);
+    componentStatuses.add(componentStatus1);
+    hb.setComponentStatus(componentStatuses);
+
+    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
+        Role.DATANODE, null, null);
+
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
+    expect(am.getTasks(anyObject(List.class))).andReturn(
+        new ArrayList<HostRoleCommand>() {{
+          add(command);
+        }}).anyTimes();
+    replay(am);
+
+    HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am, aq);
+    HeartbeatProcessor heartbeatProcessor = handler.getHeartbeatProcessor();
+    heartbeatProcessor.processHeartbeat(hb);
+    ServiceComponentHost sch = hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
+
+    Assert.assertEquals(Integer.valueOf(2), Integer.valueOf(sch.getProcesses().size()));
+
+    hb = new HeartBeat();
+    hb.setTimestamp(System.currentTimeMillis());
+    hb.setResponseId(1);
+    hb.setHostname(DummyHostname1);
+    hb.setNodeStatus(new HostStatus(HostStatus.Status.HEALTHY, DummyHostStatus));
+    hb.setReports(new ArrayList<CommandReport>());
+
+    componentStatus1 = new ComponentStatus();
+    componentStatus1.setClusterName(DummyCluster);
+    componentStatus1.setServiceName(HDFS);
+    componentStatus1.setMessage(DummyHostStatus);
+    componentStatus1.setStatus(State.STARTED.name());
+    componentStatus1.setSecurityState(SecurityState.UNSECURED.name());
+    componentStatus1.setComponentName(DATANODE);
+    hb.setComponentStatus(Collections.singletonList(componentStatus1));
+
+    heartbeatProcessor.processHeartbeat(hb);
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testComponentUpgradeCompleteReport() throws AmbariException, InvalidStateTransitionException {
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
+    Service hdfs = cluster.addService(HDFS);
+    hdfs.persist();
+    hdfs.addServiceComponent(DATANODE).persist();
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(NAMENODE).persist();
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(HDFS_CLIENT).persist();
+    hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
+
+    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
+        getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
+    ServiceComponentHost serviceComponentHost2 = clusters.getCluster(DummyCluster).getService(HDFS).
+        getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1);
+
+    StackId stack130 = new StackId("HDP-1.3.0");
+    StackId stack120 = new StackId("HDP-1.2.0");
+
+    serviceComponentHost1.setState(State.UPGRADING);
+    serviceComponentHost2.setState(State.INSTALLING);
+
+    serviceComponentHost1.setStackVersion(stack120);
+    serviceComponentHost1.setDesiredStackVersion(stack130);
+    serviceComponentHost2.setStackVersion(stack120);
+
+    HeartBeat hb = new HeartBeat();
+    hb.setTimestamp(System.currentTimeMillis());
+    hb.setResponseId(0);
+    hb.setHostname(DummyHostname1);
+    hb.setNodeStatus(new HostStatus(HostStatus.Status.HEALTHY, DummyHostStatus));
+    CommandReport cr1 = new CommandReport();
+    cr1.setActionId(StageUtils.getActionId(requestId, stageId));
+    cr1.setTaskId(1);
+    cr1.setClusterName(DummyCluster);
+    cr1.setServiceName(HDFS);
+    cr1.setRole(DATANODE);
+    cr1.setStatus(HostRoleStatus.COMPLETED.toString());
+    cr1.setStdErr("none");
+    cr1.setStdOut("dummy output");
+    cr1.setExitCode(0);
+    cr1.setRoleCommand(RoleCommand.UPGRADE.toString());
+
+    CommandReport cr2 = new CommandReport();
+    cr2.setActionId(StageUtils.getActionId(requestId, stageId));
+    cr2.setTaskId(2);
+    cr2.setClusterName(DummyCluster);
+    cr2.setServiceName(HDFS);
+    cr2.setRole(NAMENODE);
+    cr2.setStatus(HostRoleStatus.COMPLETED.toString());
+    cr2.setStdErr("none");
+    cr2.setStdOut("dummy output");
+    cr2.setExitCode(0);
+    cr2.setRoleCommand(RoleCommand.UPGRADE.toString());
+    ArrayList<CommandReport> reports = new ArrayList<CommandReport>();
+    reports.add(cr1);
+    reports.add(cr2);
+    hb.setReports(reports);
+
+    ActionQueue aq = new ActionQueue();
+    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
+        Role.DATANODE, null, null);
+
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
+    expect(am.getTasks(anyObject(List.class))).andReturn(
+        new ArrayList<HostRoleCommand>() {{
+          add(command);
+          add(command);
+        }});
+    replay(am);
+
+    HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am, aq);
+    HeartbeatProcessor heartbeatProcessor = handler.getHeartbeatProcessor();
+    heartbeatProcessor.processHeartbeat(hb);
+
+    assertEquals("Stack version for SCH should be updated to " +
+            serviceComponentHost1.getDesiredStackVersion(),
+        stack130, serviceComponentHost1.getStackVersion());
+    assertEquals("Stack version for SCH should not change ",
+        stack120, serviceComponentHost2.getStackVersion());
+  }
+
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testComponentUpgradeFailReport() throws AmbariException, InvalidStateTransitionException {
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
+    Service hdfs = cluster.addService(HDFS);
+    hdfs.persist();
+    hdfs.addServiceComponent(DATANODE).persist();
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(NAMENODE).persist();
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(HDFS_CLIENT).persist();
+    hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
+
+    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
+        getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
+    ServiceComponentHost serviceComponentHost2 = clusters.getCluster(DummyCluster).getService(HDFS).
+        getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1);
+
+    StackId stack130 = new StackId("HDP-1.3.0");
+    StackId stack120 = new StackId("HDP-1.2.0");
+
+    serviceComponentHost1.setState(State.UPGRADING);
+    serviceComponentHost2.setState(State.INSTALLING);
+
+    serviceComponentHost1.setStackVersion(stack120);
+    serviceComponentHost1.setDesiredStackVersion(stack130);
+    serviceComponentHost2.setStackVersion(stack120);
+
+    Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action manager test",
+        "clusterHostInfo", "commandParamsStage", "hostParamsStage");
+    s.setStageId(stageId);
+    s.addHostRoleExecutionCommand(DummyHostname1, Role.DATANODE, RoleCommand.UPGRADE,
+        new ServiceComponentHostUpgradeEvent(Role.DATANODE.toString(),
+            DummyHostname1, System.currentTimeMillis(), "HDP-1.3.0"),
+        DummyCluster, "HDFS", false, false);
+    s.addHostRoleExecutionCommand(DummyHostname1, Role.NAMENODE, RoleCommand.INSTALL,
+        new ServiceComponentHostInstallEvent(Role.NAMENODE.toString(),
+            DummyHostname1, System.currentTimeMillis(), "HDP-1.3.0"),
+        DummyCluster, "HDFS", false, false);
+    List<Stage> stages = new ArrayList<Stage>();
+    stages.add(s);
+    Request request = new Request(stages, clusters);
+    actionDBAccessor.persistActions(request);
+    CommandReport cr = new CommandReport();
+    cr.setActionId(StageUtils.getActionId(requestId, stageId));
+    cr.setTaskId(1);
+    cr.setClusterName(DummyCluster);
+    cr.setServiceName(HDFS);
+    cr.setRole(DATANODE);
+    cr.setStatus(HostRoleStatus.IN_PROGRESS.toString());
+    cr.setStdErr("none");
+    cr.setStdOut("dummy output");
+    actionDBAccessor.updateHostRoleState(DummyHostname1, requestId, stageId,
+        Role.DATANODE.name(), cr);
+    cr.setRole(NAMENODE);
+    cr.setTaskId(2);
+    actionDBAccessor.updateHostRoleState(DummyHostname1, requestId, stageId,
+        Role.NAMENODE.name(), cr);
+
+    HeartBeat hb = new HeartBeat();
+    hb.setTimestamp(System.currentTimeMillis());
+    hb.setResponseId(0);
+    hb.setHostname(DummyHostname1);
+    hb.setNodeStatus(new HostStatus(HostStatus.Status.HEALTHY, DummyHostStatus));
+    CommandReport cr1 = new CommandReport();
+    cr1.setActionId(StageUtils.getActionId(requestId, stageId));
+    cr1.setTaskId(1);
+    cr1.setClusterName(DummyCluster);
+    cr1.setServiceName(HDFS);
+    cr1.setRole(DATANODE);
+    cr1.setRoleCommand("INSTALL");
+    cr1.setStatus(HostRoleStatus.FAILED.toString());
+    cr1.setStdErr("none");
+    cr1.setStdOut("dummy output");
+    cr1.setExitCode(0);
+
+    CommandReport cr2 = new CommandReport();
+    cr2.setActionId(StageUtils.getActionId(requestId, stageId));
+    cr2.setTaskId(2);
+    cr2.setClusterName(DummyCluster);
+    cr2.setServiceName(HDFS);
+    cr2.setRole(NAMENODE);
+    cr2.setRoleCommand("INSTALL");
+    cr2.setStatus(HostRoleStatus.FAILED.toString());
+    cr2.setStdErr("none");
+    cr2.setStdOut("dummy output");
+    cr2.setExitCode(0);
+    ArrayList<CommandReport> reports = new ArrayList<CommandReport>();
+    reports.add(cr1);
+    reports.add(cr2);
+    hb.setReports(reports);
+
+    ActionQueue aq = new ActionQueue();
+    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
+        Role.DATANODE, null, null);
+
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
+    expect(am.getTasks(anyObject(List.class))).andReturn(
+        new ArrayList<HostRoleCommand>() {{
+          add(command);
+          add(command);
+        }});
+    replay(am);
+
+    HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am, aq);
+    HeartbeatProcessor heartbeatProcessor = handler.getHeartbeatProcessor();
+    heartbeatProcessor.processHeartbeat(hb);
+
+    assertEquals("State of SCH should change after fail report",
+        State.UPGRADING, serviceComponentHost1.getState());
+    assertEquals("State of SCH should change after fail report",
+        State.INSTALL_FAILED, serviceComponentHost2.getState());
+    assertEquals("Stack version of SCH should not change after fail report",
+        stack120, serviceComponentHost1.getStackVersion());
+    assertEquals("Stack version of SCH should not change after fail report",
+        stack130, serviceComponentHost1.getDesiredStackVersion());
+    assertEquals("Stack version of SCH should not change after fail report",
+        State.INSTALL_FAILED, serviceComponentHost2.getState());
+  }
+
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testComponentUpgradeInProgressReport() throws AmbariException, InvalidStateTransitionException {
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
+    Service hdfs = cluster.addService(HDFS);
+    hdfs.persist();
+    hdfs.addServiceComponent(DATANODE).persist();
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(NAMENODE).persist();
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(HDFS_CLIENT).persist();
+    hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
+
+    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
+        getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
+    ServiceComponentHost serviceComponentHost2 = clusters.getCluster(DummyCluster).getService(HDFS).
+        getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1);
+
+    StackId stack130 = new StackId("HDP-1.3.0");
+    StackId stack120 = new StackId("HDP-1.2.0");
+
+    serviceComponentHost1.setState(State.UPGRADING);
+    serviceComponentHost2.setState(State.INSTALLING);
+
+    serviceComponentHost1.setStackVersion(stack120);
+    serviceComponentHost1.setDesiredStackVersion(stack130);
+    serviceComponentHost2.setStackVersion(stack120);
+
+    HeartBeat hb = new HeartBeat();
+    hb.setTimestamp(System.currentTimeMillis());
+    hb.setResponseId(0);
+    hb.setHostname(DummyHostname1);
+    hb.setNodeStatus(new HostStatus(HostStatus.Status.HEALTHY, DummyHostStatus));
+    CommandReport cr1 = new CommandReport();
+    cr1.setActionId(StageUtils.getActionId(requestId, stageId));
+    cr1.setTaskId(1);
+    cr1.setClusterName(DummyCluster);
+    cr1.setServiceName(HDFS);
+    cr1.setRole(DATANODE);
+    cr1.setRoleCommand("INSTALL");
+    cr1.setStatus(HostRoleStatus.IN_PROGRESS.toString());
+    cr1.setStdErr("none");
+    cr1.setStdOut("dummy output");
+    cr1.setExitCode(777);
+
+    CommandReport cr2 = new CommandReport();
+    cr2.setActionId(StageUtils.getActionId(requestId, stageId));
+    cr2.setTaskId(2);
+    cr2.setClusterName(DummyCluster);
+    cr2.setServiceName(HDFS);
+    cr2.setRole(NAMENODE);
+    cr2.setRoleCommand("INSTALL");
+    cr2.setStatus(HostRoleStatus.IN_PROGRESS.toString());
+    cr2.setStdErr("none");
+    cr2.setStdOut("dummy output");
+    cr2.setExitCode(777);
+    ArrayList<CommandReport> reports = new ArrayList<CommandReport>();
+    reports.add(cr1);
+    reports.add(cr2);
+    hb.setReports(reports);
+
+    ActionQueue aq = new ActionQueue();
+    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
+        Role.DATANODE, null, null);
+
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
+    expect(am.getTasks(anyObject(List.class))).andReturn(
+        new ArrayList<HostRoleCommand>() {{
+          add(command);
+          add(command);
+        }});
+    replay(am);
+
+    HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am, aq);
+    handler.handleHeartBeat(hb);
+    assertEquals("State of SCH not change while operation is in progress",
+        State.UPGRADING, serviceComponentHost1.getState());
+    assertEquals("Stack version of SCH should not change after in progress report",
+        stack130, serviceComponentHost1.getDesiredStackVersion());
+    assertEquals("State of SCH not change while operation is  in progress",
+        State.INSTALLING, serviceComponentHost2.getState());
+  }
+
+
+  /**
+   * Tests that if there is an invalid cluster in heartbeat data, the heartbeat
+   * doesn't fail.
+   *
+   * @throws Exception
+   */
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testHeartBeatWithAlertAndInvalidCluster() throws Exception {
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
+
+    expect(am.getTasks(anyObject(List.class))).andReturn(
+        new ArrayList<HostRoleCommand>());
+
+    replay(am);
+
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
+    Clusters fsm = clusters;
+    Host hostObject = clusters.getHost(DummyHostname1);
+    hostObject.setIPv4("ipv4");
+    hostObject.setIPv6("ipv6");
+    hostObject.setOsType(DummyOsType);
+
+    ActionQueue aq = new ActionQueue();
+
+    HeartBeatHandler handler = new HeartBeatHandler(fsm, aq, am, injector);
+    Register reg = new Register();
+    HostInfo hi = new HostInfo();
+    hi.setHostName(DummyHostname1);
+    hi.setOS(DummyOs);
+    hi.setOSRelease(DummyOSRelease);
+    reg.setHostname(DummyHostname1);
+    reg.setHardwareProfile(hi);
+    reg.setAgentVersion(metaInfo.getServerVersion());
+    handler.handleRegistration(reg);
+
+    hostObject.setState(HostState.UNHEALTHY);
+
+    ExecutionCommand execCmd = new ExecutionCommand();
+    execCmd.setRequestAndStage(2, 34);
+    execCmd.setHostname(DummyHostname1);
+    aq.enqueue(DummyHostname1, execCmd);
+
+    HeartBeat hb = new HeartBeat();
+    HostStatus hs = new HostStatus(HostStatus.Status.HEALTHY, DummyHostStatus);
+
+    hb.setResponseId(0);
+    hb.setNodeStatus(hs);
+    hb.setHostname(DummyHostname1);
+
+    Alert alert = new Alert("foo", "bar", "baz", "foobar", "foobarbaz",
+        AlertState.OK);
+
+    alert.setCluster("BADCLUSTER");
+
+    List<Alert> alerts = Collections.singletonList(alert);
+    hb.setAlerts(alerts);
+
+    // should NOT throw AmbariException from alerts.
+    handler.getHeartbeatProcessor().processHeartbeat(hb);
+  }
+
+
+  @Test
+  public void testInstallPackagesWithVersion() throws Exception {
+    // required since this test method checks the DAO result of handling a
+    // heartbeat which performs some async tasks
+    EventBusSynchronizer.synchronizeAmbariEventPublisher(injector);
+
+    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
+        Role.DATANODE, null, null);
+
+    ActionManager am = heartbeatTestHelper.getMockActionManager();
+    expect(am.getTasks(anyObject(List.class))).andReturn(
+        Collections.singletonList(command)).anyTimes();
+    replay(am);
+
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
+    HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am, new ActionQueue());
+    HeartbeatProcessor heartbeatProcessor = handler.getHeartbeatProcessor();
+    HeartBeat hb = new HeartBeat();
+
+    JsonObject json = new JsonObject();
+    json.addProperty("actual_version", "2.2.1.0-2222");
+    json.addProperty("package_installation_result", "SUCCESS");
+    json.addProperty("installed_repository_version", "0.1");
+    json.addProperty("stack_id", cluster.getDesiredStackVersion().getStackId());
+
+
+    CommandReport cmdReport = new CommandReport();
+    cmdReport.setActionId(StageUtils.getActionId(requestId, stageId));
+    cmdReport.setTaskId(1);
+    cmdReport.setCustomCommand("install_packages");
+    cmdReport.setStructuredOut(json.toString());
+    cmdReport.setRoleCommand(RoleCommand.ACTIONEXECUTE.name());
+    cmdReport.setStatus(HostRoleStatus.COMPLETED.name());
+    cmdReport.setRole("install_packages");
+    cmdReport.setClusterName(DummyCluster);
+
+    hb.setReports(Collections.singletonList(cmdReport));
+    hb.setTimestamp(0L);
+    hb.setResponseId(0);
+    hb.setNodeStatus(new HostStatus(HostStatus.Status.HEALTHY, DummyHostStatus));
+    hb.setHostname(DummyHostname1);
+    hb.setComponentStatus(new ArrayList<ComponentStatus>());
+
+    StackId stackId = new StackId("HDP", "0.1");
+
+    RepositoryVersionDAO dao = injector.getInstance(RepositoryVersionDAO.class);
+    RepositoryVersionEntity entity = dao.findByStackAndVersion(stackId, "0.1");
+    Assert.assertNotNull(entity);
+
+    heartbeatProcessor.processHeartbeat(hb);
+
+    entity = dao.findByStackAndVersion(stackId, "0.1");
+    Assert.assertNull(entity);
+
+    entity = dao.findByStackAndVersion(stackId, "2.2.1.0-2222");
+    Assert.assertNotNull(entity);
+  }
+
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/083ac6da/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
new file mode 100644
index 0000000..02974ca
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
@@ -0,0 +1,229 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.agent;
+
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.Singleton;
+import com.google.inject.persist.UnitOfWork;
+import junit.framework.Assert;
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.Role;
+import org.apache.ambari.server.RoleCommand;
+import org.apache.ambari.server.actionmanager.ActionDBAccessor;
+import org.apache.ambari.server.actionmanager.ActionManager;
+import org.apache.ambari.server.actionmanager.Request;
+import org.apache.ambari.server.actionmanager.RequestFactory;
+import org.apache.ambari.server.actionmanager.Stage;
+import org.apache.ambari.server.actionmanager.StageFactory;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.HostsMap;
+import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
+import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.OrmTestHelper;
+import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.dao.HostDAO;
+import org.apache.ambari.server.orm.dao.ResourceTypeDAO;
+import org.apache.ambari.server.orm.dao.StackDAO;
+import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.HostEntity;
+import org.apache.ambari.server.orm.entities.ResourceEntity;
+import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
+import org.apache.ambari.server.orm.entities.StackEntity;
+import org.apache.ambari.server.security.authorization.ResourceType;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.RepositoryVersionState;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
+import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartEvent;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyCluster;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyHostname1;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyOSRelease;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyOs;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyStackId;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HBASE;
+import static org.easymock.EasyMock.createMockBuilder;
+import static org.easymock.EasyMock.createNiceMock;
+
+@Singleton
+public class HeartbeatTestHelper {
+
+  @Inject
+  Clusters clusters;
+
+  @Inject
+  Injector injector;
+
+  @Inject
+  AmbariMetaInfo metaInfo;
+
+  @Inject
+  ActionDBAccessor actionDBAccessor;
+
+  @Inject
+  ClusterDAO clusterDAO;
+
+  @Inject
+  StackDAO stackDAO;
+
+  @Inject
+  UnitOfWork unitOfWork;
+
+  @Inject
+  ResourceTypeDAO resourceTypeDAO;
+
+  @Inject
+  OrmTestHelper helper;
+
+  @Inject
+  private HostDAO hostDAO;
+
+  @Inject
+  private StageFactory stageFactory;
+
+  public final static StackId HDP_22_STACK = new StackId("HDP", "2.2.0");
+
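+  // in-memory test module with the recovery.* agent properties overridden for the
+  // heartbeat tests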
+  public static InMemoryDefaultTestModule getTestModule() {
+    return new InMemoryDefaultTestModule(){
+
+      @Override
+      protected void configure() {
+        getProperties().put("recovery.type", "FULL");
+        getProperties().put("recovery.lifetime_max_count", "10");
+        getProperties().put("recovery.max_count", "4");
+        getProperties().put("recovery.window_in_minutes", "23");
+        getProperties().put("recovery.retry_interval", "2");
+        super.configure();
+      }
+    };
+  }
+
+  public HeartBeatHandler getHeartBeatHandler(ActionManager am, ActionQueue aq)
+      throws InvalidStateTransitionException, AmbariException {
+    HeartBeatHandler handler = new HeartBeatHandler(clusters, aq, am, injector);
+    Register reg = new Register();
+    HostInfo hi = new HostInfo();
+    hi.setHostName(DummyHostname1);
+    hi.setOS(DummyOs);
+    hi.setOSRelease(DummyOSRelease);
+    reg.setHostname(DummyHostname1);
+    reg.setResponseId(0);
+    reg.setHardwareProfile(hi);
+    reg.setAgentVersion(metaInfo.getServerVersion());
+    handler.handleRegistration(reg);
+    return handler;
+  }
+
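+  // partial mock of ActionManager: only getTasks() is mocked, the remaining behaviour is
+  // backed by the real constructor with nice-mocked collaborators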
+  public ActionManager getMockActionManager() {
+    ActionQueue actionQueueMock = createNiceMock(ActionQueue.class);
+    Clusters clustersMock = createNiceMock(Clusters.class);
+    Configuration configurationMock = createNiceMock(Configuration.class);
+
+    ActionManager actionManager = createMockBuilder(ActionManager.class).
+        addMockedMethod("getTasks").
+        withConstructor((long)0, (long)0, actionQueueMock, clustersMock,
+            actionDBAccessor, new HostsMap((String) null), unitOfWork,
+            injector.getInstance(RequestFactory.class), configurationMock, createNiceMock(AmbariEventPublisher.class)).
+        createMock();
+    return actionManager;
+  }
+
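+  // creates a single-host cluster named DummyCluster on the HDP-2.2.0 stack entity, with
+  // its current/desired stack set to DummyStackId and an UPGRADING cluster version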
+  public Cluster getDummyCluster()
+      throws AmbariException {
+    StackEntity stackEntity = stackDAO.find(HDP_22_STACK.getStackName(), HDP_22_STACK.getStackVersion());
+    org.junit.Assert.assertNotNull(stackEntity);
+
+    // Create the cluster
+    ResourceTypeEntity resourceTypeEntity = new ResourceTypeEntity();
+    resourceTypeEntity.setId(ResourceType.CLUSTER.getId());
+    resourceTypeEntity.setName(ResourceType.CLUSTER.name());
+    resourceTypeEntity = resourceTypeDAO.merge(resourceTypeEntity);
+
+    ResourceEntity resourceEntity = new ResourceEntity();
+    resourceEntity.setResourceType(resourceTypeEntity);
+
+    ClusterEntity clusterEntity = new ClusterEntity();
+    clusterEntity.setClusterName(DummyCluster);
+    clusterEntity.setClusterInfo("test_cluster_info1");
+    clusterEntity.setResource(resourceEntity);
+    clusterEntity.setDesiredStack(stackEntity);
+
+    clusterDAO.create(clusterEntity);
+
+    StackId stackId = new StackId(DummyStackId);
+
+    Cluster cluster = clusters.getCluster(DummyCluster);
+
+    cluster.setDesiredStackVersion(stackId);
+    cluster.setCurrentStackVersion(stackId);
+    helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
+    cluster.createClusterVersion(stackId, stackId.getStackVersion(), "admin",
+        RepositoryVersionState.UPGRADING);
+
+    Set<String> hostNames = new HashSet<String>(){{
+      add(DummyHostname1);
+    }};
+
+    Map<String, String> hostAttributes = new HashMap<String, String>();
+    hostAttributes.put("os_family", "redhat");
+    hostAttributes.put("os_release_version", "6.3");
+
+    List<HostEntity> hostEntities = new ArrayList<HostEntity>();
+    for(String hostName : hostNames) {
+      clusters.addHost(hostName);
+      Host host = clusters.getHost(hostName);
+      host.setHostAttributes(hostAttributes);
+      host.persist();
+
+      HostEntity hostEntity = hostDAO.findByName(hostName);
+      Assert.assertNotNull(hostEntity);
+      hostEntities.add(hostEntity);
+    }
+    clusterEntity.setHostEntities(hostEntities);
+    clusters.mapHostsToCluster(hostNames, DummyCluster);
+
+    return cluster;
+  }
+
+  public void populateActionDB(ActionDBAccessor db, String DummyHostname1, long requestId, long stageId) throws AmbariException {
+    Stage s = stageFactory.createNew(requestId, "/a/b", DummyCluster, 1L, "heartbeat handler test",
+        "clusterHostInfo", "commandParamsStage", "hostParamsStage");
+    s.setStageId(stageId);
+    String filename = null;
+    s.addHostRoleExecutionCommand(DummyHostname1, Role.HBASE_MASTER,
+        RoleCommand.START,
+        new ServiceComponentHostStartEvent(Role.HBASE_MASTER.toString(),
+            DummyHostname1, System.currentTimeMillis()), DummyCluster, HBASE, false, false);
+    List<Stage> stages = new ArrayList<Stage>();
+    stages.add(s);
+    Request request = new Request(stages, clusters);
+    db.persistActions(request);
+  }
+
+}