Posted to commits@ambari.apache.org by ad...@apache.org on 2017/05/23 09:52:59 UTC

[01/50] [abbrv] ambari git commit: AMBARI-21037 When adding services, new versions of configs get added for the config types associated with config groups. (atkach)

Repository: ambari
Updated Branches:
  refs/heads/ambari-rest-api-explorer 51fc3cf77 -> 651bdcbdf


AMBARI-21037 When adding services, new versions of configs get added for the config types associated with config groups. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/735c4137
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/735c4137
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/735c4137

Branch: refs/heads/ambari-rest-api-explorer
Commit: 735c4137cb2cbb95c6da44809a447c148c7c6f9d
Parents: 4427a33
Author: Andrii Tkach <at...@apache.org>
Authored: Wed May 17 13:57:49 2017 +0300
Committer: Andrii Tkach <at...@apache.org>
Committed: Wed May 17 18:15:20 2017 +0300

----------------------------------------------------------------------
 ambari-web/app/controllers/wizard.js            |  6 ++--
 .../app/controllers/wizard/step7_controller.js  |  1 +
 .../app/mixins/common/configs/configs_saver.js  | 32 ++++++++++++++------
 .../mixins/common/configs/configs_saver_test.js | 13 ++++++++
 4 files changed, 39 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/735c4137/ambari-web/app/controllers/wizard.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard.js b/ambari-web/app/controllers/wizard.js
index c3a54cf..a8a0249 100644
--- a/ambari-web/app/controllers/wizard.js
+++ b/ambari-web/app/controllers/wizard.js
@@ -1031,16 +1031,16 @@ App.WizardController = Em.Controller.extend(App.LocalStorage, App.ThemesMappingM
           })
         });
         //configGroup copied into plain JS object to avoid Converting circular structure to JSON
-        var hostNames = configGroup.get('hosts').map(function(host_name) {return hosts[host_name].id;});
+        var hostIds = configGroup.get('hosts').map(function(host_name) {return hosts[host_name].id;});
         serviceConfigGroups.push({
           id: configGroup.get('id'),
           name: configGroup.get('name'),
           description: configGroup.get('description'),
-          hosts: hostNames.slice(),
+          hosts: hostIds.slice(),
           properties: properties.slice(),
           is_default: configGroup.get('isDefault'),
           is_for_installed_service: isForInstalledService,
-          is_for_update: configGroup.isForUpdate || configGroup.get('hash') != this.getConfigGroupHash(configGroup, hostNames),
+          is_for_update: configGroup.get('isForUpdate') || configGroup.get('hash') !== this.getConfigGroupHash(configGroup, configGroup.get('hosts')),
           service_name: configGroup.get('serviceName'),
           service_id: configGroup.get('serviceName'),
           desired_configs: configGroup.get('desiredConfigs'),

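The is_for_update fix above hinges on comparing a hash stored on the config group against a hash recomputed from the group's current state. Below is a minimal sketch of that change-detection idea, written in Python for brevity; the field set and hash function are illustrative assumptions, not Ambari's actual getConfigGroupHash:

    import hashlib
    import json

    def config_group_hash(group, host_ids):
        # Serialize the fields that define the group deterministically,
        # so an unchanged group always yields the same hash.
        payload = json.dumps({"hosts": sorted(host_ids),
                              "properties": group.get("properties", [])},
                             sort_keys=True)
        return hashlib.md5(payload.encode("utf-8")).hexdigest()

    def is_for_update(group, host_ids):
        # Save the group only when it was explicitly marked for update,
        # or when its recomputed hash differs from the stored one.
        return group.get("is_for_update", False) or \
               group.get("hash") != config_group_hash(group, host_ids)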
http://git-wip-us.apache.org/repos/asf/ambari/blob/735c4137/ambari-web/app/controllers/wizard/step7_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step7_controller.js b/ambari-web/app/controllers/wizard/step7_controller.js
index 8e14b70..6685c01 100644
--- a/ambari-web/app/controllers/wizard/step7_controller.js
+++ b/ambari-web/app/controllers/wizard/step7_controller.js
@@ -1186,6 +1186,7 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
               this.get('stepConfigs').findProperty('serviceName', service.serviceName).get('configs').pushObject(overriddenSCP);
             }
           }, this);
+          modelGroup.set('hash', this.get('wizardController').getConfigGroupHash(modelGroup));
         }, this);
         service.set('configGroups', App.ServiceConfigGroup.find().filterProperty('serviceName', service.get('serviceName')));
       }

http://git-wip-us.apache.org/repos/asf/ambari/blob/735c4137/ambari-web/app/mixins/common/configs/configs_saver.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/common/configs/configs_saver.js b/ambari-web/app/mixins/common/configs/configs_saver.js
index 7d8721d..4a4163e 100644
--- a/ambari-web/app/mixins/common/configs/configs_saver.js
+++ b/ambari-web/app/mixins/common/configs/configs_saver.js
@@ -368,10 +368,10 @@ App.ConfigsSaverMixin = Em.Mixin.create({
   /*********************************** 3. GENERATING JSON TO SAVE *****************************/
 
   /**
-   * Map that contains last used timestamp per filename.
+   * Map that contains last used timestamp.
    * There is a case when two config groups can update same filename almost simultaneously
-   * so they have equal timestamp only and this causes collision. So to prevent this we need to check
-   * if specific filename with specific timestamp is not saved yet
+   * so they have equal timestamp and this causes collision. So to prevent this we need to check
+   * if specific filename with specific timestamp is not saved yet.
    *
    * @type {Object}
    */
@@ -389,14 +389,9 @@ App.ConfigsSaverMixin = Em.Mixin.create({
     var desired_config = [];
     if (Em.isArray(configsToSave) && Em.isArray(fileNamesToSave) && fileNamesToSave.length && configsToSave.length) {
       serviceConfigNote = serviceConfigNote || "";
-      var tagVersion = "version" + (new Date).getTime();
-      fileNamesToSave.forEach(function(fName) {
 
-        /** @see <code>_timeStamps<code> **/
-        if (this.get('_timeStamps')[fName] === tagVersion) {
-          tagVersion = "version" + ((new Date).getTime() + 1);
-        }
-        this.get('_timeStamps')[fName] = tagVersion;
+      fileNamesToSave.forEach(function(fName) {
+        var tagVersion = this.getUniqueTag();
 
         if (this.allowSaveSite(fName)) {
           var properties = configsToSave.filterProperty('filename', fName);
@@ -409,6 +404,23 @@ App.ConfigsSaverMixin = Em.Mixin.create({
   },
 
   /**
+   * Generate a unique tag.
+   * @returns {string}
+   */
+  getUniqueTag: function() {
+    var timestamp = (new Date).getTime();
+    var tagVersion = "version" + timestamp;
+
+    while(this.get('_timeStamps')[tagVersion]) {
+      timestamp++;
+      tagVersion = "version" + timestamp;
+    }
+    /** @see <code>_timeStamps</code> **/
+    this.get('_timeStamps')[tagVersion] = true;
+    return tagVersion;
+  },
+
+  /**
    * For some file names we have a restriction
    * and can't save them, in this case method will return false
    *

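The collision-avoidance scheme that getUniqueTag introduces above can be summarized on its own. Here is a minimal sketch in Python, with an in-memory set standing in for the mixin's _timeStamps map; the names are illustrative, but the logic mirrors the patch: bump the timestamp until the derived tag is unused, then record it:

    import time

    _used_tags = set()

    def get_unique_tag(used_tags=_used_tags):
        # Start from the current time in milliseconds,
        # the equivalent of (new Date).getTime().
        timestamp = int(time.time() * 1000)
        tag = "version%d" % timestamp
        # Two near-simultaneous saves can land on the same millisecond;
        # bump the timestamp until the tag has not been handed out yet.
        while tag in used_tags:
            timestamp += 1
            tag = "version%d" % timestamp
        used_tags.add(tag)
        return tag

The unit test added below asserts exactly this property: three consecutive calls must return three distinct tags.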
http://git-wip-us.apache.org/repos/asf/ambari/blob/735c4137/ambari-web/test/mixins/common/configs/configs_saver_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/mixins/common/configs/configs_saver_test.js b/ambari-web/test/mixins/common/configs/configs_saver_test.js
index 6e65cf9..7815938 100644
--- a/ambari-web/test/mixins/common/configs/configs_saver_test.js
+++ b/ambari-web/test/mixins/common/configs/configs_saver_test.js
@@ -259,6 +259,19 @@ describe('App.ConfigsSaverMixin', function() {
     })
   });
 
+  describe('#getUniqueTag', function() {
+
+    it('should generate unique tags', function() {
+      var tags = [];
+      for (var i = 0; i < 3; i++) {
+        tags.push(mixin.getUniqueTag());
+      }
+      expect(tags[1]).to.not.be.equal(tags[0]);
+      expect(tags[2]).to.not.be.equal(tags[1]);
+      expect(tags[0]).to.not.be.equal(tags[2]);
+    });
+  });
+
   describe('#getModifiedConfigs', function () {
     var configs = [
       Em.Object.create({


[18/50] [abbrv] ambari git commit: AMBARI-21051. HDP 3.0 TP - create Service Advisor for Storm.(vbrodetsky)

Posted by ad...@apache.org.
AMBARI-21051. HDP 3.0 TP - create Service Advisor for Storm.(vbrodetsky)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7dc2ddc7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7dc2ddc7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7dc2ddc7

Branch: refs/heads/ambari-rest-api-explorer
Commit: 7dc2ddc72bce3dc944191c7797d2672c1f3b4fcb
Parents: 6ab4d28
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Thu May 18 20:39:00 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Thu May 18 20:39:00 2017 +0300

----------------------------------------------------------------------
 .../STORM/1.0.1.3.0/service_advisor.py          | 387 +++++++++++++++++++
 1 file changed, 387 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7dc2ddc7/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/service_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/service_advisor.py
new file mode 100644
index 0000000..1d6bbe0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/service_advisor.py
@@ -0,0 +1,387 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+# Python imports
+import imp
+import os
+import traceback
+import re
+import socket
+import fnmatch
+
+
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.get_bare_principal import get_bare_principal
+
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../stacks/')
+PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
+
+try:
+  with open(PARENT_FILE, 'rb') as fp:
+    service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
+except Exception as e:
+  traceback.print_exc()
+  print "Failed to load parent"
+
+class StormServiceAdvisor(service_advisor.ServiceAdvisor):
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(StormServiceAdvisor, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    # Always call these methods
+    self.modifyMastersWithMultipleInstances()
+    self.modifyCardinalitiesDict()
+    self.modifyHeapSizeProperties()
+    self.modifyNotValuableComponents()
+    self.modifyComponentsNotPreferableOnServer()
+    self.modifyComponentLayoutSchemes()
+
+  def modifyMastersWithMultipleInstances(self):
+    """
+    Modify the set of masters with multiple instances.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyCardinalitiesDict(self):
+    """
+    Modify the dictionary of cardinalities.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyHeapSizeProperties(self):
+    """
+    Modify the dictionary of heap size properties.
+    Must be overridden in child class.
+    """
+    pass
+
+  def modifyNotValuableComponents(self):
+    """
+    Modify the set of components whose host assignment is based on other services.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyComponentsNotPreferableOnServer(self):
+    """
+    Modify the set of components that are not preferable on the server.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyComponentLayoutSchemes(self):
+    """
+    Modify layout scheme dictionaries for components.
+    The scheme dictionary basically maps the number of hosts to the
+    host index where the component should exist.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def getServiceComponentLayoutValidations(self, services, hosts):
+    """
+    Get a list of errors.
+    Must be overridden in child class.
+    """
+
+    return []
+
+  def getServiceConfigurationRecommendations(self, configurations, clusterData, services, hosts):
+    """
+    Entry point.
+    Must be overridden in child class.
+    """
+    #Logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    recommender = StormRecommender()
+    recommender.recommendStormConfigurationsFromHDP206(configurations, clusterData, services, hosts)
+    recommender.recommendStormConfigurationsFromHDP21(configurations, clusterData, services, hosts)
+    recommender.recommendStormConfigurationsFromHDP22(configurations, clusterData, services, hosts)
+    recommender.recommendStormConfigurationsFromHDP23(configurations, clusterData, services, hosts)
+    recommender.recommendStormConfigurationsFromHDP25(configurations, clusterData, services, hosts)
+
+
+
+  def getServiceConfigurationsValidationItems(self, configurations, recommendedDefaults, services, hosts):
+    """
+    Entry point.
+    Validate configurations for the service. Return a list of errors.
+    The code for this function should be the same for each Service Advisor.
+    """
+    #Logger.info("Class: %s, Method: %s. Validating Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    validator = StormValidator()
+    # Calls the methods of the validator using arguments,
+    # method(siteProperties, siteRecommendations, configurations, services, hosts)
+    return validator.validateListOfConfigUsingMethod(configurations, recommendedDefaults, services, hosts, validator.validators)
+
+
+
+class StormRecommender(service_advisor.ServiceAdvisor):
+  """
+  Storm Recommender suggests properties when adding the service for the first time or modifying configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(StormRecommender, self)
+    self.as_super.__init__(*args, **kwargs)
+
+
+  def recommendStormConfigurationsFromHDP206(self, configurations, clusterData, services, hosts):
+    putStormSiteProperty = self.putProperty(configurations, "storm-site", services)
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    # Storm AMS integration
+    if 'AMBARI_METRICS' in servicesList:
+      putStormSiteProperty('metrics.reporter.register', 'org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter')
+
+
+  def recommendStormConfigurationsFromHDP21(self, configurations, clusterData, services, hosts):
+    storm_mounts = [
+      ("storm.local.dir", ["NODEMANAGER", "NIMBUS"], "/hadoop/storm", "single")
+    ]
+
+    self.updateMountProperties("storm-site", storm_mounts, configurations, services, hosts)
+
+
+  def recommendStormConfigurationsFromHDP22(self, configurations, clusterData, services, hosts):
+    putStormSiteProperty = self.putProperty(configurations, "storm-site", services)
+    putStormSiteAttributes = self.putPropertyAttribute(configurations, "storm-site")
+    storm_site = self.getServicesSiteProperties(services, "storm-site")
+    security_enabled = self.isSecurityEnabled(services)
+    if "ranger-env" in services["configurations"] and "ranger-storm-plugin-properties" in services["configurations"] and \
+        "ranger-storm-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
+      putStormRangerPluginProperty = self.putProperty(configurations, "ranger-storm-plugin-properties", services)
+      rangerEnvStormPluginProperty = services["configurations"]["ranger-env"]["properties"]["ranger-storm-plugin-enabled"]
+      putStormRangerPluginProperty("ranger-storm-plugin-enabled", rangerEnvStormPluginProperty)
+
+    rangerPluginEnabled = ''
+    if 'ranger-storm-plugin-properties' in configurations and 'ranger-storm-plugin-enabled' in  configurations['ranger-storm-plugin-properties']['properties']:
+      rangerPluginEnabled = configurations['ranger-storm-plugin-properties']['properties']['ranger-storm-plugin-enabled']
+    elif 'ranger-storm-plugin-properties' in services['configurations'] and 'ranger-storm-plugin-enabled' in services['configurations']['ranger-storm-plugin-properties']['properties']:
+      rangerPluginEnabled = services['configurations']['ranger-storm-plugin-properties']['properties']['ranger-storm-plugin-enabled']
+
+    nonRangerClass = 'backtype.storm.security.auth.authorizer.SimpleACLAuthorizer'
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    rangerServiceVersion=''
+    if 'RANGER' in servicesList:
+      rangerServiceVersion = [service['StackServices']['service_version'] for service in services["services"] if service['StackServices']['service_name'] == 'RANGER'][0]
+
+    if rangerServiceVersion and rangerServiceVersion == '0.4.0':
+      rangerClass = 'com.xasecure.authorization.storm.authorizer.XaSecureStormAuthorizer'
+    else:
+      rangerClass = 'org.apache.ranger.authorization.storm.authorizer.RangerStormAuthorizer'
+    # Cluster is kerberized
+    if security_enabled:
+      if rangerPluginEnabled and (rangerPluginEnabled.lower() == 'Yes'.lower()):
+        putStormSiteProperty('nimbus.authorizer',rangerClass)
+      else:
+        putStormSiteProperty('nimbus.authorizer', nonRangerClass)
+    else:
+      putStormSiteAttributes('nimbus.authorizer', 'delete', 'true')
+
+
+  def recommendStormConfigurationsFromHDP23(self, configurations, clusterData, services, hosts):
+    putStormStartupProperty = self.putProperty(configurations, "storm-site", services)
+    putStormEnvProperty = self.putProperty(configurations, "storm-env", services)
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+
+    if "storm-site" in services["configurations"]:
+      # atlas
+      notifier_plugin_property = "storm.topology.submission.notifier.plugin.class"
+      if notifier_plugin_property in services["configurations"]["storm-site"]["properties"] and \
+         services["configurations"]["storm-site"]["properties"][notifier_plugin_property] is not None:
+
+        notifier_plugin_value = services["configurations"]["storm-site"]["properties"][notifier_plugin_property]
+      else:
+        notifier_plugin_value = " "
+
+      atlas_is_present = "ATLAS" in servicesList
+      atlas_hook_class = "org.apache.atlas.storm.hook.StormAtlasHook"
+      atlas_hook_is_set = atlas_hook_class in notifier_plugin_value
+      enable_atlas_hook = False
+      enable_external_atlas_for_storm = False
+
+      if 'storm-atlas-application.properties' in services['configurations'] and 'enable.external.atlas.for.storm' in services['configurations']['storm-atlas-application.properties']['properties']:
+        enable_external_atlas_for_storm = services['configurations']['storm-atlas-application.properties']['properties']['enable.external.atlas.for.storm'].lower() == "true"
+
+      if atlas_is_present:
+        putStormEnvProperty("storm.atlas.hook", "true")
+      elif enable_external_atlas_for_storm:
+        putStormEnvProperty("storm.atlas.hook", "true")
+      else:
+        putStormEnvProperty("storm.atlas.hook", "false")
+
+      if 'storm-env' in configurations and 'storm.atlas.hook' in configurations['storm-env']['properties']:
+        enable_atlas_hook = configurations['storm-env']['properties']['storm.atlas.hook'] == "true"
+      elif 'storm-env' in services['configurations'] and 'storm.atlas.hook' in services['configurations']['storm-env']['properties']:
+        enable_atlas_hook = services['configurations']['storm-env']['properties']['storm.atlas.hook'] == "true"
+
+      if enable_atlas_hook and not atlas_hook_is_set:
+        notifier_plugin_value = atlas_hook_class if notifier_plugin_value == " " else ",".join([notifier_plugin_value, atlas_hook_class])
+
+      if not enable_atlas_hook and atlas_hook_is_set:
+        application_classes = [item for item in notifier_plugin_value.split(",") if item != atlas_hook_class and item != " "]
+        notifier_plugin_value = ",".join(application_classes) if application_classes else " "
+
+      if notifier_plugin_value.strip() != "":
+        putStormStartupProperty(notifier_plugin_property, notifier_plugin_value)
+      else:
+        putStormStartupPropertyAttribute = self.putPropertyAttribute(configurations, "storm-site")
+        putStormStartupPropertyAttribute(notifier_plugin_property, 'delete', 'true')
+
+
+
+  def recommendStormConfigurationsFromHDP25(self, configurations, clusterData, services, hosts):
+    storm_site = self.getServicesSiteProperties(services, "storm-site")
+    storm_env = self.getServicesSiteProperties(services, "storm-env")
+    putStormSiteProperty = self.putProperty(configurations, "storm-site", services)
+    putStormSiteAttributes = self.putPropertyAttribute(configurations, "storm-site")
+    security_enabled = self.isSecurityEnabled(services)
+
+    if storm_env and storm_site:
+      if security_enabled:
+        _storm_principal_name = storm_env['storm_principal_name'] if 'storm_principal_name' in storm_env else None
+        storm_bare_jaas_principal = get_bare_principal(_storm_principal_name)
+        if 'nimbus.impersonation.acl' in storm_site:
+          storm_nimbus_impersonation_acl = storm_site["nimbus.impersonation.acl"]
+          # str.replace returns a new string; assign the result back
+          storm_nimbus_impersonation_acl = storm_nimbus_impersonation_acl.replace('{{storm_bare_jaas_principal}}', storm_bare_jaas_principal)
+          putStormSiteProperty('nimbus.impersonation.acl', storm_nimbus_impersonation_acl)
+      else:
+        if 'nimbus.impersonation.acl' in storm_site:
+          putStormSiteAttributes('nimbus.impersonation.acl', 'delete', 'true')
+        if 'nimbus.impersonation.authorizer' in storm_site:
+          putStormSiteAttributes('nimbus.impersonation.authorizer', 'delete', 'true')
+
+    rangerPluginEnabled = ''
+    if 'ranger-storm-plugin-properties' in configurations and 'ranger-storm-plugin-enabled' in  configurations['ranger-storm-plugin-properties']['properties']:
+      rangerPluginEnabled = configurations['ranger-storm-plugin-properties']['properties']['ranger-storm-plugin-enabled']
+    elif 'ranger-storm-plugin-properties' in services['configurations'] and 'ranger-storm-plugin-enabled' in services['configurations']['ranger-storm-plugin-properties']['properties']:
+      rangerPluginEnabled = services['configurations']['ranger-storm-plugin-properties']['properties']['ranger-storm-plugin-enabled']
+
+    storm_authorizer_class = 'org.apache.storm.security.auth.authorizer.SimpleACLAuthorizer'
+    ranger_authorizer_class = 'org.apache.ranger.authorization.storm.authorizer.RangerStormAuthorizer'
+    # Cluster is kerberized
+    if security_enabled:
+      if rangerPluginEnabled and (rangerPluginEnabled.lower() == 'Yes'.lower()):
+        putStormSiteProperty('nimbus.authorizer',ranger_authorizer_class)
+      else:
+        putStormSiteProperty('nimbus.authorizer', storm_authorizer_class)
+    else:
+      putStormSiteAttributes('nimbus.authorizer', 'delete', 'true')
+
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    # Storm AMS integration
+    if 'AMBARI_METRICS' in servicesList:
+      putStormSiteProperty('storm.cluster.metrics.consumer.register', '[{"class": "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter"}]')
+      putStormSiteProperty('topology.metrics.consumer.register',
+                           '[{"class": "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsSink", '
+                           '"parallelism.hint": 1, '
+                           '"whitelist": ["kafkaOffset\\\..+/", "__complete-latency", "__process-latency", '
+                           '"__receive\\\.population$", "__sendqueue\\\.population$", "__execute-count", "__emit-count", '
+                           '"__ack-count", "__fail-count", "memory/heap\\\.usedBytes$", "memory/nonHeap\\\.usedBytes$", '
+                           '"GC/.+\\\.count$", "GC/.+\\\.timeMs$"]}]')
+    else:
+      putStormSiteProperty('storm.cluster.metrics.consumer.register', 'null')
+      putStormSiteProperty('topology.metrics.consumer.register', 'null')
+
+
+class StormValidator(service_advisor.ServiceAdvisor):
+  """
+  Storm Validator checks the correctness of properties whenever the service is first added or the user attempts to
+  change configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(StormValidator, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    self.validators = [("storm-site", self.validateStormConfigurationsFromHDP206),
+                       ("ranger-storm-plugin-properties", self.validateStormRangerPluginConfigurationsFromHDP22),
+                       ("storm-site", self.validateStormConfigurationsFromHDP25)]
+
+
+
+  def validateStormConfigurationsFromHDP206(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    # Storm AMS integration
+    if 'AMBARI_METRICS' in servicesList and "metrics.reporter.register" in properties and \
+      "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter" not in properties.get("metrics.reporter.register"):
+
+      validationItems.append({"config-name": 'metrics.reporter.register',
+                              "item": self.getWarnItem(
+                                "Should be set to org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter to report the metrics to Ambari Metrics service.")})
+
+    return self.toConfigurationValidationProblems(validationItems, "storm-site")
+
+
+  def validateStormRangerPluginConfigurationsFromHDP22(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+    ranger_plugin_properties = self.getSiteProperties(configurations, "ranger-storm-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-storm-plugin-enabled'] if ranger_plugin_properties else 'No'
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    security_enabled = self.isSecurityEnabled(services)
+    if 'RANGER' in servicesList and ranger_plugin_enabled.lower() == 'yes':
+      # ranger-hdfs-plugin must be enabled in ranger-env
+      ranger_env = self.getServicesSiteProperties(services, 'ranger-env')
+      if not ranger_env or not 'ranger-storm-plugin-enabled' in ranger_env or \
+          ranger_env['ranger-storm-plugin-enabled'].lower() != 'yes':
+        validationItems.append({"config-name": 'ranger-storm-plugin-enabled',
+                                "item": self.getWarnItem(
+                                  "ranger-storm-plugin-properties/ranger-storm-plugin-enabled must correspond ranger-env/ranger-storm-plugin-enabled")})
+    if ("RANGER" in servicesList) and (ranger_plugin_enabled.lower() == 'Yes'.lower()) and not security_enabled:
+      validationItems.append({"config-name": "ranger-storm-plugin-enabled",
+                              "item": self.getWarnItem(
+                                "Ranger Storm plugin should not be enabled in non-kerberos environment.")})
+
+    return self.toConfigurationValidationProblems(validationItems, "ranger-storm-plugin-properties")
+
+
+  def validateStormConfigurationsFromHDP25(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    # Storm AMS integration
+    if 'AMBARI_METRICS' in servicesList:
+      if "storm.cluster.metrics.consumer.register" in properties and \
+          'null' in properties.get("storm.cluster.metrics.consumer.register"):
+
+        validationItems.append({"config-name": 'storm.cluster.metrics.consumer.register',
+                              "item": self.getWarnItem(
+                                "Should be set to recommended value to report metrics to Ambari Metrics service.")})
+
+      if "topology.metrics.consumer.register" in properties and \
+          'null' in properties.get("topology.metrics.consumer.register"):
+
+        validationItems.append({"config-name": 'topology.metrics.consumer.register',
+                                "item": self.getWarnItem(
+                                  "Should be set to recommended value to report metrics to Ambari Metrics service.")})
+
+    return self.toConfigurationValidationProblems(validationItems, "storm-site")
\ No newline at end of file

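The comment in getServiceConfigurationsValidationItems spells out the dispatch contract: each (config-type, method) pair registered in validator.validators is invoked as method(siteProperties, siteRecommendations, configurations, services, hosts). Below is a minimal sketch of such a dispatcher, assuming validateListOfConfigUsingMethod in the parent ServiceAdvisor behaves roughly like this; the helper is illustrative, not the actual stack implementation:

    def validate_list_of_configs(configurations, recommended_defaults,
                                 services, hosts, validators):
        # Each entry pairs a config type (e.g. "storm-site") with a method
        # returning a list of validation problems for that type.
        items = []
        for config_type, method in validators:
            if config_type in configurations:
                site_properties = configurations[config_type].get("properties", {})
                site_recommendations = recommended_defaults.get(config_type, {})
                items.extend(method(site_properties, site_recommendations,
                                    configurations, services, hosts))
        return items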

[29/50] [abbrv] ambari git commit: Revert "AMBARI-21011. Append PATH to YARN config 'yarn.nodemanager.admin-env' for HDP 2.6."

Posted by ad...@apache.org.
Revert "AMBARI-21011. Append PATH to YARN config 'yarn.nodemanager.admin-env' for HDP 2.6."

This reverts commit 6e4331e92f6b42fab3d36ea64df42019ae73e715


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/55af3363
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/55af3363
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/55af3363

Branch: refs/heads/ambari-rest-api-explorer
Commit: 55af3363582162487eef3363f5ae5c4a16872652
Parents: 0a61f98
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Sat May 20 00:34:11 2017 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Sat May 20 00:34:37 2017 -0700

----------------------------------------------------------------------
 .../HDP/2.6/services/YARN/configuration/yarn-site.xml       | 9 ---------
 1 file changed, 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/55af3363/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-site.xml
index 754a2c2..cab0e65 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-site.xml
@@ -102,15 +102,6 @@
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
-    <name>yarn.nodemanager.admin-env</name>
-    <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX,PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:$PATH</value>
-    <description>
-      Environment variables that should be forwarded from the NodeManager's
-      environment to the container's.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
     <name>yarn.nodemanager.kill-escape.launch-command-line</name>
     <value>slider-agent,LLAP</value>
     <on-ambari-upgrade add="false"/>


[17/50] [abbrv] ambari git commit: AMBARI-21048. HDP 3.0 TP - create service definition for Storm with configs, kerberos, widgets, etc.(vbrodetsky)

Posted by ad...@apache.org.
AMBARI-21048. HDP 3.0 TP - create service definition for Storm with configs, kerberos, widgets, etc.(vbrodetsky)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6ab4d28a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6ab4d28a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6ab4d28a

Branch: refs/heads/ambari-rest-api-explorer
Commit: 6ab4d28a6973cec9a2d04592bfa6fcdfcf081988
Parents: 0e5f247
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Thu May 18 20:33:04 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Thu May 18 20:33:43 2017 +0300

----------------------------------------------------------------------
 .../common-services/STORM/1.0.1.3.0/alerts.json |  145 +++
 .../configuration/ranger-storm-audit.xml        |  133 ++
 .../ranger-storm-plugin-properties.xml          |  121 ++
 .../ranger-storm-policymgr-ssl.xml              |   70 +
 .../configuration/ranger-storm-security.xml     |   67 +
 .../storm-atlas-application.properties.xml      |   31 +
 .../configuration/storm-cluster-log4j.xml       |  133 ++
 .../STORM/1.0.1.3.0/configuration/storm-env.xml |  165 +++
 .../1.0.1.3.0/configuration/storm-site.xml      | 1002 +++++++++++++++
 .../configuration/storm-worker-log4j.xml        |  189 +++
 .../STORM/1.0.1.3.0/kerberos.json               |  134 ++
 .../STORM/1.0.1.3.0/metainfo.xml                |  179 +++
 .../STORM/1.0.1.3.0/metrics.json                | 1202 ++++++++++++++++++
 .../alerts/check_supervisor_process_win.py      |   50 +
 .../STORM/1.0.1.3.0/package/files/wordCount.jar |  Bin 0 -> 690588 bytes
 .../1.0.1.3.0/package/scripts/drpc_server.py    |   91 ++
 .../STORM/1.0.1.3.0/package/scripts/nimbus.py   |  116 ++
 .../1.0.1.3.0/package/scripts/nimbus_prod.py    |   81 ++
 .../1.0.1.3.0/package/scripts/pacemaker.py      |   90 ++
 .../STORM/1.0.1.3.0/package/scripts/params.py   |   28 +
 .../1.0.1.3.0/package/scripts/params_linux.py   |  424 ++++++
 .../1.0.1.3.0/package/scripts/params_windows.py |   60 +
 .../STORM/1.0.1.3.0/package/scripts/rest_api.py |   85 ++
 .../STORM/1.0.1.3.0/package/scripts/service.py  |   95 ++
 .../1.0.1.3.0/package/scripts/service_check.py  |   79 ++
 .../package/scripts/setup_ranger_storm.py       |  133 ++
 .../1.0.1.3.0/package/scripts/status_params.py  |   83 ++
 .../STORM/1.0.1.3.0/package/scripts/storm.py    |  182 +++
 .../1.0.1.3.0/package/scripts/storm_upgrade.py  |  177 +++
 .../package/scripts/storm_yaml_utils.py         |   53 +
 .../1.0.1.3.0/package/scripts/supervisor.py     |  117 ++
 .../package/scripts/supervisor_prod.py          |   84 ++
 .../package/scripts/supervisord_service.py      |   33 +
 .../1.0.1.3.0/package/scripts/ui_server.py      |  137 ++
 .../package/templates/client_jaas.conf.j2       |   33 +
 .../1.0.1.3.0/package/templates/config.yaml.j2  |   75 ++
 .../templates/input.config-storm.json.j2        |   78 ++
 .../templates/storm-metrics2.properties.j2      |   34 +
 .../1.0.1.3.0/package/templates/storm.conf.j2   |   35 +
 .../package/templates/storm_jaas.conf.j2        |   65 +
 .../package/templates/worker-launcher.cfg.j2    |   19 +
 .../STORM/1.0.1.3.0/quicklinks/quicklinks.json  |   45 +
 .../STORM/1.0.1.3.0/role_command_order.json     |   13 +
 .../STORM/1.0.1.3.0/widgets.json                |  127 ++
 .../stacks/HDP/3.0/services/STORM/metainfo.xml  |   27 +
 45 files changed, 6320 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/alerts.json b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/alerts.json
new file mode 100644
index 0000000..acd9d85
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/alerts.json
@@ -0,0 +1,145 @@
+{
+  "STORM": {
+    "service": [
+      {
+        "name": "storm_supervisor_process_percent",
+        "label": "Percent Supervisors Available",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "storm_supervisor_process",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 10
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 30
+            },
+            "units" : "%",
+            "type": "PERCENT"
+          }
+        }
+      }
+    ],
+    "STORM_UI_SERVER": [
+      {
+        "name": "storm_webui",
+        "label": "Storm Web UI",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{storm-site/ui.port}}",
+            "https" : "{{storm-site/ui.https.port}}",
+            "kerberos_keytab": "{{storm-env/storm_ui_keytab}}",
+            "kerberos_principal": "{{storm-env/storm_ui_principal_name}}",
+            "connection_timeout": 5.0,
+            "https_property": "{{storm-site/ui.https.keystore.type}}",
+            "https_property_value": "jks"
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning":{
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      }      
+    ],
+    "NIMBUS": [
+      {
+        "name": "storm_nimbus_process",
+        "label": "Nimbus Process",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "{{storm-site/nimbus.thrift.port}}",
+          "default_port": 6627,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ],
+    "DRPC_SERVER": [
+      {
+        "name": "storm_drpc_server",
+        "label": "DRPC Server Process",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "{{storm-site/drpc.port}}",
+          "default_port": 3772,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ],
+    "SUPERVISOR": [
+      {
+        "name": "storm_supervisor_process",
+        "label": "Supervisor Process",
+        "interval": 1,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "{{storm-env/jmxremote_port}}",
+          "default_port": 56431,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-audit.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-audit.xml
new file mode 100644
index 0000000..18a6c93
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-audit.xml
@@ -0,0 +1,133 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.audit.is.enabled</name>
+    <value>true</value>
+    <description>Is Audit enabled?</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.audit.destination.hdfs</name>
+    <value>true</value>
+    <display-name>Audit to HDFS</display-name>
+    <description>Is Audit to HDFS enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.dir</name>
+    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
+    <description>HDFS folder to write audit to; make sure the service user has required permissions</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs.dir</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
+    <value>/var/log/storm/audit/hdfs/spool</value>
+    <description>/var/log/storm/audit/hdfs/spool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr</name>
+    <value>false</value>
+    <display-name>Audit to SOLR</display-name>
+    <description>Is Solr audit enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.solr</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.urls</name>
+    <value/>
+    <description>Solr URL</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.urls</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.zookeepers</name>
+    <value>NONE</value>
+    <description>Solr Zookeeper string</description>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.zookeepers</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
+    <value>/var/log/storm/audit/solr/spool</value>
+    <description>/var/log/storm/audit/solr/spool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.provider.summary.enabled</name>
+    <value>false</value>
+    <display-name>Audit provider summary enabled</display-name>
+    <description>Enable Summary audit?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+
+  <property>
+    <name>ranger.plugin.storm.ambari.cluster.name</name>
+    <value>{{cluster_name}}</value>
+    <description>Capture cluster name from where Ranger storm plugin is enabled.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-plugin-properties.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-plugin-properties.xml
new file mode 100644
index 0000000..99f6e4d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-plugin-properties.xml
@@ -0,0 +1,121 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>policy_user</name>
+    <value>{{policy_user}}</value>
+    <display-name>Policy user for STORM</display-name>
+    <description>This user must be a system user and must also be present in the Ranger admin portal</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>common.name.for.certificate</name>
+    <value/>
+    <description>Common name for the certificate; this value should match what is specified in the repo within Ranger admin</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger-storm-plugin-enabled</name>
+    <value>No</value>
+    <display-name>Enable Ranger for STORM</display-name>
+    <description>Enable the Ranger Storm plugin?</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>ranger-storm-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_USERNAME</name>
+    <value>stormtestuser@EXAMPLE.COM</value>
+    <display-name>Ranger repository config user</display-name>
+    <description>Used for repository creation on ranger admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_PASSWORD</name>
+    <value>stormtestuser</value>
+    <display-name>Ranger repository config password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Used for repository creation on ranger admin</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_admin_username</name>
+    <value></value>
+    <display-name>External Ranger admin username</display-name>
+    <description>Add the Ranger default admin username if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_admin_password</name>
+    <value></value>
+    <display-name>External Ranger admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Add the Ranger default admin password if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_username</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin username</display-name>
+    <description>Add the Ranger default Ambari admin username if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_password</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Add the Ranger default Ambari admin password if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-policymgr-ssl.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-policymgr-ssl.xml
new file mode 100644
index 0000000..cec82b0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-policymgr-ssl.xml
@@ -0,0 +1,70 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.password</name>
+    <value>myKeyFilePassword</value>
+    <property-type>PASSWORD</property-type>
+    <description>password for keystore</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.password</name>
+    <value>changeit</value>
+    <property-type>PASSWORD</property-type>
+    <description>java truststore password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java keystore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java truststore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore</name>
+    <value>/usr/hdp/current/storm-client/conf/ranger-plugin-keystore.jks</value>
+    <description>Java Keystore files</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore</name>
+    <value>/usr/hdp/current/storm-client/conf/ranger-plugin-truststore.jks</value>
+    <description>java truststore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-security.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-security.xml
new file mode 100644
index 0000000..7b1ed0f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-security.xml
@@ -0,0 +1,67 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>ranger.plugin.storm.service.name</name>
+    <value>{{repo_name}}</value>
+    <description>Name of the Ranger service containing policies for this Storm instance</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.storm.policy.source.impl</name>
+    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
+    <description>Class to retrieve policies from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.storm.policy.rest.url</name>
+    <value>{{policymgr_mgr_url}}</value>
+    <description>URL to Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>admin-properties</type>
+        <name>policymgr_external_url</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>ranger.plugin.storm.policy.pollIntervalMs</name>
+    <value>30000</value>
+    <description>How often to poll for changes in policies?</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.storm.policy.cache.dir</name>
+    <value>/etc/ranger/{{repo_name}}/policycache</value>
+    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+
+  <property>
+    <name>ranger.plugin.storm.policy.rest.ssl.config.file</name>
+    <value>/usr/hdp/current/storm-client/conf/ranger-policymgr-ssl.xml</value>
+    <description>Path to the file containing SSL details to contact Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
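
For reference, a minimal Python sketch (not Ranger plugin code) of the polling
contract the ranger.plugin.storm.* properties above describe: fetch the policies
for the configured service from Ranger Admin every pollIntervalMs milliseconds, and
keep the last good copy under policy.cache.dir so enforcement can continue through
a Ranger Admin outage. The URL and paths below are illustrative stand-ins for the
{{policymgr_mgr_url}} and {{repo_name}} placeholders.

    import json, os, time, urllib.request

    POLICY_URL = "http://ranger-admin.example.com:6080/service/plugins/policies/download/STORM_REPO"  # illustrative
    CACHE_FILE = "/etc/ranger/STORM_REPO/policycache/policies.json"  # mirrors policy.cache.dir
    POLL_INTERVAL_MS = 30000  # ranger.plugin.storm.policy.pollIntervalMs

    def poll_once():
        try:
            with urllib.request.urlopen(POLICY_URL, timeout=10) as resp:
                policies = json.load(resp)
            # Cache the last successfully fetched policies.
            os.makedirs(os.path.dirname(CACHE_FILE), exist_ok=True)
            with open(CACHE_FILE, "w") as cache:
                json.dump(policies, cache)
            return policies
        except OSError:
            # Ranger Admin unreachable: fall back to the cached copy.
            with open(CACHE_FILE) as cache:
                return json.load(cache)

    while True:
        poll_once()
        time.sleep(POLL_INTERVAL_MS / 1000.0)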

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-atlas-application.properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-atlas-application.properties.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-atlas-application.properties.xml
new file mode 100644
index 0000000..47d7758
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-atlas-application.properties.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false">
+  <!-- These are the Atlas Hooks properties specific to this service. This file is then merged with common properties
+  that apply to all services. -->
+  <property>
+    <name>atlas.hook.storm.numRetries</name>
+    <value>3</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
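
The numRetries knob above is easiest to read as a bounded retry loop. A hedged
Python sketch follows (the real Atlas hook is Java; send_notification here is a
stand-in, not an Atlas API):

    import time

    NUM_RETRIES = 3  # atlas.hook.storm.numRetries

    def notify_with_retries(send_notification, payload,
                            retries=NUM_RETRIES, backoff_secs=1.0):
        # Assumes retries >= 1; the real hook narrows the except clause
        # to delivery errors.
        last_err = None
        for attempt in range(retries):
            try:
                return send_notification(payload)
            except Exception as err:
                last_err = err
                time.sleep(backoff_secs * (attempt + 1))  # linear backoff (illustrative)
        raise last_err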

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-cluster-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-cluster-log4j.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-cluster-log4j.xml
new file mode 100644
index 0000000..d7f7ae0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-cluster-log4j.xml
@@ -0,0 +1,133 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="false">
+
+  <property>
+    <name>storm_a1_maxfilesize</name>
+    <value>100</value>
+    <description>The maximum size a log file may reach before it is rotated</description>
+    <display-name>Storm Log: backup file size</display-name>
+    <value-attributes>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>storm_a1_maxbackupindex</name>
+    <value>9</value>
+    <description>The number of backup files</description>
+    <display-name>Storm Log: # of backup files</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>storm-cluster-log4j template</display-name>
+    <description>Custom cluster.xml</description>
+    <value><![CDATA[
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration monitorInterval="60">
+<properties>
+    <property name="pattern">%d{yyyy-MM-dd HH:mm:ss.SSS} %c{1.} %t [%p] %msg%n</property>
+</properties>
+<appenders>
+    <RollingFile name="A1" immediateFlush="false"
+                 fileName="${sys:storm.log.dir}/${sys:logfile.name}"
+                 filePattern="${sys:storm.log.dir}/${sys:logfile.name}.%i.gz">
+        <PatternLayout>
+            <pattern>${pattern}</pattern>
+        </PatternLayout>
+        <Policies>
+            <SizeBasedTriggeringPolicy size="{{storm_a1_maxfilesize}} MB"/> <!-- Or every 100 MB -->
+        </Policies>
+        <DefaultRolloverStrategy max="{{storm_a1_maxbackupindex}}"/>
+    </RollingFile>
+    <RollingFile name="WEB-ACCESS" immediateFlush="false"
+                 fileName="${sys:storm.log.dir}/access-web-${sys:daemon.name}.log"
+                 filePattern="${sys:storm.log.dir}/access-web-${sys:daemon.name}.log.%i.gz">
+        <PatternLayout>
+            <pattern>${pattern}</pattern>
+        </PatternLayout>
+        <Policies>
+            <SizeBasedTriggeringPolicy size="100 MB"/> <!-- Or every 100 MB -->
+        </Policies>
+        <DefaultRolloverStrategy max="9"/>
+    </RollingFile>
+    <RollingFile name="THRIFT-ACCESS" immediateFlush="false"
+                 fileName="${sys:storm.log.dir}/access-${sys:logfile.name}"
+                 filePattern="${sys:storm.log.dir}/access-${sys:logfile.name}.%i.gz">
+    <PatternLayout>
+        <pattern>${pattern}</pattern>
+    </PatternLayout>
+        <Policies>
+            <SizeBasedTriggeringPolicy size="100 MB"/> <!-- Or every 100 MB -->
+        </Policies>
+        <DefaultRolloverStrategy max="9"/>
+    </RollingFile>
+    <Syslog name="syslog" format="RFC5424" charset="UTF-8" host="localhost" port="514"
+            protocol="UDP" appName="[${sys:daemon.name}]" mdcId="mdc" includeMDC="true"
+            facility="LOCAL5" enterpriseNumber="18060" newLine="true" exceptionPattern="%rEx{full}"
+            messageId="[${sys:user.name}:S0]" id="storm" immediateFlush="true" immediateFail="true"/>
+</appenders>
+<loggers>
+
+    <Logger name="org.apache.storm.logging.filters.AccessLoggingFilter" level="info" additivity="false">
+        <AppenderRef ref="WEB-ACCESS"/>
+        <AppenderRef ref="syslog"/>
+    </Logger>
+    <Logger name="org.apache.storm.logging.ThriftAccessLogger" level="info" additivity="false">
+        <AppenderRef ref="THRIFT-ACCESS"/>
+        <AppenderRef ref="syslog"/>
+    </Logger>
+    <root level="info"> <!-- We log everything -->
+        <appender-ref ref="A1"/>
+        <appender-ref ref="syslog"/>
+    </root>
+</loggers>
+</configuration>
+
+    ]]></value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
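
A back-of-the-envelope sketch of what the two log4j2 knobs above cost on disk: the
A1 appender rolls at storm_a1_maxfilesize MB and keeps storm_a1_maxbackupindex
gzipped rotations, so the worst case per log file is one active file plus the
compressed backups. The compression ratio below is an assumption, not a measurement.

    def worst_case_log_mb(maxfilesize_mb=100, maxbackupindex=9, gzip_ratio=0.1):
        # One uncompressed active file plus maxbackupindex gzipped rotations.
        return maxfilesize_mb + maxbackupindex * maxfilesize_mb * gzip_ratio

    print(worst_case_log_mb())  # ~190 MB per log file with the defaults above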

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-env.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-env.xml
new file mode 100644
index 0000000..3ee0602
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-env.xml
@@ -0,0 +1,165 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+    <property>
+        <name>storm_user</name>
+        <display-name>Storm User</display-name>
+        <value>storm</value>
+        <property-type>USER</property-type>
+        <description/>
+        <value-attributes>
+            <type>user</type>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_log_dir</name>
+        <value>/var/log/storm</value>
+        <description/>
+        <value-attributes>
+            <type>directory</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_pid_dir</name>
+        <value>/var/run/storm</value>
+        <description/>
+        <value-attributes>
+            <type>directory</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>jmxremote_port</name>
+        <value>56431</value>
+        <description/>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_principal_name</name>
+        <description>Storm principal name</description>
+        <property-type>KERBEROS_PRINCIPAL</property-type>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_keytab</name>
+        <description>Storm keytab path</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_ui_principal_name</name>
+        <description>Storm UI principal name</description>
+        <property-type>KERBEROS_PRINCIPAL</property-type>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_ui_keytab</name>
+        <description>Storm UI keytab path</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus_keytab</name>
+        <description>Nimbus keytab path</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus_principal_name</name>
+        <description>Nimbus principal name</description>
+        <property-type>KERBEROS_PRINCIPAL</property-type>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_user_nofile_limit</name>
+        <value>128000</value>
+        <description>Max open files limit setting for STORM user.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_user_nproc_limit</name>
+        <value>65536</value>
+        <description>Max number of processes limit setting for STORM user.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>storm.atlas.hook</name>
+        <value>false</value>
+        <display-name>Enable Atlas Hook</display-name>
+        <description>Enable Atlas Hook</description>
+        <value-attributes>
+            <type>boolean</type>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+        <depends-on>
+            <property>
+                <type>application-properties</type>
+                <name>atlas.rest.address</name>
+            </property>
+        </depends-on>
+    </property>
+    <property>
+        <name>nimbus_seeds_supported</name>
+        <value>true</value>
+        <description/>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_logs_supported</name>
+        <value>true</value>
+        <description/>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <!-- storm-env.sh -->
+    <property>
+        <name>content</name>
+        <display-name>storm-env template</display-name>
+        <description>This is the jinja template for storm-env.sh file</description>
+        <value>
+            #!/bin/bash
+
+            # Set Storm specific environment variables here.
+
+            # The java implementation to use.
+            export JAVA_HOME={{java64_home}}
+
+            export STORM_CONF_DIR={{conf_dir}}
+            export STORM_HOME={{storm_component_home_dir}}
+
+            export STORM_JAR_JVM_OPTS={{jar_jvm_opts}}
+        </value>
+        <value-attributes>
+            <type>content</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+</configuration>
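
The content property above is consumed as a Jinja2 template (as its description
says): Ambari renders it with cluster parameters and writes the result out as
storm-env.sh. A minimal Python sketch of that step, with illustrative parameter
values rather than ones taken from a real cluster:

    from jinja2 import Template

    TEMPLATE = """#!/bin/bash
    export JAVA_HOME={{java64_home}}
    export STORM_CONF_DIR={{conf_dir}}
    export STORM_HOME={{storm_component_home_dir}}
    export STORM_JAR_JVM_OPTS={{jar_jvm_opts}}
    """

    params = {  # illustrative values
        "java64_home": "/usr/jdk64/jdk1.8.0_112",
        "conf_dir": "/etc/storm/conf",
        "storm_component_home_dir": "/usr/hdp/current/storm-client",
        "jar_jvm_opts": "",
    }

    # Write the rendered script where the Storm daemons will source it.
    with open("/tmp/storm-env.sh", "w") as env_file:
        env_file.write(Template(TEMPLATE).render(**params))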

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-site.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-site.xml
new file mode 100644
index 0000000..6b97fb6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-site.xml
@@ -0,0 +1,1002 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+
+    <property>
+        <name>storm.local.dir</name>
+        <value>/hadoop/storm</value>
+        <description>A directory on the local filesystem used by Storm for any local
+            filesystem usage it needs. The directory must exist and the Storm daemons must
+            have permission to read/write from this location.</description>
+        <value-attributes>
+            <type>directory</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.zookeeper.servers</name>
+        <value>['localhost']</value>
+        <description>A list of hosts of ZooKeeper servers used to manage the cluster.</description>
+        <value-attributes>
+            <type>multiLine</type>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.zookeeper.port</name>
+        <value>2181</value>
+        <description>The port Storm will use to connect to each of the ZooKeeper servers.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.zookeeper.root</name>
+        <value>/storm</value>
+        <description>The root location at which Storm stores data in ZooKeeper.</description>
+        <value-attributes>
+            <type>directory</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.zookeeper.session.timeout</name>
+        <value>30000</value>
+        <description>The session timeout for clients to ZooKeeper.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.zookeeper.connection.timeout</name>
+        <value>30000</value>
+        <description>The connection timeout for clients to ZooKeeper.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.zookeeper.retry.times</name>
+        <value>5</value>
+        <description>The number of times to retry a Zookeeper operation.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.zookeeper.retry.interval</name>
+        <value>1000</value>
+        <description>The interval between retries of a Zookeeper operation.</description>
+        <value-attributes>
+            <unit>ms</unit>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.zookeeper.retry.intervalceiling.millis</name>
+        <value>30000</value>
+        <description>The ceiling of the interval between retries of a Zookeeper operation.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>ms</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
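+    <!--
+      Illustration of the three retry settings above (not a shipped property):
+      a failed ZooKeeper operation is retried storm.zookeeper.retry.times times,
+      waiting storm.zookeeper.retry.interval ms at first and backing off, with
+      each wait capped by storm.zookeeper.retry.intervalceiling.millis.
+    -->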
+    <property>
+        <name>storm.cluster.mode</name>
+        <value>distributed</value>
+        <description>The mode this Storm cluster is running in. Either "distributed" or "local".</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.local.mode.zmq</name>
+        <value>false</value>
+        <description>Whether or not to use ZeroMQ for messaging in local mode. If this is set
+            to false, then Storm will use a pure-Java messaging system. The purpose
+            of this flag is to make it easy to run Storm in local mode by eliminating
+            the need for native dependencies, which can be difficult to install.
+        </description>
+        <value-attributes>
+            <type>boolean</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>nimbus.thrift.port</name>
+        <value>6627</value>
+        <description> Which port the Thrift interface of Nimbus should run on. Clients should
+            connect to this port to upload jars and submit topologies.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.thrift.max_buffer_size</name>
+        <value>1048576</value>
+        <description>The maximum buffer size thrift should use when reading messages.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>bytes</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>nimbus.task.timeout.secs</name>
+        <value>30</value>
+        <description>How long a task can go without heartbeating before nimbus considers it dead and reassigns it to another location.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.supervisor.timeout.secs</name>
+        <value>60</value>
+        <description>How long a supervisor can go without heartbeating before nimbus considers it dead and stops assigning new work to it.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.monitor.freq.secs</name>
+        <value>10</value>
+        <description>
+            How often nimbus should wake up to check heartbeats and do reassignments. Note
+            that if a machine ever goes down Nimbus will immediately wake up and take action.
+            This parameter is for checking for failures when there's no explicit event like that occurring.
+        </description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.cleanup.inbox.freq.secs</name>
+        <value>600</value>
+        <description>How often nimbus should wake the cleanup thread to clean the inbox.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.inbox.jar.expiration.secs</name>
+        <value>3600</value>
+        <description>
+            The length of time a jar file lives in the inbox before being deleted by the cleanup thread.
+
+            Probably keep this value greater than or equal to nimbus.cleanup.inbox.freq.secs.
+            Note that the time it takes to delete an inbox jar file will be somewhat more than
+            this expiration time (depending on how often nimbus.cleanup.inbox.freq.secs fires).
+        </description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.task.launch.secs</name>
+        <value>120</value>
+        <description>A special timeout used when a task is initially launched. During launch, this is the timeout
+            used until the first heartbeat, overriding nimbus.task.timeout.secs.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.reassign</name>
+        <value>true</value>
+        <description>Whether or not nimbus should reassign tasks if it detects that a task goes down.
+            Defaults to true, and it's not recommended to change this value.</description>
+        <value-attributes>
+            <type>boolean</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.file.copy.expiration.secs</name>
+        <value>600</value>
+        <description>During upload/download with the master, how long an upload or download connection is idle
+            before nimbus considers it dead and drops the connection.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>ui.port</name>
+        <value>8744</value>
+        <description>Storm UI binds to this port.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>logviewer.port</name>
+        <value>8000</value>
+        <description>HTTP UI port for log viewer.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>logviewer.appender.name</name>
+        <value>A1</value>
+        <description>Appender name used by log viewer to determine log directory.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>drpc.port</name>
+        <value>3772</value>
+        <description>This port is used by Storm DRPC for receiving DRPC requests from clients.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>drpc.worker.threads</name>
+        <value>64</value>
+        <description>DRPC thrift server worker threads.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>drpc.queue.size</name>
+        <value>128</value>
+        <description>DRPC thrift server queue size.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>drpc.invocations.port</name>
+        <value>3773</value>
+        <description>This port on Storm DRPC is used by DRPC topologies to receive function invocations and send results back.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>drpc.request.timeout.secs</name>
+        <value>600</value>
+        <description>The timeout on DRPC requests within the DRPC server. Defaults to 10 minutes. Note that requests can also
+            timeout based on the socket timeout on the DRPC client, and separately based on the topology message
+            timeout for the topology implementing the DRPC function.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>transactional.zookeeper.root</name>
+        <value>/transactional</value>
+        <description>The root directory in ZooKeeper for metadata about TransactionalSpouts.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>transactional.zookeeper.servers</name>
+        <value>null</value>
+        <description>The list of zookeeper servers in which to keep the transactional state. If null (which is default),
+            will use storm.zookeeper.servers</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>transactional.zookeeper.port</name>
+        <value>null</value>
+        <description>The port to use to connect to the transactional zookeeper servers. If null (which is default),
+            will use storm.zookeeper.port</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>supervisor.slots.ports</name>
+        <value>[6700, 6701]</value>
+        <description>A list of ports that can run workers on this supervisor. Each worker uses one port, and
+            the supervisor will only run one worker per port. Use this configuration to tune
+            how many workers run on each machine.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>supervisor.worker.start.timeout.secs</name>
+        <value>120</value>
+        <description>How long a worker can go without heartbeating during the initial launch before
+            the supervisor tries to restart the worker process. This value overrides
+            supervisor.worker.timeout.secs during launch because there is additional
+            overhead to starting and configuring the JVM on launch.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>supervisor.worker.timeout.secs</name>
+        <value>30</value>
+        <description>How long a worker can go without heartbeating before the supervisor tries to restart the worker process.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>supervisor.monitor.frequency.secs</name>
+        <value>3</value>
+        <description>How often the supervisor checks the worker heartbeats to see if any of them need to be restarted.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>supervisor.heartbeat.frequency.secs</name>
+        <value>5</value>
+        <description>How often the supervisor sends a heartbeat to the master.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>worker.heartbeat.frequency.secs</name>
+        <value>1</value>
+        <description>How often this worker should heartbeat to the supervisor.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>task.heartbeat.frequency.secs</name>
+        <value>3</value>
+        <description>How often a task should heartbeat its status to the master.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>task.refresh.poll.secs</name>
+        <value>10</value>
+        <description>How often a task should sync its connections with other tasks (if a task is
+            reassigned, the other tasks sending messages to it need to refresh their connections).
+            In general though, when a reassignment happens other tasks will be notified
+            almost immediately. This configuration is here just in case that notification doesn't
+            come through.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zmq.threads</name>
+        <value>1</value>
+        <description>The number of threads that should be used by the zeromq context in each worker process.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zmq.linger.millis</name>
+        <value>5000</value>
+        <description>How long a connection should retry sending messages to a target host when
+            the connection is closed. This is an advanced configuration and can almost
+            certainly be ignored.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zmq.hwm</name>
+        <value>0</value>
+        <description>The high water mark for the ZeroMQ push sockets used for networking. Use this config to prevent buffer explosion
+            on the networking layer.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.messaging.netty.server_worker_threads</name>
+        <value>1</value>
+        <description>Netty based messaging: The # of worker threads for the server.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.messaging.netty.client_worker_threads</name>
+        <value>1</value>
+        <description>Netty based messaging: The # of worker threads for the client.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.messaging.netty.buffer_size</name>
+        <value>5242880</value>
+        <description>Netty based messaging: The buffer size for send/recv buffer.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>bytes</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.messaging.netty.max_retries</name>
+        <value>30</value>
+        <description>Netty based messaging: The max # of retries that a peer will perform when a remote is not accessible.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.messaging.netty.max_wait_ms</name>
+        <value>1000</value>
+        <description>Netty based messaging: The max # of milliseconds that a peer will wait.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>ms</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.messaging.netty.min_wait_ms</name>
+        <value>100</value>
+        <description>Netty based messaging: The min # of milliseconds that a peer will wait.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>ms</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
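+    <!--
+      Illustration of the Netty retry settings above (not a shipped property):
+      when a peer is unreachable, waits between reconnection attempts start near
+      storm.messaging.netty.min_wait_ms and grow toward storm.messaging.netty.max_wait_ms,
+      for at most storm.messaging.netty.max_retries attempts.
+    -->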
+    <property>
+        <name>topology.enable.message.timeouts</name>
+        <value>true</value>
+        <description>Whether Storm should time out messages. Defaults to true. Setting this to false is meant
+            for unit tests, to prevent tuples from being accidentally timed out during the test.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.debug</name>
+        <value>false</value>
+        <description>When set to true, Storm will log every message that's emitted.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.optimize</name>
+        <value>true</value>
+        <description>Whether or not the master should optimize topologies by running multiple tasks in a single thread where appropriate.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.workers</name>
+        <value>1</value>
+        <description>How many processes should be spawned around the cluster to execute this
+            topology. Each process will execute some number of tasks as threads within
+            them. This parameter should be used in conjunction with the parallelism hints
+            on each component in the topology to tune the performance of a topology.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.acker.executors</name>
+        <value>null</value>
+        <description>How many executors to spawn for ackers.
+
+            If this is set to 0, then Storm will immediately ack tuples as soon
+            as they come off the spout, effectively disabling reliability.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.message.timeout.secs</name>
+        <value>30</value>
+        <description>The maximum amount of time given to the topology to fully process a message
+            emitted by a spout. If the message is not acked within this time frame, Storm
+            will fail the message on the spout. Some spouts implementations will then replay
+            the message at a later time.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.skip.missing.kryo.registrations</name>
+        <value>false</value>
+        <description> Whether or not Storm should skip the loading of kryo registrations for which it
+            does not know the class or have the serializer implementation. Otherwise, the task will
+            fail to load and will throw an error at runtime. The use case for this is when you want to
+            declare your serializations in the storm.yaml files on the cluster rather than every single
+            time you submit a topology. Different applications may use different serializations and so
+            a single application may not have the code for the other serializers used by other apps.
+            By setting this config to true, Storm will ignore that it doesn't have those other serializations
+            rather than throw an error.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.max.task.parallelism</name>
+        <value>null</value>
+        <description>The maximum parallelism allowed for a component in this topology. This configuration is
+            typically used in testing to limit the number of threads spawned in local mode.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.max.spout.pending</name>
+        <value>1000</value>
+        <description>The maximum number of tuples that can be pending on a spout task at any given time.
+            This config applies to individual tasks, not to spouts or topologies as a whole.
+
+            A pending tuple is one that has been emitted from a spout but has not been acked or failed yet.
+            Note that this config parameter has no effect for unreliable spouts that don't tag
+            their tuples with a message id.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.state.synchronization.timeout.secs</name>
+        <value>60</value>
+        <description>The maximum amount of time a component gives a source of state to synchronize before it requests
+            synchronization again.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.stats.sample.rate</name>
+        <value>0.05</value>
+        <description>The percentage of tuples to sample to produce stats for a task.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.builtin.metrics.bucket.size.secs</name>
+        <value>60</value>
+        <description>The time period that built-in metrics data is bucketed into.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.fall.back.on.java.serialization</name>
+        <value>true</value>
+        <description>Whether or not to use Java serialization in a topology.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.worker.childopts</name>
+        <value>null</value>
+        <description>Topology-specific options for the worker child process. This is used in addition to WORKER_CHILDOPTS.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.executor.receive.buffer.size</name>
+        <value>1024</value>
+        <description>The size of the Disruptor receive queue for each executor. Must be a power of 2.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.executor.send.buffer.size</name>
+        <value>1024</value>
+        <description>The size of the Disruptor send queue for each executor. Must be a power of 2.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.receiver.buffer.size</name>
+        <value>8</value>
+        <description>The maximum number of messages to batch from the thread receiving off the network to the
+            executor queues. Must be a power of 2.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.transfer.buffer.size</name>
+        <value>1024</value>
+        <description>The size of the Disruptor transfer queue for each worker.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.tick.tuple.freq.secs</name>
+        <value>null</value>
+        <description>How often a tick tuple from the "__system" component and "__tick" stream should be sent
+            to tasks. Meant to be used as a component-specific configuration.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.worker.shared.thread.pool.size</name>
+        <value>4</value>
+        <description>The size of the shared thread pool for worker tasks to make use of. The thread pool can be accessed
+            via the TopologyContext.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.disruptor.wait.strategy</name>
+        <value>com.lmax.disruptor.BlockingWaitStrategy</value>
+        <description>Configure the wait strategy used for internal queuing. Can be used to tradeoff latency
+            vs. throughput.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>topology.sleep.spout.wait.strategy.time.ms</name>
+        <value>1</value>
+        <description>The amount of milliseconds the SleepEmptyEmitStrategy should sleep for.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.error.throttle.interval.secs</name>
+        <value>10</value>
+        <description>The interval in seconds to use for determining whether to throttle error reported to Zookeeper. For example,
+            an interval of 10 seconds with topology.max.error.report.per.interval set to 5 will only allow 5 errors to be
+            reported to Zookeeper per task for every 10 second interval of time.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.max.error.report.per.interval</name>
+        <value>5</value>
+        <description>The maximum number of errors per task that will be reported to Zookeeper within each
+            topology.error.throttle.interval.secs window. For example, an interval of 10 seconds with this
+            value set to 5 will only allow 5 errors per task to be reported to Zookeeper every 10 seconds.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>topology.trident.batch.emit.interval.millis</name>
+        <value>500</value>
+        <description>How often a batch can be emitted in a Trident topology.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>dev.zookeeper.path</name>
+        <value>/tmp/dev-storm-zookeeper</value>
+        <description>The path to use as the zookeeper dir when running a zookeeper server via
+            "storm dev-zookeeper". This zookeeper instance is only intended for development;
+            it is not a production grade zookeeper setup.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>ui.childopts</name>
+        <value>-Xmx768m _JAAS_PLACEHOLDER</value>
+        <description>Childopts for Storm UI Java process.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>ui.filter</name>
+        <value>null</value>
+        <description>Class for Storm UI authentication</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>logviewer.childopts</name>
+        <value>-Xmx128m _JAAS_PLACEHOLDER</value>
+        <description>Childopts for log viewer java process.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>drpc.childopts</name>
+        <value>-Xmx768m _JAAS_PLACEHOLDER</value>
+        <description>Childopts for Storm DRPC Java process.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>_storm.min.ruid</name>
+        <value>null</value>
+        <description>min.user.id is set to the first real user id on the system. If the value is 'null', the default value is taken from the UID_MIN key of /etc/login.defs; otherwise the specified value is used for all hosts.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.log.dir</name>
+        <value>{{log_dir}}</value>
+        <description>Log directory for Storm.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.authorizer</name>
+        <description>Class name of the authorization plugin for nimbus. The value is set depending on whether the Ranger Storm plugin is enabled.</description>
+        <depends-on>
+            <property>
+                <type>ranger-storm-plugin-properties</type>
+                <name>ranger-storm-plugin-enabled</name>
+            </property>
+        </depends-on>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>nimbus.seeds</name>
+        <value>localhost</value>
+        <description>Comma-delimited list of the hosts running nimbus server.</description>
+        <value-attributes>
+            <type>componentHosts</type>
+            <editable-only-at-install>true</editable-only-at-install>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.thrift.threads</name>
+        <value>196</value>
+        <description>The number of threads that should be used by the nimbus thrift server.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.min.replication.count.default</name>
+        <value>1</value>
+        <description>Default minimum number of nimbus hosts where the code must be replicated before leader nimbus can mark the topology as active and create assignments. </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.min.replication.count</name>
+        <value>{{actual_topology_min_replication_count}}</value>
+        <description>Calculated minimum number of nimbus hosts where the code must be replicated before leader nimbus can mark the topology as active and create assignments. </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.max.replication.wait.time.sec.default</name>
+        <value>60</value>
+        <description>Default maximum wait time for the nimbus host replication to achieve nimbus.min.replication.count. Once this time has elapsed, nimbus will go ahead and perform topology activation tasks even if the required nimbus.min.replication.count is not achieved.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.max.replication.wait.time.sec</name>
+        <value>{{actual_topology_max_replication_wait_time_sec}}</value>
+        <description>Calculated maximum wait time for the nimbus host replication to achieve nimbus.min.replication.count. Once this time has elapsed, nimbus will go ahead and perform topology activation tasks even if the required nimbus.min.replication.count is not achieved.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
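+    <!--
+      Illustration of the replication settings above (not a shipped property):
+      a submitted topology is activated once its code reaches
+      topology.min.replication.count nimbus hosts, or after
+      topology.max.replication.wait.time.sec seconds, whichever comes first.
+    -->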
+
+    <property>
+        <name>storm.thrift.transport</name>
+        <value>{{storm_thrift_transport}}</value>
+        <description>The transport plug-in that is used for Thrift client/server communication.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>_storm.thrift.nonsecure.transport</name>
+        <value>org.apache.storm.security.auth.SimpleTransportPlugin</value>
+        <description>The transport plug-in that is used in non-secure mode for Thrift client/server communication.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>_storm.thrift.secure.transport</name>
+        <value>org.apache.storm.security.auth.kerberos.KerberosSaslTransportPlugin</value>
+        <description>The transport plug-in that is used in secure mode for Thrift client/server communication.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.messaging.transport</name>
+        <value>org.apache.storm.messaging.netty.Context</value>
+        <description>The transporter for communication among Storm tasks.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.topology.validator</name>
+        <value>org.apache.storm.nimbus.DefaultTopologyValidator</value>
+        <description>A custom class that implements ITopologyValidator that is run whenever a
+            topology is submitted. Can be used to provide business-specific logic for
+            whether topologies are allowed to run or not.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.spout.wait.strategy</name>
+        <value>org.apache.storm.spout.SleepSpoutWaitStrategy</value>
+        <description>A class that implements a strategy for what to do when a spout needs to wait. Waiting is
+            triggered in one of two conditions:
+
+            1. nextTuple emits no tuples
+            2. The spout has hit maxSpoutPending and can't emit any more tuples</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.kryo.factory</name>
+        <value>org.apache.storm.serialization.DefaultKryoFactory</value>
+        <description>Class that specifies how to create a Kryo instance for serialization. Storm will then apply
+            topology.kryo.register and topology.kryo.decorators on top of this. The default implementation
+            implements topology.fall.back.on.java.serialization and turns references off.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.tuple.serializer</name>
+        <value>org.apache.storm.serialization.types.ListDelegateSerializer</value>
+        <description>The serializer class for ListDelegate (tuple payload).
+            The default serializer will be ListDelegateSerializer</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>client.jartransformer.class</name>
+        <description>Storm topology backward compatibility transformer</description>
+        <value>org.apache.storm.hack.StormShadeTransformer</value>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.impersonation.authorizer</name>
+        <description>
+            To ensure only authorized users can perform impersonation, start nimbus with nimbus.impersonation.authorizer set to org.apache.storm.security.auth.authorizer.ImpersonationAuthorizer.
+            A storm client may submit requests on behalf of another user. For example, if userX submits an oozie workflow, and as part of workflow execution user oozie wants to submit a topology on behalf of userX, it can do so by leveraging the impersonation feature. In order to submit a topology as some other user, use the StormSubmitter.submitTopologyAs API. Alternatively, use NimbusClient.getConfiguredClientAs to get a nimbus client as some other user and perform any nimbus action (i.e. kill/rebalance/activate/deactivate) using this client.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.impersonation.acl</name>
+        <description>
+            The ImpersonationAuthorizer uses nimbus.impersonation.acl as the ACL to authorize users. Following is a sample nimbus config for supporting impersonation:
+            nimbus.impersonation.acl:
+              impersonating_user1:
+                hosts:
+                  [comma separated list of hosts from which impersonating_user1 is allowed to impersonate other users]
+                groups:
+                  [comma separated list of groups whose users impersonating_user1 is allowed to impersonate]
+              impersonating_user2:
+                hosts:
+                  [comma separated list of hosts from which impersonating_user2 is allowed to impersonate other users]
+                groups:
+                  [comma separated list of groups whose users impersonating_user2 is allowed to impersonate]
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <!-- Deleted configs. -->
+
+    <property>
+        <name>storm.cluster.metrics.consumer.register</name>
+        <value>[{"class": "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter"}]</value>
+        <description></description>
+        <value-attributes>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.metrics.consumer.register</name>
+        <value>[{"class": "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsSink", "parallelism.hint": 1, "whitelist": ["kafkaOffset\\..+/", "__complete-latency", "__process-latency", "__receive\\.population$", "__sendqueue\\.population$", "__execute-count", "__emit-count", "__ack-count", "__fail-count", "memory/heap\\.usedBytes$", "memory/nonHeap\\.usedBytes$", "GC/.+\\.count$", "GC/.+\\.timeMs$"]}]</value>
+        <description></description>
+        <value-attributes>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.metrics.aggregate.per.worker</name>
+        <value>true</value>
+        <description></description>
+        <value-attributes>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.metrics.aggregate.metric.evict.secs</name>
+        <value>5</value>
+        <description></description>
+        <value-attributes>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.metrics.expand.map.type</name>
+        <value>true</value>
+        <description></description>
+        <value-attributes>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.metrics.metric.name.separator</name>
+        <value>.</value>
+        <description></description>
+        <value-attributes>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>java.library.path</name>
+        <value>/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm-client/lib</value>
+        <description>This value is passed to spawned JVMs (e.g., Nimbus, Supervisor, and Workers)
+            for the java.library.path value. java.library.path tells the JVM where
+            to look for native libraries. It is necessary to set this config correctly since
+            Storm uses the ZeroMQ and JZMQ native libs. </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.childopts</name>
+        <value>-Xmx1024m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm-nimbus/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-nimbus/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM</value>
+        <description>This parameter is used by the storm-deploy project to configure the jvm options for the nimbus daemon.</description>
+        <value-attributes>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>worker.childopts</name>
+        <value>-Xmx768m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm-client/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-client/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM</value>
+        <description>The jvm opts provided to workers launched by this supervisor. All \"%ID%\" substrings are replaced with an identifier for this worker.</description>
+        <value-attributes>
+            <type>multiLine</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>supervisor.childopts</name>
+        <value>-Xmx256m _JAAS_PLACEHOLDER -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port={{jmxremote_port}} -javaagent:/usr/hdp/current/storm-supervisor/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-supervisor/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM</value>
+        <description>This parameter is used by the storm-deploy project to configure the jvm options for the supervisor daemon.</description>
+        <value-attributes>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+</configuration>
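
A minimal sketch of submitting a topology on behalf of another user, as described in the nimbus.impersonation.authorizer property above. The sketch assumes the Storm 1.x StormSubmitter.submitTopologyAs overload that takes SubmitOptions, a ProgressListener, and the impersonated user name; the topology name "demo-topology" and user "userX" are hypothetical:

    import org.apache.storm.Config;
    import org.apache.storm.StormSubmitter;
    import org.apache.storm.topology.TopologyBuilder;

    public class ImpersonatedSubmit {
      public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        // ... wire up spouts and bolts here before submitting ...

        Config conf = new Config();

        // Submit the topology as "userX"; nimbus authorizes the caller
        // against nimbus.impersonation.acl before accepting the topology.
        StormSubmitter.submitTopologyAs("demo-topology", conf,
            builder.createTopology(), null, null, "userX");
      }
    }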


[04/50] [abbrv] ambari git commit: AMBARI-20758 Aggregate local metrics for minute aggregation time window (dsen)

Posted by ad...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py
index c0feed5..e5da9ba 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py
@@ -27,6 +27,9 @@ from event_definition import HostMetricCollectEvent, ProcessMetricCollectEvent
 from metric_collector import MetricsCollector
 from emitter import Emitter
 from host_info import HostInfo
+from aggregator import Aggregator
+from aggregator import AggregatorWatchdog
+
 
 logger = logging.getLogger()
 
@@ -50,11 +53,15 @@ class Controller(threading.Thread):
     self.initialize_events_cache()
     self.emitter = Emitter(self.config, self.application_metric_map, stop_handler)
     self._t = None
+    self.aggregator = None
+    self.aggregator_watchdog = None
 
   def run(self):
     logger.info('Running Controller thread: %s' % threading.currentThread().getName())
 
     self.start_emitter()
+    if self.config.is_inmemory_aggregation_enabled():
+      self.start_aggregator_with_watchdog()
 
     # Wake every 5 seconds to push events to the queue
     while True:
@@ -62,6 +69,10 @@ class Controller(threading.Thread):
         logger.warn('Event Queue full!! Suspending further collections.')
       else:
         self.enqueque_events()
+      # restart aggregator if needed
+      if self.config.is_inmemory_aggregation_enabled() and not self.aggregator_watchdog.is_ok():
+        logger.warning("Aggregator is not available. Restarting aggregator.")
+        self.start_aggregator_with_watchdog()
       pass
       # Wait for the service stop event instead of sleeping blindly
       if 0 == self._stop_handler.wait(self.sleep_interval):
@@ -75,6 +86,12 @@ class Controller(threading.Thread):
     # The emitter thread should have stopped by now, just ensure it has shut
     # down properly
     self.emitter.join(5)
+
+    if self.config.is_inmemory_aggregation_enabled():
+      self.aggregator.stop()
+      self.aggregator_watchdog.stop()
+      self.aggregator.join(5)
+      self.aggregator_watchdog.join(5)
     pass
 
   # TODO: Optimize to not use Timer class and use the Queue instead
@@ -115,3 +132,14 @@ class Controller(threading.Thread):
 
   def start_emitter(self):
     self.emitter.start()
+
+  # Start aggregator and watchdog threads
+  def start_aggregator_with_watchdog(self):
+    if self.aggregator:
+      self.aggregator.stop()
+    if self.aggregator_watchdog:
+      self.aggregator_watchdog.stop()
+    self.aggregator = Aggregator(self.config, self._stop_handler)
+    self.aggregator_watchdog = AggregatorWatchdog(self.config, self._stop_handler)
+    self.aggregator.start()
+    self.aggregator_watchdog.start()

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py
index e2a7f0d..77b8c23 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py
@@ -44,10 +44,16 @@ class Emitter(threading.Thread):
     self._stop_handler = stop_handler
     self.application_metric_map = application_metric_map
     self.collector_port = config.get_server_port()
-    self.all_metrics_collector_hosts = config.get_metrics_collector_hosts()
+    self.all_metrics_collector_hosts = config.get_metrics_collector_hosts_as_list()
     self.is_server_https_enabled = config.is_server_https_enabled()
     self.set_instanceid = config.is_set_instanceid()
     self.instanceid = config.get_instanceid()
+    self.is_inmemory_aggregation_enabled = config.is_inmemory_aggregation_enabled()
+
+    if self.is_inmemory_aggregation_enabled:
+      self.collector_port = config.get_inmemory_aggregation_port()
+      self.all_metrics_collector_hosts = ['localhost']
+      self.is_server_https_enabled = False
 
     if self.is_server_https_enabled:
       self.ca_certs = config.get_ca_certs()

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/stop_handler.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/stop_handler.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/stop_handler.py
index bfb6957..7a9fbec 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/stop_handler.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/stop_handler.py
@@ -117,7 +117,8 @@ class StopHandlerLinux(StopHandler):
 
   def wait(self, timeout=None):
     # Stop process when stop event received
-    if self.stop_event.wait(timeout):
+    self.stop_event.wait(timeout)
+    if self.stop_event.isSet():
       logger.info("Stop event received")
       return 0
     # Timeout

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/main.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/main.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/main.py
index d218015..53d27f8 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/main.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/main.py
@@ -21,7 +21,7 @@ limitations under the License.
 import logging
 import os
 import sys
-
+import signal
 from ambari_commons.os_utils import remove_file
 
 from core.controller import Controller
@@ -73,6 +73,10 @@ def server_process_main(stop_handler, scmStatus=None):
   if scmStatus is not None:
     scmStatus.reportStarted()
 
+  # For some reason this is needed to catch system signals like SIGTERM
+  # TODO fix if possible
+  signal.pause()
+
   #The controller thread finishes when the stop event is signaled
   controller.join()
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java b/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
index 211e9cd..76b1c15 100644
--- a/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
+++ b/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
@@ -72,6 +72,8 @@ public class KafkaTimelineMetricsReporter extends AbstractTimelineMetricsSink
   private static final String TIMELINE_METRICS_SSL_KEYSTORE_PASSWORD_PROPERTY = TIMELINE_METRICS_KAFKA_PREFIX + SSL_KEYSTORE_PASSWORD_PROPERTY;
   private static final String TIMELINE_METRICS_KAFKA_INSTANCE_ID_PROPERTY = TIMELINE_METRICS_KAFKA_PREFIX + INSTANCE_ID_PROPERTY;
   private static final String TIMELINE_METRICS_KAFKA_SET_INSTANCE_ID_PROPERTY = TIMELINE_METRICS_KAFKA_PREFIX + SET_INSTANCE_ID_PROPERTY;
+  private static final String TIMELINE_METRICS_KAFKA_HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY = TIMELINE_METRICS_KAFKA_PREFIX + HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY;
+  private static final String TIMELINE_METRICS_KAFKA_HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY = TIMELINE_METRICS_KAFKA_PREFIX + HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY;
   private static final String TIMELINE_DEFAULT_HOST = "localhost";
   private static final String TIMELINE_DEFAULT_PORT = "6188";
   private static final String TIMELINE_DEFAULT_PROTOCOL = "http";
@@ -96,6 +98,8 @@ public class KafkaTimelineMetricsReporter extends AbstractTimelineMetricsSink
   private String[] includedMetricsPrefixes;
   // Local cache to avoid prefix matching everytime
   private Set<String> excludedMetrics = new HashSet<>();
+  private boolean hostInMemoryAggregationEnabled;
+  private int hostInMemoryAggregationPort;
 
   @Override
   protected String getCollectorUri(String host) {
@@ -132,6 +136,17 @@ public class KafkaTimelineMetricsReporter extends AbstractTimelineMetricsSink
     return hostname;
   }
 
+
+  @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return hostInMemoryAggregationEnabled;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return hostInMemoryAggregationPort;
+  }
+
   public void setMetricsCache(TimelineMetricsCache metricsCache) {
     this.metricsCache = metricsCache;
   }
@@ -169,6 +184,8 @@ public class KafkaTimelineMetricsReporter extends AbstractTimelineMetricsSink
         instanceId = props.getString(TIMELINE_METRICS_KAFKA_INSTANCE_ID_PROPERTY);
         setInstanceId = props.getBoolean(TIMELINE_METRICS_KAFKA_SET_INSTANCE_ID_PROPERTY);
 
+        hostInMemoryAggregationEnabled = props.getBoolean(TIMELINE_METRICS_KAFKA_HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY, false);
+        hostInMemoryAggregationPort = props.getInt(TIMELINE_METRICS_KAFKA_HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY, 61888);
         setMetricsCache(new TimelineMetricsCache(maxRowCacheSize, metricsSendInterval));
 
         if (metricCollectorProtocol.contains("https")) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java b/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
index 08f0598..24b2c8b 100644
--- a/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
+++ b/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
@@ -55,6 +55,8 @@ public class StormTimelineMetricsReporter extends AbstractTimelineMetricsSink
   private NimbusClient nimbusClient;
   private String applicationId;
   private int timeoutSeconds;
+  private boolean hostInMemoryAggregationEnabled;
+  private int hostInMemoryAggregationPort;
 
   public StormTimelineMetricsReporter() {
 
@@ -96,6 +98,16 @@ public class StormTimelineMetricsReporter extends AbstractTimelineMetricsSink
   }
 
   @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return hostInMemoryAggregationEnabled;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return hostInMemoryAggregationPort;
+  }
+
+  @Override
   public void prepare(Map conf) {
     LOG.info("Preparing Storm Metrics Reporter");
     try {
@@ -130,6 +142,8 @@ public class StormTimelineMetricsReporter extends AbstractTimelineMetricsSink
       applicationId = cf.get(APP_ID).toString();
       setInstanceId = Boolean.getBoolean(cf.get(SET_INSTANCE_ID_PROPERTY).toString());
       instanceId = cf.get(INSTANCE_ID_PROPERTY).toString();
+      hostInMemoryAggregationEnabled = Boolean.valueOf(cf.get(HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY).toString());
+      hostInMemoryAggregationPort = Integer.valueOf(cf.get(HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY).toString());
 
       collectorUri = constructTimelineMetricUri(protocol, findPreferredCollectHost(), port);
       if (protocol.contains("https")) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
index 20f60e1..c9c0538 100644
--- a/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
@@ -61,6 +61,8 @@ public class StormTimelineMetricsSink extends AbstractTimelineMetricsSink implem
   private String applicationId;
   private boolean setInstanceId;
   private String instanceId;
+  private boolean hostInMemoryAggregationEnabled;
+  private int hostInMemoryAggregationPort;
 
   @Override
   protected String getCollectorUri(String host) {
@@ -98,6 +100,16 @@ public class StormTimelineMetricsSink extends AbstractTimelineMetricsSink implem
   }
 
   @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return hostInMemoryAggregationEnabled;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return hostInMemoryAggregationPort;
+  }
+
+  @Override
   public void prepare(Map map, Object o, TopologyContext topologyContext, IErrorReporter iErrorReporter) {
     LOG.info("Preparing Storm Metrics Sink");
     try {
@@ -126,6 +138,8 @@ public class StormTimelineMetricsSink extends AbstractTimelineMetricsSink implem
 
     instanceId = configuration.getProperty(INSTANCE_ID_PROPERTY);
     setInstanceId = Boolean.valueOf(configuration.getProperty(SET_INSTANCE_ID_PROPERTY, "false"));
+    hostInMemoryAggregationEnabled = Boolean.valueOf(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY));
+    hostInMemoryAggregationPort = Integer.valueOf(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY));
     // Initialize the collector write strategy
     super.init();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
index 14f160b..5b75065 100644
--- a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
+++ b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
@@ -50,6 +50,8 @@ public class StormTimelineMetricsReporter extends AbstractTimelineMetricsSink
   private String instanceId;
   private String applicationId;
   private int timeoutSeconds;
+  private boolean hostInMemoryAggregationEnabled;
+  private int hostInMemoryAggregationPort;
 
   public StormTimelineMetricsReporter() {
 
@@ -91,6 +93,16 @@ public class StormTimelineMetricsReporter extends AbstractTimelineMetricsSink
   }
 
   @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return hostInMemoryAggregationEnabled;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return hostInMemoryAggregationPort;
+  }
+
+  @Override
   public void prepare(Object registrationArgument) {
     LOG.info("Preparing Storm Metrics Reporter");
     try {
@@ -119,6 +131,10 @@ public class StormTimelineMetricsReporter extends AbstractTimelineMetricsSink
       applicationId = configuration.getProperty(CLUSTER_REPORTER_APP_ID, DEFAULT_CLUSTER_REPORTER_APP_ID);
       setInstanceId = Boolean.valueOf(configuration.getProperty(SET_INSTANCE_ID_PROPERTY));
       instanceId = configuration.getProperty(INSTANCE_ID_PROPERTY);
+
+      hostInMemoryAggregationEnabled = Boolean.valueOf(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY));
+      hostInMemoryAggregationPort = Integer.valueOf(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY));
+
       if (protocol.contains("https")) {
         String trustStorePath = configuration.getProperty(SSL_KEYSTORE_PATH_PROPERTY).trim();
         String trustStoreType = configuration.getProperty(SSL_KEYSTORE_TYPE_PROPERTY).trim();

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
index 425201c..320e177 100644
--- a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
@@ -70,6 +70,8 @@ public class StormTimelineMetricsSink extends AbstractTimelineMetricsSink implem
   private String applicationId;
   private String instanceId;
   private boolean setInstanceId;
+  private boolean hostInMemoryAggregationEnabled;
+  private int hostInMemoryAggregationPort;
 
   @Override
   protected String getCollectorUri(String host) {
@@ -107,6 +109,16 @@ public class StormTimelineMetricsSink extends AbstractTimelineMetricsSink implem
   }
 
   @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return hostInMemoryAggregationEnabled;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return hostInMemoryAggregationPort;
+  }
+
+  @Override
   public void prepare(Map map, Object o, TopologyContext topologyContext, IErrorReporter iErrorReporter) {
     LOG.info("Preparing Storm Metrics Sink");
     try {
@@ -137,6 +149,10 @@ public class StormTimelineMetricsSink extends AbstractTimelineMetricsSink implem
     port = configuration.getProperty(COLLECTOR_PORT, "6188");
     instanceId = configuration.getProperty(INSTANCE_ID_PROPERTY);
     setInstanceId = Boolean.valueOf(configuration.getProperty(SET_INSTANCE_ID_PROPERTY, "false"));
+
+    hostInMemoryAggregationEnabled = Boolean.valueOf(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY));
+    hostInMemoryAggregationPort = Integer.valueOf(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY));
+
     // Initialize the collector write strategy
     super.init();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
index c242a2f..f984253 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
@@ -24,10 +24,13 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.AggregationResult;
 import org.apache.hadoop.metrics2.sink.timeline.ContainerMetric;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricMetadata;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricWithAggregatedValues;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
 import org.apache.hadoop.metrics2.sink.timeline.TopNConfig;
 import org.apache.hadoop.service.AbstractService;
@@ -41,6 +44,7 @@ import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.discovery.TimelineMetricMetadataManager;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.Condition;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.ConditionBuilder;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.TopNCondition;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.function.SeriesAggregateFunction;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.function.TimelineMetricsSeriesAggregateFunction;
@@ -62,6 +66,7 @@ import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
 
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_HOST_INMEMORY_AGGREGATION;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.USE_GROUPBY_AGGREGATOR_QUERIES;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.DEFAULT_TOPN_HOSTS_LIMIT;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.availability.AggregationTaskRunner.ACTUAL_AGGREGATOR_NAMES;
@@ -152,10 +157,14 @@ public class HBaseTimelineMetricStore extends AbstractService implements Timelin
       scheduleAggregatorThread(dailyClusterAggregator);
 
       // Start the minute host aggregator
-      TimelineMetricAggregator minuteHostAggregator =
-        TimelineMetricAggregatorFactory.createTimelineMetricAggregatorMinute(
-          hBaseAccessor, metricsConf, haController);
-      scheduleAggregatorThread(minuteHostAggregator);
+      if (Boolean.parseBoolean(metricsConf.get(TIMELINE_METRICS_HOST_INMEMORY_AGGREGATION, "true"))) {
+        LOG.info("timeline.metrics.host.inmemory.aggregation is set to True, disabling host minute aggregation on collector");
+      } else {
+        TimelineMetricAggregator minuteHostAggregator =
+          TimelineMetricAggregatorFactory.createTimelineMetricAggregatorMinute(
+            hBaseAccessor, metricsConf, haController);
+        scheduleAggregatorThread(minuteHostAggregator);
+      }
 
       // Start the hourly host aggregator
       TimelineMetricAggregator hourlyHostAggregator =
@@ -390,6 +399,18 @@ public class HBaseTimelineMetricStore extends AbstractService implements Timelin
   }
 
   @Override
+  public TimelinePutResponse putHostAggregatedMetrics(AggregationResult aggregationResult) throws SQLException, IOException {
+    Map<TimelineMetric, MetricHostAggregate> aggregateMap = new HashMap<>();
+    for (TimelineMetricWithAggregatedValues entry : aggregationResult.getResult()) {
+      aggregateMap.put(entry.getTimelineMetric(), entry.getMetricAggregate());
+    }
+    hBaseAccessor.saveHostAggregateRecords(aggregateMap, PhoenixTransactSQL.METRICS_AGGREGATE_MINUTE_TABLE_NAME);
+
+
+    return new TimelinePutResponse();
+  }
+
+  @Override
   public Map<String, Map<String,Set<String>>> getInstanceHostsMetadata(String instanceId, String appId)
           throws SQLException, IOException {
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
index fb369e8..3b2a119 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
@@ -31,6 +31,8 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.util.RetryCounter;
 import org.apache.hadoop.hbase.util.RetryCounterFactory;
 import org.apache.hadoop.metrics2.sink.timeline.ContainerMetric;
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
 import org.apache.hadoop.metrics2.sink.timeline.SingleValuedTimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
@@ -40,8 +42,6 @@ import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.AggregatorUtils;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.Function;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricClusterAggregate;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricReadHelper;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.discovery.TimelineMetricMetadataKey;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
index 0d5042f..023465b 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
@@ -296,6 +296,8 @@ public class TimelineMetricConfiguration {
 
   public static final String AMSHBASE_METRICS_WHITESLIST_FILE = "amshbase_metrics_whitelist";
 
+  public static final String TIMELINE_METRICS_HOST_INMEMORY_AGGREGATION = "timeline.metrics.host.inmemory.aggregation";
+
   private Configuration hbaseConf;
   private Configuration metricsConf;
   private Configuration amsEnvConf;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java
index bde09cb..d052d54 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
 
+import org.apache.hadoop.metrics2.sink.timeline.AggregationResult;
 import org.apache.hadoop.metrics2.sink.timeline.ContainerMetric;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
@@ -80,6 +81,7 @@ public interface TimelineMetricStore {
    */
   Map<String, List<TimelineMetricMetadata>> getTimelineMetricMetadata(String query) throws SQLException, IOException;
 
+  TimelinePutResponse putHostAggregatedMetrics(AggregationResult aggregationResult) throws SQLException, IOException;
   /**
    * Returns all hosts that have written metrics with the apps on the host
    * @return { hostname : [ appIds ] }

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorSink.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorSink.java
index 65d54c0..7b03b30 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorSink.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorSink.java
@@ -19,10 +19,10 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline
 
 import java.util.Map;
 
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricClusterAggregate;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
 
 /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricAggregate.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricAggregate.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricAggregate.java
deleted file mode 100644
index 825ac25..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricAggregate.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
-
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.codehaus.jackson.annotate.JsonProperty;
-import org.codehaus.jackson.annotate.JsonSubTypes;
-import org.codehaus.jackson.map.ObjectMapper;
-
-import java.io.IOException;
-
-/**
-*
-*/
-@JsonSubTypes({@JsonSubTypes.Type(value = MetricClusterAggregate.class),
-  @JsonSubTypes.Type(value = MetricHostAggregate.class)})
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public class MetricAggregate {
-  private static final ObjectMapper mapper = new ObjectMapper();
-
-  protected Double sum = 0.0;
-  protected Double deviation;
-  protected Double max = Double.MIN_VALUE;
-  protected Double min = Double.MAX_VALUE;
-
-  public MetricAggregate() {
-  }
-
-  MetricAggregate(Double sum, Double deviation, Double max,
-                  Double min) {
-    this.sum = sum;
-    this.deviation = deviation;
-    this.max = max;
-    this.min = min;
-  }
-
-  public void updateSum(Double sum) {
-    this.sum += sum;
-  }
-
-  public void updateMax(Double max) {
-    if (max > this.max) {
-      this.max = max;
-    }
-  }
-
-  public void updateMin(Double min) {
-    if (min < this.min) {
-      this.min = min;
-    }
-  }
-
-  @JsonProperty("sum")
-  public Double getSum() {
-    return sum;
-  }
-
-  @JsonProperty("deviation")
-  public Double getDeviation() {
-    return deviation;
-  }
-
-  @JsonProperty("max")
-  public Double getMax() {
-    return max;
-  }
-
-  @JsonProperty("min")
-  public Double getMin() {
-    return min;
-  }
-
-  public void setSum(Double sum) {
-    this.sum = sum;
-  }
-
-  public void setDeviation(Double deviation) {
-    this.deviation = deviation;
-  }
-
-  public void setMax(Double max) {
-    this.max = max;
-  }
-
-  public void setMin(Double min) {
-    this.min = min;
-  }
-
-  public String toJSON() throws IOException {
-    return mapper.writeValueAsString(this);
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricClusterAggregate.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricClusterAggregate.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricClusterAggregate.java
deleted file mode 100644
index 9c837b6..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricClusterAggregate.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
-
-
-import org.codehaus.jackson.annotate.JsonCreator;
-import org.codehaus.jackson.annotate.JsonProperty;
-
-/**
-*
-*/
-public class MetricClusterAggregate extends MetricAggregate {
-  private int numberOfHosts;
-
-  @JsonCreator
-  public MetricClusterAggregate() {
-  }
-
-  public MetricClusterAggregate(Double sum, int numberOfHosts, Double deviation,
-                         Double max, Double min) {
-    super(sum, deviation, max, min);
-    this.numberOfHosts = numberOfHosts;
-  }
-
-  @JsonProperty("numberOfHosts")
-  public int getNumberOfHosts() {
-    return numberOfHosts;
-  }
-
-  public void updateNumberOfHosts(int count) {
-    this.numberOfHosts += count;
-  }
-
-  public void setNumberOfHosts(int numberOfHosts) {
-    this.numberOfHosts = numberOfHosts;
-  }
-
-  /**
-   * Find and update min, max and avg for a minute
-   */
-  public void updateAggregates(MetricClusterAggregate hostAggregate) {
-    updateMax(hostAggregate.getMax());
-    updateMin(hostAggregate.getMin());
-    updateSum(hostAggregate.getSum());
-    updateNumberOfHosts(hostAggregate.getNumberOfHosts());
-  }
-
-  @Override
-  public String toString() {
-    return "MetricAggregate{" +
-      "sum=" + sum +
-      ", numberOfHosts=" + numberOfHosts +
-      ", deviation=" + deviation +
-      ", max=" + max +
-      ", min=" + min +
-      '}';
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricHostAggregate.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricHostAggregate.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricHostAggregate.java
deleted file mode 100644
index 340ec75..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricHostAggregate.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
-
-
-import org.codehaus.jackson.annotate.JsonCreator;
-import org.codehaus.jackson.annotate.JsonProperty;
-
-/**
- * Represents a collection of minute based aggregation of values for
- * resolution greater than a minute.
- */
-public class MetricHostAggregate extends MetricAggregate {
-
-  private long numberOfSamples = 0;
-
-  @JsonCreator
-  public MetricHostAggregate() {
-    super(0.0, 0.0, Double.MIN_VALUE, Double.MAX_VALUE);
-  }
-
-  public MetricHostAggregate(Double sum, int numberOfSamples,
-                             Double deviation,
-                             Double max, Double min) {
-    super(sum, deviation, max, min);
-    this.numberOfSamples = numberOfSamples;
-  }
-
-  @JsonProperty("numberOfSamples")
-  public long getNumberOfSamples() {
-    return numberOfSamples == 0 ? 1 : numberOfSamples;
-  }
-
-  public void updateNumberOfSamples(long count) {
-    this.numberOfSamples += count;
-  }
-
-  public void setNumberOfSamples(long numberOfSamples) {
-    this.numberOfSamples = numberOfSamples;
-  }
-
-  public double getAvg() {
-    return sum / numberOfSamples;
-  }
-
-  /**
-   * Find and update min, max and avg for a minute
-   */
-  public void updateAggregates(MetricHostAggregate hostAggregate) {
-    updateMax(hostAggregate.getMax());
-    updateMin(hostAggregate.getMin());
-    updateSum(hostAggregate.getSum());
-    updateNumberOfSamples(hostAggregate.getNumberOfSamples());
-  }
-
-  @Override
-  public String toString() {
-    return "MetricHostAggregate{" +
-      "sum=" + sum +
-      ", numberOfSamples=" + numberOfSamples +
-      ", deviation=" + deviation +
-      ", max=" + max +
-      ", min=" + min +
-      '}';
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAppAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAppAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAppAggregator.java
index 44aca03..9eaf456 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAppAggregator.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAppAggregator.java
@@ -21,6 +21,7 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricMetadata;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricsFilter;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.discovery.TimelineMetricMetadataKey;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregator.java
index 0934356..ba16b43 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregator.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregator.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.availability.AggregationTaskRunner.AGGREGATOR_NAME;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.availability.MetricCollectorHAController;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java
index a5a3499..34b1f9b 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java
@@ -38,6 +38,7 @@ import org.apache.commons.collections.MapUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.mutable.MutableInt;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.PostProcessingUtil;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricMetadata;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricHostAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricHostAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricHostAggregator.java
index 0ea9c08..a17433b 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricHostAggregator.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricHostAggregator.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.availability.AggregationTaskRunner.AGGREGATOR_NAME;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricReadHelper.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricReadHelper.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricReadHelper.java
index b5f49fb..672f85f 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricReadHelper.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricReadHelper.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
 
 
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.SingleValuedTimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TimelineWebServices.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TimelineWebServices.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TimelineWebServices.java
index 9da921a..50cfb08 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TimelineWebServices.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TimelineWebServices.java
@@ -25,6 +25,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.sink.timeline.AggregationResult;
 import org.apache.hadoop.metrics2.sink.timeline.ContainerMetric;
 import org.apache.hadoop.metrics2.sink.timeline.PrecisionLimitExceededException;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricMetadata;
@@ -285,6 +286,36 @@ public class TimelineWebServices {
     }
   }
 
+  /**
+   * Stores the given aggregated metrics in the timeline store and returns any
+   * errors that occurred while storing them.
+   */
+  @Path("/metrics/aggregated")
+  @POST
+  @Consumes({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
+  public TimelinePutResponse postAggregatedMetrics(
+    @Context HttpServletRequest req,
+    @Context HttpServletResponse res,
+    AggregationResult metrics) {
+
+    init(res);
+    if (metrics == null) {
+      return new TimelinePutResponse();
+    }
+
+    try {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Storing aggregated metrics: " +
+                TimelineUtils.dumpTimelineRecordtoJSON(metrics, true));
+      }
+
+      return timelineMetricStore.putHostAggregatedMetrics(metrics);
+    } catch (Exception e) {
+      LOG.error("Error saving metrics.", e);
+      throw new WebApplicationException(e, Response.Status.INTERNAL_SERVER_ERROR);
+    }
+  }
+
   @Path("/containermetrics")
   @POST
   @Consumes({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})

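For context, the endpoint added above accepts a JSON-encoded AggregationResult and hands it to timelineMetricStore.putHostAggregatedMetrics(). A minimal client sketch follows; the /ws/v1/timeline prefix, the default collector port 6188, and the payload shape are assumptions for illustration, not taken from this patch.

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class PostAggregatedMetrics {
  public static void main(String[] args) throws Exception {
    // Collector endpoint; host, port and the /ws/v1/timeline prefix are assumed.
    URL url = new URL("http://localhost:6188/ws/v1/timeline/metrics/aggregated");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("POST");
    conn.setRequestProperty("Content-Type", "application/json");
    conn.setDoOutput(true);

    // Illustrative JSON only; the real schema is whatever AggregationResult
    // (de)serializes to on the server side.
    String body = "{\"result\": []}";
    try (OutputStream out = conn.getOutputStream()) {
      out.write(body.getBytes(StandardCharsets.UTF_8));
    }
    System.out.println("HTTP " + conn.getResponseCode());
  }
}
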
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java
index 0087fd9..d5baaef 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java
@@ -26,12 +26,12 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.metrics2.sink.timeline.ContainerMetric;
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.Function;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricClusterAggregate;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricAggregator;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricAggregatorFactory;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricTestHelper.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricTestHelper.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricTestHelper.java
index 37ec134..7eeb9c4 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricTestHelper.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricTestHelper.java
@@ -17,9 +17,9 @@
  */
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
 
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
 
 import java.util.Arrays;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessorTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessorTest.java
index a910cc2..d668178 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessorTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessorTest.java
@@ -22,11 +22,11 @@ import com.google.common.collect.Multimap;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricClusterAggregate;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.Function;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.Condition;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestMetricHostAggregate.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestMetricHostAggregate.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestMetricHostAggregate.java
index 44f48e8..3009163 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestMetricHostAggregate.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestMetricHostAggregate.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics
   .timeline;
 
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.junit.Test;
 
 import static org.assertj.core.api.Assertions.assertThat;
@@ -34,7 +34,7 @@ public class TestMetricHostAggregate {
     assertThat(aggregate.getSum()).isEqualTo(3.0);
     assertThat(aggregate.getMin()).isEqualTo(1.0);
     assertThat(aggregate.getMax()).isEqualTo(2.0);
-    assertThat(aggregate.getAvg()).isEqualTo(3.0 / 2);
+    assertThat(aggregate.calculateAverage()).isEqualTo(3.0 / 2);
   }
 
   @Test
@@ -50,7 +50,7 @@ public class TestMetricHostAggregate {
     assertThat(aggregate.getSum()).isEqualTo(12.0);
     assertThat(aggregate.getMin()).isEqualTo(0.5);
     assertThat(aggregate.getMax()).isEqualTo(7.5);
-    assertThat(aggregate.getAvg()).isEqualTo((3.0 + 8.0 + 1.0) / 5);
+    assertThat(aggregate.calculateAverage()).isEqualTo((3.0 + 8.0 + 1.0) / 5);
   }
 
   static MetricHostAggregate createAggregate (Double sum, Double min,
@@ -63,4 +63,4 @@ public class TestMetricHostAggregate {
     aggregate.setNumberOfSamples(samplesCount);
     return aggregate;
   }
-}
\ No newline at end of file
+}

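The updated assertions track an API change on MetricHostAggregate: the stored-average getter getAvg() gives way to calculateAverage(), i.e. the average is derived from sum and sample count on demand. A minimal sketch of that idea, with illustrative field names rather than the real bean:

public class HostAggregateSketch {
  private double sum;
  private long numberOfSamples;

  public void update(double value) {
    sum += value;
    numberOfSamples++;
  }

  // Average is computed on demand instead of being stored.
  public double calculateAverage() {
    return numberOfSamples == 0 ? 0.0 : sum / numberOfSamples;
  }

  public static void main(String[] args) {
    HostAggregateSketch aggregate = new HostAggregateSketch();
    aggregate.update(1.0);
    aggregate.update(2.0);
    System.out.println(aggregate.calculateAverage()); // prints 1.5, i.e. 3.0 / 2
  }
}
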
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
index f00906e..ac2f9d7 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
 
+import org.apache.hadoop.metrics2.sink.timeline.AggregationResult;
 import org.apache.hadoop.metrics2.sink.timeline.ContainerMetric;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
@@ -92,6 +93,11 @@ public class TestTimelineMetricStore implements TimelineMetricStore {
   }
 
   @Override
+  public TimelinePutResponse putHostAggregatedMetrics(AggregationResult aggregationResult) throws SQLException, IOException {
+    return null;
+  }
+
+  @Override
   public Map<String, Set<String>> getHostAppsMetadata() throws SQLException, IOException {
     return Collections.emptyMap();
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorMemorySink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorMemorySink.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorMemorySink.java
index fa0cfe9..53f6f6c 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorMemorySink.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorMemorySink.java
@@ -17,10 +17,10 @@
  */
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
 
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricClusterAggregate;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
 
 import java.util.Collections;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITClusterAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITClusterAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITClusterAggregator.java
index f083731..07fd85d 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITClusterAggregator.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITClusterAggregator.java
@@ -20,13 +20,13 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline
 
 import junit.framework.Assert;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.AbstractMiniHBaseClusterTest;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.MetricTestHelper;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricClusterAggregate;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricAggregator;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricAggregatorFactory;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITMetricAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITMetricAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITMetricAggregator.java
index 9873643..75b3f91 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITMetricAggregator.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITMetricAggregator.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.AbstractMiniHBaseClusterTest;
@@ -124,14 +125,14 @@ public class ITMetricAggregator extends AbstractMiniHBaseClusterTest {
         assertEquals(0.0, currentHostAggregate.getMin());
         assertEquals(20, currentHostAggregate.getNumberOfSamples());
         assertEquals(15.0, currentHostAggregate.getSum());
-        assertEquals(15.0 / 20, currentHostAggregate.getAvg());
+        assertEquals(15.0 / 20, currentHostAggregate.calculateAverage());
         count++;
       } else if ("mem_free".equals(currentMetric.getMetricName())) {
         assertEquals(2.0, currentHostAggregate.getMax());
         assertEquals(0.0, currentHostAggregate.getMin());
         assertEquals(20, currentHostAggregate.getNumberOfSamples());
         assertEquals(15.0, currentHostAggregate.getSum());
-        assertEquals(15.0 / 20, currentHostAggregate.getAvg());
+        assertEquals(15.0 / 20, currentHostAggregate.calculateAverage());
         count++;
       } else {
         fail("Unexpected entry");
@@ -198,7 +199,7 @@ public class ITMetricAggregator extends AbstractMiniHBaseClusterTest {
         assertEquals(0.0, currentHostAggregate.getMin());
         assertEquals(12 * 20, currentHostAggregate.getNumberOfSamples());
         assertEquals(12 * 15.0, currentHostAggregate.getSum());
-        assertEquals(15.0 / 20, currentHostAggregate.getAvg());
+        assertEquals(15.0 / 20, currentHostAggregate.calculateAverage());
       }
     }
   }
@@ -260,7 +261,7 @@ public class ITMetricAggregator extends AbstractMiniHBaseClusterTest {
         assertEquals(0.0, currentHostAggregate.getMin());
         assertEquals(12 * 20, currentHostAggregate.getNumberOfSamples());
         assertEquals(12 * 15.0, currentHostAggregate.getSum());
-        assertEquals(15.0 / 20, currentHostAggregate.getAvg());
+        assertEquals(15.0 / 20, currentHostAggregate.calculateAverage());
       }
     }
   }
@@ -309,14 +310,14 @@ public class ITMetricAggregator extends AbstractMiniHBaseClusterTest {
         assertEquals(0.0, currentHostAggregate.getMin());
         assertEquals(20, currentHostAggregate.getNumberOfSamples());
         assertEquals(15.0, currentHostAggregate.getSum());
-        assertEquals(15.0 / 20, currentHostAggregate.getAvg());
+        assertEquals(15.0 / 20, currentHostAggregate.calculateAverage());
         count++;
       } else if ("mem_free".equals(currentMetric.getMetricName())) {
         assertEquals(2.0, currentHostAggregate.getMax());
         assertEquals(0.0, currentHostAggregate.getMin());
         assertEquals(20, currentHostAggregate.getNumberOfSamples());
         assertEquals(15.0, currentHostAggregate.getSum());
-        assertEquals(15.0 / 20, currentHostAggregate.getAvg());
+        assertEquals(15.0 / 20, currentHostAggregate.calculateAverage());
         count++;
       } else {
         fail("Unexpected entry");

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecondTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecondTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecondTest.java
index 78db11d..6541b2c 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecondTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecondTest.java
@@ -31,6 +31,7 @@ import java.util.Map;
 import java.util.TreeMap;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.discovery.TimelineMetricMetadataKey;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.discovery.TimelineMetricMetadataManager;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/pom.xml b/ambari-metrics/pom.xml
index 2d88912..02f9574 100644
--- a/ambari-metrics/pom.xml
+++ b/ambari-metrics/pom.xml
@@ -33,6 +33,7 @@
     <module>ambari-metrics-host-monitoring</module>
     <module>ambari-metrics-grafana</module>
     <module>ambari-metrics-assembly</module>
+    <module>ambari-metrics-host-aggregator</module>
   </modules>
   <properties>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/java/org/apache/ambari/server/metrics/system/impl/AmbariMetricSinkImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/metrics/system/impl/AmbariMetricSinkImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/metrics/system/impl/AmbariMetricSinkImpl.java
index 8d1f63f..a0765bf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/metrics/system/impl/AmbariMetricSinkImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/metrics/system/impl/AmbariMetricSinkImpl.java
@@ -300,6 +300,16 @@ public class AmbariMetricSinkImpl extends AbstractTimelineMetricsSink implements
     return hostName;
   }
 
+  @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return false;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return 0;
+  }
+
   private List<TimelineMetric> getFilteredMetricList(List<SingleMetric> metrics) {
     final List<TimelineMetric> metricList = new ArrayList<>();
     for (SingleMetric metric : metrics) {

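AmbariMetricSinkImpl opts out of host-level in-memory aggregation by hard-coding the two new hooks. A sketch of how a sink could opt in instead, using a standalone stand-in class rather than the real AbstractTimelineMetricsSink parent; the lookup keys are illustrative, matching the template property names used elsewhere in this commit:

import java.util.Properties;

public class InMemoryAggregationHooks {
  private final Properties conf;

  public InMemoryAggregationHooks(Properties conf) {
    this.conf = conf;
  }

  // Mirrors isHostInMemoryAggregationEnabled() in the sink hierarchy.
  protected boolean isHostInMemoryAggregationEnabled() {
    return Boolean.parseBoolean(
        conf.getProperty("host_in_memory_aggregation", "false"));
  }

  // Mirrors getHostInMemoryAggregationPort(); 61888 is the default used
  // throughout this commit.
  protected int getHostInMemoryAggregationPort() {
    return Integer.parseInt(
        conf.getProperty("host_in_memory_aggregation_port", "61888"));
  }
}
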
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
index 150b0a8..5d21514 100644
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
@@ -153,6 +153,8 @@ if has_metric_collector:
   pass
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
 
 # if accumulo is selected accumulo_tserver_hosts should not be empty, but still default just in case
 if 'slave_hosts' in config['clusterHostInfo']:

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2 b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
index 6873c85..742ea3c 100644
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
+++ b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
@@ -16,6 +16,9 @@
 # Poll collectors every {{metrics_report_interval}} seconds
 *.period={{metrics_collection_period}}
 
+*.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+*.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
+
 {% if has_metric_collector %}
 
 *.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
index cb66537..4d33661 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
@@ -101,6 +101,14 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
+    <name>timeline.metrics.host.inmemory.aggregation.jvm.arguments</name>
+    <value>-Xmx256m -Xms128m -XX:PermSize=68m</value>
+    <description>
+      Extra JVM arguments for the local aggregator process, separated by spaces
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
     <name>timeline.metrics.skip.network.interfaces.patterns</name>
     <value>None</value>
     <description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
index 8e1671e..1b085f6 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
@@ -787,4 +787,15 @@
     <value>{{cluster_zookeeper_clientPort}}</value>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>timeline.metrics.host.inmemory.aggregation</name>
+    <value>false</value>
+    <description>If set to "true", host metrics will be aggregated in memory on each host.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>timeline.metrics.host.inmemory.aggregation.port</name>
+    <value>61888</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
index 740a91a..9031b46 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
@@ -93,6 +93,9 @@
               <primary>true</primary>
             </log>
           </logs>
+          <configuration-dependencies>
+            <config-type>ams-site</config-type>
+          </configuration-dependencies>
         </component>
 
         <component>

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
index a929847..f49d47d 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
@@ -163,6 +163,20 @@ def ams(name=None):
               create_parents = True
     )
 
+    if params.host_in_memory_aggregation and params.log4j_props is not None:
+      File(os.path.join(params.ams_monitor_conf_dir, "log4j.properties"),
+           owner=params.ams_user,
+           content=params.log4j_props
+           )
+
+    XmlConfig("ams-site.xml",
+              conf_dir=params.ams_monitor_conf_dir,
+              configurations=params.config['configurations']['ams-site'],
+              configuration_attributes=params.config['configuration_attributes']['ams-site'],
+              owner=params.ams_user,
+              group=params.user_group
+              )
+
     TemplateConfig(
       os.path.join(params.ams_monitor_conf_dir, "metric_monitor.ini"),
       owner=params.ams_user,
@@ -366,6 +380,22 @@ def ams(name=None, action=None):
               create_parents = True
     )
 
+    if params.host_in_memory_aggregation and params.log4j_props is not None:
+      File(format("{params.ams_monitor_conf_dir}/log4j.properties"),
+           mode=0644,
+           group=params.user_group,
+           owner=params.ams_user,
+           content=InlineTemplate(params.log4j_props)
+           )
+
+    XmlConfig("ams-site.xml",
+              conf_dir=params.ams_monitor_conf_dir,
+              configurations=params.config['configurations']['ams-site'],
+              configuration_attributes=params.config['configuration_attributes']['ams-site'],
+              owner=params.ams_user,
+              group=params.user_group
+              )
+
     Execute(format("{sudo} chown -R {ams_user}:{user_group} {ams_monitor_log_dir}")
             )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
index 50dde1c..b8c14f4 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
@@ -224,6 +224,11 @@ metrics_collector_heapsize = check_append_heap_property(str(metrics_collector_he
 master_heapsize = check_append_heap_property(str(master_heapsize), "m")
 regionserver_heapsize = check_append_heap_property(str(regionserver_heapsize), "m")
 
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
+host_in_memory_aggregation_jvm_arguments = default("/configurations/ams-env/timeline.metrics.host.inmemory.aggregation.jvm.arguments",
+                                                   "-Xmx256m -Xms128m -XX:PermSize=68m")
+
 regionserver_xmn_max = default('/configurations/ams-hbase-env/hbase_regionserver_xmn_max', None)
 if regionserver_xmn_max:
   regionserver_xmn_max = int(trim_heap_property(str(regionserver_xmn_max), "m"))

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2 b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2
index 9729bbe..bb0db4f 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2
@@ -58,6 +58,9 @@ rpc.protocol={{metric_collector_protocol}}
 
 *.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
 *.sink.timeline.slave.host.name={{hostname}}
+*.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+*.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
+
 hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
 hbase.sink.timeline.period={{metrics_collection_period}}
 hbase.sink.timeline.sendInterval={{metrics_report_interval}}000

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2 b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
index 769ad67..b7dee50 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
@@ -38,3 +38,10 @@ failover_strategy = {{failover_strategy}}
 failover_strategy_blacklisted_interval_seconds = {{failover_strategy_blacklisted_interval_seconds}}
 port = {{metric_collector_port}}
 https_enabled = {{metric_collector_https_enabled}}
+
+[aggregation]
+host_in_memory_aggregation = {{host_in_memory_aggregation}}
+host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
+java_home = {{java64_home}}
+jvm_arguments = {{host_in_memory_aggregation_jvm_arguments}}
+ams_monitor_log_dir = {{ams_monitor_log_dir}}
\ No newline at end of file

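The new [aggregation] block suggests the Python monitor spawns the in-memory aggregator as a separate JVM using java_home and jvm_arguments. A hedged Java sketch of such a launch, for consistency with the other examples here; the jar path and the bare -jar invocation are assumptions, not the monitor's actual implementation:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class AggregatorLaunchSketch {
  public static void main(String[] args) throws Exception {
    String javaHome = "/usr/jdk64/current";  // stand-in for {{java64_home}}
    String jvmArguments = "-Xmx256m -Xms128m -XX:PermSize=68m";
    // Hypothetical location of the new ambari-metrics-host-aggregator artifact.
    String aggregatorJar = "/usr/lib/ambari-metrics-host-aggregator/aggregator.jar";

    List<String> cmd = new ArrayList<>();
    cmd.add(javaHome + "/bin/java");
    cmd.addAll(Arrays.asList(jvmArguments.split("\\s+")));
    cmd.add("-jar");
    cmd.add(aggregatorJar);

    // Inherit stdio so aggregator output is visible to the launching process.
    Process p = new ProcessBuilder(cmd).inheritIO().start();
    System.out.println("Launched aggregator, alive=" + p.isAlive());
  }
}
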
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
index 86a290f..0e0c9aa 100644
--- a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
@@ -124,6 +124,9 @@ if has_metric_collector:
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
+
 # Cluster Zookeeper quorum
 zookeeper_quorum = None
 if not len(default("/clusterHostInfo/zookeeper_hosts", [])) == 0:


[27/50] [abbrv] ambari git commit: AMBARI-21068 : Kafka broker goes down after Ambari upgrade from 2.5.0 to 2.5.1 due to missing 'kafka.timeline.metrics.instanceId' property. (Addendum Patch) (avijayan)

Posted by ad...@apache.org.
AMBARI-21068 : Kafka broker goes down after Ambari upgrade from 2.5.0 to 2.5.1 due to missing 'kafka.timeline.metrics.instanceId' property. (Addendum Patch) (avijayan)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d740384e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d740384e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d740384e

Branch: refs/heads/ambari-rest-api-explorer
Commit: d740384e8059d9d7b12d7ae99e7423a92c4df4bf
Parents: c9f705d
Author: Aravindan Vijayan <av...@hortonworks.com>
Authored: Fri May 19 16:40:30 2017 -0700
Committer: Aravindan Vijayan <av...@hortonworks.com>
Committed: Fri May 19 16:40:30 2017 -0700

----------------------------------------------------------------------
 .../metrics2/sink/timeline/HadoopTimelineMetricsSink.java |  2 +-
 .../sink/timeline/HadoopTimelineMetricsSinkTest.java      |  2 +-
 .../metrics2/sink/kafka/KafkaTimelineMetricsReporter.java |  4 ++--
 .../metrics2/sink/storm/StormTimelineMetricsReporter.java |  6 ++++--
 .../metrics2/sink/storm/StormTimelineMetricsSink.java     |  2 +-
 .../metrics2/sink/storm/StormTimelineMetricsSink.java     |  2 +-
 .../templates/hadoop-metrics2-accumulo.properties.j2      |  2 --
 .../package/templates/hadoop-metrics2-hbase.properties.j2 |  2 --
 .../package/templates/flume-metrics2.properties.j2        |  3 ---
 .../hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2    |  2 --
 .../hadoop-metrics2-hbase.properties-GANGLIA-RS.j2        |  2 --
 .../configuration/hadoop-metrics2.properties.xml          |  2 --
 .../templates/hadoop-metrics2-hivemetastore.properties.j2 |  2 --
 .../templates/hadoop-metrics2-hiveserver2.properties.j2   |  2 --
 .../package/templates/hadoop-metrics2-llapdaemon.j2       |  2 --
 .../templates/hadoop-metrics2-llaptaskscheduler.j2        |  2 --
 .../templates/hadoop-metrics2-hivemetastore.properties.j2 |  2 --
 .../templates/hadoop-metrics2-hiveserver2.properties.j2   |  2 --
 .../package/templates/hadoop-metrics2-llapdaemon.j2       |  2 --
 .../templates/hadoop-metrics2-llaptaskscheduler.j2        |  2 --
 .../KAFKA/0.10.0.3.0/configuration/kafka-broker.xml       | 10 ----------
 .../KAFKA/0.8.1/configuration/kafka-broker.xml            | 10 ----------
 .../STORM/0.9.1/package/templates/config.yaml.j2          |  3 ---
 .../0.9.1/package/templates/storm-metrics2.properties.j2  |  2 --
 .../STORM/1.0.1.3.0/package/templates/config.yaml.j2      |  3 ---
 .../package/templates/storm-metrics2.properties.j2        |  2 --
 .../before-START/templates/hadoop-metrics2.properties.j2  |  2 --
 .../HDFS/configuration/hadoop-metrics2.properties.xml     |  2 --
 .../before-START/templates/hadoop-metrics2.properties.j2  |  2 --
 29 files changed, 10 insertions(+), 73 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
index c235c7c..a290ced 100644
--- a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
@@ -98,7 +98,7 @@ public class HadoopTimelineMetricsSink extends AbstractTimelineMetricsSink imple
     }
 
     serviceName = getServiceName(conf);
-    instanceId = conf.getString(INSTANCE_ID_PROPERTY);
+    instanceId = conf.getString(INSTANCE_ID_PROPERTY, null);
     setInstanceId = conf.getBoolean(SET_INSTANCE_ID_PROPERTY, false);
 
     LOG.info("Identified hostname = " + hostName + ", serviceName = " + serviceName);

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java b/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java
index 4a009dc..30c5c23 100644
--- a/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java
@@ -114,7 +114,7 @@ public class HadoopTimelineMetricsSinkTest {
     expect(conf.getInt(eq(MAX_METRIC_ROW_CACHE_SIZE), anyInt())).andReturn(10).anyTimes();
     expect(conf.getInt(eq(METRICS_SEND_INTERVAL), anyInt())).andReturn(1000).anyTimes();
     expect(conf.getBoolean(eq(SET_INSTANCE_ID_PROPERTY), eq(false))).andReturn(true).anyTimes();
-    expect(conf.getString(eq(INSTANCE_ID_PROPERTY))).andReturn("instanceId").anyTimes();
+    expect(conf.getString(eq(INSTANCE_ID_PROPERTY), anyString())).andReturn("instanceId").anyTimes();
 
     conf.setListDelimiter(eq(','));
     expectLastCall().anyTimes();

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java b/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
index 76b1c15..6f5e9e0 100644
--- a/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
+++ b/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
@@ -181,8 +181,8 @@ public class KafkaTimelineMetricsReporter extends AbstractTimelineMetricsSink
         collectorHosts = parseHostsStringIntoCollection(props.getString(TIMELINE_HOSTS_PROPERTY, TIMELINE_DEFAULT_HOST));
         metricCollectorProtocol = props.getString(TIMELINE_PROTOCOL_PROPERTY, TIMELINE_DEFAULT_PROTOCOL);
 
-        instanceId = props.getString(TIMELINE_METRICS_KAFKA_INSTANCE_ID_PROPERTY);
-        setInstanceId = props.getBoolean(TIMELINE_METRICS_KAFKA_SET_INSTANCE_ID_PROPERTY);
+        instanceId = props.getString(TIMELINE_METRICS_KAFKA_INSTANCE_ID_PROPERTY, null);
+        setInstanceId = props.getBoolean(TIMELINE_METRICS_KAFKA_SET_INSTANCE_ID_PROPERTY, false);
 
         hostInMemoryAggregationEnabled = props.getBoolean(TIMELINE_METRICS_KAFKA_HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY, false);
         hostInMemoryAggregationPort = props.getInt(TIMELINE_METRICS_KAFKA_HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY, 61888);

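The defaults added here matter because the one-argument getString on Kafka's VerifiableProperties requires the key and throws when it is absent, which appears to be how a missing kafka.timeline.metrics.instanceId could take the broker down; the two-argument form degrades to a default instead. A self-contained illustration of the difference, using plain java.util.Properties as a stand-in for the Kafka class:

import java.util.Properties;

public class MissingKeyDemo {
  // One-argument lookup: absent key is fatal, like VerifiableProperties.getString(name).
  static String getString(Properties p, String key) {
    String v = p.getProperty(key);
    if (v == null) {
      throw new IllegalArgumentException("Missing required property '" + key + "'");
    }
    return v;
  }

  // Two-argument lookup: absent key falls back to the default.
  static String getString(Properties p, String key, String dflt) {
    return p.getProperty(key, dflt);
  }

  public static void main(String[] args) {
    Properties props = new Properties(); // instanceId intentionally not set
    System.out.println(getString(props, "kafka.timeline.metrics.instanceId", null)); // null
    try {
      getString(props, "kafka.timeline.metrics.instanceId"); // one-arg form
    } catch (IllegalArgumentException e) {
      System.out.println("Broker-style failure: " + e.getMessage());
    }
  }
}
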
http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java b/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
index 24b2c8b..d408e1a 100644
--- a/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
+++ b/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
@@ -140,8 +140,10 @@ public class StormTimelineMetricsReporter extends AbstractTimelineMetricsSink
           Integer.parseInt(cf.get(METRICS_POST_TIMEOUT_SECONDS).toString()) :
           DEFAULT_POST_TIMEOUT_SECONDS;
       applicationId = cf.get(APP_ID).toString();
-      setInstanceId = Boolean.getBoolean(cf.get(SET_INSTANCE_ID_PROPERTY).toString());
-      instanceId = cf.get(INSTANCE_ID_PROPERTY).toString();
+      if (cf.containsKey(SET_INSTANCE_ID_PROPERTY)) {
+        setInstanceId = Boolean.parseBoolean(cf.get(SET_INSTANCE_ID_PROPERTY).toString());
+        instanceId = cf.get(INSTANCE_ID_PROPERTY).toString();
+      }
       hostInMemoryAggregationEnabled = Boolean.valueOf(cf.get(HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY).toString());
       hostInMemoryAggregationPort = Integer.valueOf(cf.get(HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY).toString());
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
index c9c0538..ff72f24 100644
--- a/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
@@ -136,7 +136,7 @@ public class StormTimelineMetricsSink extends AbstractTimelineMetricsSink implem
     protocol = configuration.getProperty(COLLECTOR_PROTOCOL, "http");
     port = configuration.getProperty(COLLECTOR_PORT, "6188");
 
-    instanceId = configuration.getProperty(INSTANCE_ID_PROPERTY);
+    instanceId = configuration.getProperty(INSTANCE_ID_PROPERTY, null);
     setInstanceId = Boolean.valueOf(configuration.getProperty(SET_INSTANCE_ID_PROPERTY, "false"));
     hostInMemoryAggregationEnabled = Boolean.valueOf(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY));
     hostInMemoryAggregationPort = Integer.valueOf(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY));

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
index 320e177..4d5a229 100644
--- a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
@@ -147,7 +147,7 @@ public class StormTimelineMetricsSink extends AbstractTimelineMetricsSink implem
 
     protocol = configuration.getProperty(COLLECTOR_PROTOCOL, "http");
     port = configuration.getProperty(COLLECTOR_PORT, "6188");
-    instanceId = configuration.getProperty(INSTANCE_ID_PROPERTY);
+    instanceId = configuration.getProperty(INSTANCE_ID_PROPERTY, null);
     setInstanceId = Boolean.valueOf(configuration.getProperty(SET_INSTANCE_ID_PROPERTY, "false"));
 
     hostInMemoryAggregationEnabled = Boolean.valueOf(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY));

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2 b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
index 742ea3c..e59ba11 100644
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
+++ b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
@@ -43,8 +43,6 @@ accumulo.sink.timeline.period={{metrics_collection_period}}
 accumulo.sink.timeline.sendInterval={{metrics_report_interval}}000
 accumulo.sink.timeline.collector.hosts={{ams_collector_hosts}}
 accumulo.sink.timeline.port={{metric_collector_port}}
-accumulo.sink.timeline.instanceId={{cluster_name}}
-accumulo.sink.timeline.set.instanceId={{set_instanceId}}
 
 # HTTPS properties
 accumulo.sink.timeline.truststore.path = {{metric_truststore_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2 b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2
index bb0db4f..978b795 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2
@@ -68,8 +68,6 @@ hbase.sink.timeline.collector.hosts={{ams_collector_hosts}}
 hbase.sink.timeline.port={{metric_collector_port}}
 hbase.sink.timeline.protocol={{metric_collector_protocol}}
 hbase.sink.timeline.serviceName-prefix=ams
-hbase.sink.timeline.instanceId={{cluster_name}}
-hbase.sink.timeline.set.instanceId={{set_instanceId}}
 
 # HTTPS properties
 hbase.sink.timeline.truststore.path = {{metric_truststore_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2 b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
index 28944ca..c476019 100644
--- a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
@@ -26,9 +26,6 @@ sendInterval={{metrics_report_interval}}000
 host_in_memory_aggregation = {{host_in_memory_aggregation}}
 host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
-instanceId={{cluster_name}}
-set.instanceId={{set_instanceId}}
-
 # HTTPS properties
 truststore.path = {{metric_truststore_path}}
 truststore.type = {{metric_truststore_type}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2 b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
index c8f2f13..7368ffe 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
@@ -76,8 +76,6 @@ hbase.sink.timeline.sendInterval={{metrics_report_interval}}000
 hbase.sink.timeline.collector.hosts={{ams_collector_hosts}}
 hbase.sink.timeline.protocol={{metric_collector_protocol}}
 hbase.sink.timeline.port={{metric_collector_port}}
-hbase.sink.timeline.instanceId={{cluster_name}}
-hbase.sink.timeline.set.instanceId={{set_instanceId}}
 hbase.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
 hbase.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2 b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
index f4e25e1..f245365 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
@@ -74,8 +74,6 @@ hbase.sink.timeline.sendInterval={{metrics_report_interval}}000
 hbase.sink.timeline.collector.hosts={{ams_collector_hosts}}
 hbase.sink.timeline.protocol={{metric_collector_protocol}}
 hbase.sink.timeline.port={{metric_collector_port}}
-hbase.sink.timeline.instanceId={{cluster_name}}
-hbase.sink.timeline.set.instanceId={{set_instanceId}}
 hbase.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
 hbase.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml
index 4b03880..84ea231 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml
@@ -86,8 +86,6 @@ resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
 *.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}
 *.sink.timeline.protocol={{metric_collector_protocol}}
 *.sink.timeline.port={{metric_collector_port}}
-*.sink.timeline.instanceId={{cluster_name}}
-*.sink.timeline.set.instanceId={{set_instanceId}}
 *.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
 *.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2 b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
index d78a342..3093e56 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
@@ -42,8 +42,6 @@
   *.sink.timeline.period={{metrics_collection_period}}
   *.sink.timeline.sendInterval={{metrics_report_interval}}000
   *.sink.timeline.slave.host.name = {{hostname}}
-  *.sink.timeline.instanceId={{cluster_name}}
-  *.sink.timeline.set.instanceId={{set_instanceId}}
 
   # HTTPS properties
   *.sink.timeline.truststore.path = {{metric_truststore_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2 b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
index 1f496ef..59a7c1b 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
@@ -42,8 +42,6 @@
   *.sink.timeline.period={{metrics_collection_period}}
   *.sink.timeline.sendInterval={{metrics_report_interval}}000
   *.sink.timeline.slave.host.name = {{hostname}}
-  *.sink.timeline.instanceId={{cluster_name}}
-  *.sink.timeline.set.instanceId={{set_instanceId}}
 
   # HTTPS properties
   *.sink.timeline.truststore.path = {{metric_truststore_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llapdaemon.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llapdaemon.j2 b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llapdaemon.j2
index 01869c0..69f6071 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llapdaemon.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llapdaemon.j2
@@ -41,8 +41,6 @@
   *.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
   *.sink.timeline.period={{metrics_collection_period}}
   *.sink.timeline.sendInterval={{metrics_report_interval}}000
-  *.sink.timeline.instanceId={{cluster_name}}
-  *.sink.timeline.set.instanceId={{set_instanceId}}
 
   # HTTPS properties
   *.sink.timeline.truststore.path = {{metric_truststore_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2 b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
index 2e25c4a..c08a498 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
@@ -41,8 +41,6 @@
   *.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
   *.sink.timeline.period={{metrics_collection_period}}
   *.sink.timeline.sendInterval={{metrics_report_interval}}000
-  *.sink.timeline.instanceId={{cluster_name}}
-  *.sink.timeline.set.instanceId={{set_instanceId}}
 
   # HTTPS properties
   *.sink.timeline.truststore.path = {{metric_truststore_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2 b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
index d78a342..3093e56 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
@@ -42,8 +42,6 @@
   *.sink.timeline.period={{metrics_collection_period}}
   *.sink.timeline.sendInterval={{metrics_report_interval}}000
   *.sink.timeline.slave.host.name = {{hostname}}
-  *.sink.timeline.instanceId={{cluster_name}}
-  *.sink.timeline.set.instanceId={{set_instanceId}}
 
   # HTTPS properties
   *.sink.timeline.truststore.path = {{metric_truststore_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2 b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
index 1f496ef..59a7c1b 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
@@ -42,8 +42,6 @@
   *.sink.timeline.period={{metrics_collection_period}}
   *.sink.timeline.sendInterval={{metrics_report_interval}}000
   *.sink.timeline.slave.host.name = {{hostname}}
-  *.sink.timeline.instanceId={{cluster_name}}
-  *.sink.timeline.set.instanceId={{set_instanceId}}
 
   # HTTPS properties
   *.sink.timeline.truststore.path = {{metric_truststore_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llapdaemon.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llapdaemon.j2 b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llapdaemon.j2
index 01869c0..69f6071 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llapdaemon.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llapdaemon.j2
@@ -41,8 +41,6 @@
   *.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
   *.sink.timeline.period={{metrics_collection_period}}
   *.sink.timeline.sendInterval={{metrics_report_interval}}000
-  *.sink.timeline.instanceId={{cluster_name}}
-  *.sink.timeline.set.instanceId={{set_instanceId}}
 
   # HTTPS properties
   *.sink.timeline.truststore.path = {{metric_truststore_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2 b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
index 2e25c4a..c08a498 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
@@ -41,8 +41,6 @@
   *.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
   *.sink.timeline.period={{metrics_collection_period}}
   *.sink.timeline.sendInterval={{metrics_report_interval}}000
-  *.sink.timeline.instanceId={{cluster_name}}
-  *.sink.timeline.set.instanceId={{set_instanceId}}
 
   # HTTPS properties
   *.sink.timeline.truststore.path = {{metric_truststore_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/kafka-broker.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/kafka-broker.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/kafka-broker.xml
index 46c14c0..1cddb89 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/kafka-broker.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/kafka-broker.xml
@@ -401,16 +401,6 @@
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
-    <name>kafka.timeline.metrics.instanceId</name>
-    <value>{{cluster_name}}</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>kafka.timeline.metrics.set.instanceId</name>
-    <value>{{set_instanceId}}</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
     <name>kafka.timeline.metrics.maxRowCacheSize</name>
     <value>10000</value>
     <description>Timeline metrics reporter send interval</description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
index 26e7a77..39dfeba 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
@@ -407,16 +407,6 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
-    <name>kafka.timeline.metrics.instanceId</name>
-    <value>{{cluster_name}}</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>kafka.timeline.metrics.set.instanceId</name>
-    <value>{{set_instanceId}}</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
     <name>kafka.timeline.metrics.maxRowCacheSize</name>
     <value>10000</value>
     <description>Timeline metrics reporter send interval</description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/config.yaml.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/config.yaml.j2 b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/config.yaml.j2
index 67b89c4..b2dd3c8 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/config.yaml.j2
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/config.yaml.j2
@@ -69,7 +69,4 @@ metrics_collector:
   truststore.type : "{{metric_truststore_type}}"
   truststore.password : "{{metric_truststore_password}}"
 
-  instanceId={{cluster_name}}
-  set.instanceId={{set_instanceId}}
-
 {% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/storm-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/storm-metrics2.properties.j2 b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/storm-metrics2.properties.j2
index 1dedffc..e7db91e 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/storm-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/storm-metrics2.properties.j2
@@ -23,8 +23,6 @@ zookeeper.quorum={{zookeeper_quorum}}
 maxRowCacheSize=10000
 sendInterval={{metrics_report_interval}}000
 clusterReporterAppId=nimbus
-instanceId={{cluster_name}}
-set.instanceId={{set_instanceId}}
 host_in_memory_aggregation = {{host_in_memory_aggregation}}
 host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/config.yaml.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/config.yaml.j2 b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/config.yaml.j2
index 67b89c4..b2dd3c8 100644
--- a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/config.yaml.j2
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/config.yaml.j2
@@ -69,7 +69,4 @@ metrics_collector:
   truststore.type : "{{metric_truststore_type}}"
   truststore.password : "{{metric_truststore_password}}"
 
-  instanceId={{cluster_name}}
-  set.instanceId={{set_instanceId}}
-
 {% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm-metrics2.properties.j2 b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm-metrics2.properties.j2
index 1dedffc..e7db91e 100644
--- a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm-metrics2.properties.j2
@@ -23,8 +23,6 @@ zookeeper.quorum={{zookeeper_quorum}}
 maxRowCacheSize=10000
 sendInterval={{metrics_report_interval}}000
 clusterReporterAppId=nimbus
-instanceId={{cluster_name}}
-set.instanceId={{set_instanceId}}
 host_in_memory_aggregation = {{host_in_memory_aggregation}}
 host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
index 1f8499f..2cd9aa8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -75,8 +75,6 @@ resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
 *.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}
 *.sink.timeline.protocol={{metric_collector_protocol}}
 *.sink.timeline.port={{metric_collector_port}}
-*.sink.timeline.instanceId={{cluster_name}}
-*.sink.timeline.set.instanceId={{set_instanceId}}
 *.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
 *.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/configuration/hadoop-metrics2.properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/configuration/hadoop-metrics2.properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/configuration/hadoop-metrics2.properties.xml
index 4b03880..84ea231 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/configuration/hadoop-metrics2.properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/configuration/hadoop-metrics2.properties.xml
@@ -86,8 +86,6 @@ resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
 *.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}
 *.sink.timeline.protocol={{metric_collector_protocol}}
 *.sink.timeline.port={{metric_collector_port}}
-*.sink.timeline.instanceId={{cluster_name}}
-*.sink.timeline.set.instanceId={{set_instanceId}}
 *.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
 *.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/d740384e/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
index 1f8499f..2cd9aa8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -75,8 +75,6 @@ resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
 *.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}
 *.sink.timeline.protocol={{metric_collector_protocol}}
 *.sink.timeline.port={{metric_collector_port}}
-*.sink.timeline.instanceId={{cluster_name}}
-*.sink.timeline.set.instanceId={{set_instanceId}}
 *.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
 *.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 

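A note on the template mechanics above: the *.j2 files are Jinja2 templates, and the {{...}} placeholders are filled from cluster parameters at deploy time, so properties deleted from a template simply stop appearing in the rendered file. A minimal sketch of that rendering, using the generic jinja2 package rather than Ambari's own template machinery, with hypothetical parameter values:

from jinja2 import Template

# One of the sink fragments after this commit; the instanceId and
# set.instanceId lines are gone, so they cannot appear in the output.
fragment = """\
*.sink.timeline.protocol={{metric_collector_protocol}}
*.sink.timeline.port={{metric_collector_port}}
*.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
"""

# Hypothetical parameter values, standing in for the real cluster params.
params = {
    "metric_collector_protocol": "http",
    "metric_collector_port": "6188",
    "host_in_memory_aggregation": "false",
}

print(Template(fragment).render(**params))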

[06/50] [abbrv] ambari git commit: AMBARI-21046. UI: Upgrades should be started using repo_version_ids instead of version strings (alexantonenko)

Posted by ad...@apache.org.
AMBARI-21046. UI: Upgrades should be started using repo_version_ids instead of version strings (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1568f800
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1568f800
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1568f800

Branch: refs/heads/ambari-rest-api-explorer
Commit: 1568f800764a6b20f2f09330f112070ebc0f7f86
Parents: 041d353
Author: Alex Antonenko <hi...@gmail.com>
Authored: Wed May 17 19:24:44 2017 +0300
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Wed May 17 19:56:45 2017 +0300

----------------------------------------------------------------------
 .../controllers/main/admin/stack_and_upgrade_controller.js    | 7 +++++--
 ambari-web/app/utils/ajax/ajax.js                             | 4 ++--
 .../main/admin/stack_and_upgrade_controller_test.js           | 6 ++++++
 3 files changed, 13 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/1568f800/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
index 0f2efb0..d444b2d 100644
--- a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
+++ b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
@@ -414,7 +414,8 @@ App.MainAdminStackAndUpgradeController = Em.Controller.extend(App.LocalStorage,
     if (currentVersion) {
       this.set('currentVersion', {
         repository_version: currentVersion.get('repositoryVersion.repositoryVersion'),
-        repository_name: currentVersion.get('repositoryVersion.displayName')
+        repository_name: currentVersion.get('repositoryVersion.displayName'),
+        id: currentVersion.get('repositoryVersion.id')
       });
     }
   },
@@ -736,6 +737,7 @@ App.MainAdminStackAndUpgradeController = Em.Controller.extend(App.LocalStorage,
         from: App.RepositoryVersion.find().findProperty('displayName', this.get('upgradeVersion')).get('repositoryVersion'),
         value: currentVersion.repository_version,
         label: currentVersion.repository_name,
+        id: currentVersion.id,
         isDowngrade: true,
         upgradeType: this.get('upgradeType')
       },
@@ -1379,7 +1381,8 @@ App.MainAdminStackAndUpgradeController = Em.Controller.extend(App.LocalStorage,
       label: version.get('displayName'),
       type: version.get('upgradeType'),
       skipComponentFailures: version.get('skipComponentFailures') ? 'true' : 'false',
-      skipSCFailures: version.get('skipSCFailures') ? 'true' : 'false'
+      skipSCFailures: version.get('skipSCFailures') ? 'true' : 'false',
+      id: version.get('id')
     };
     if (App.get('supports.preUpgradeCheck')) {
       this.set('requestInProgress', true);

http://git-wip-us.apache.org/repos/asf/ambari/blob/1568f800/ambari-web/app/utils/ajax/ajax.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/ajax/ajax.js b/ambari-web/app/utils/ajax/ajax.js
index d9eeaa6..0b584d8 100644
--- a/ambari-web/app/utils/ajax/ajax.js
+++ b/ambari-web/app/utils/ajax/ajax.js
@@ -1712,7 +1712,7 @@ var urls = {
         timeout : 600000,
         data: JSON.stringify({
           "Upgrade": {
-            "repository_version": data.value,
+            "repository_version_id": data.id,
             "upgrade_type": data.type,
             "skip_failures": data.skipComponentFailures,
             "skip_service_check_failures": data.skipSCFailures,
@@ -1731,7 +1731,7 @@ var urls = {
         data: JSON.stringify({
           "Upgrade": {
             "from_version": data.from,
-            "repository_version": data.value,
+            "repository_version_id": data.id,
             "upgrade_type": data.upgradeType,
             "direction": "DOWNGRADE"
           }

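The net effect of the two ajax.js changes: both the upgrade and the downgrade requests now identify the target by its numeric repository-version id rather than by a version string such as "2.2". A sketch of the resulting request body (the id and values are illustrative, not taken from a live cluster):

import json

upgrade_body = {
    "Upgrade": {
        "repository_version_id": 1,  # previously "repository_version": "2.2"
        "upgrade_type": "ROLLING",
        "skip_failures": "false",
        "skip_service_check_failures": "false",
    }
}

# Sent as the JSON payload of the POST to the cluster's upgrades endpoint,
# e.g. /api/v1/clusters/<cluster>/upgrades.
print(json.dumps(upgrade_body, indent=2))
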
http://git-wip-us.apache.org/repos/asf/ambari/blob/1568f800/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js b/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
index e696bb1..fa0a0b9 100644
--- a/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
+++ b/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
@@ -128,6 +128,7 @@ describe('App.MainAdminStackAndUpgradeController', function() {
       sinon.stub(App.StackVersion, 'find').returns([Em.Object.create({
         state: 'CURRENT',
         repositoryVersion: {
+          id: '1',
           repositoryVersion: '2.2',
           displayName: 'HDP-2.2'
         }
@@ -155,6 +156,7 @@ describe('App.MainAdminStackAndUpgradeController', function() {
     });
     it('currentVersion is corrent', function () {
       expect(controller.get('currentVersion')).to.eql({
+        "id": "1",
         "repository_version": "2.2",
         "repository_name": "HDP-2.2"
       });
@@ -389,6 +391,7 @@ describe('App.MainAdminStackAndUpgradeController', function() {
   describe("#runPreUpgradeCheck()", function() {
     it("make ajax call", function() {
       controller.runPreUpgradeCheck(Em.Object.create({
+        id: '1',
         repositoryVersion: '2.2',
         displayName: 'HDP-2.2',
         upgradeType: 'ROLLING',
@@ -399,6 +402,7 @@ describe('App.MainAdminStackAndUpgradeController', function() {
       expect(args[0]).to.exists;
       expect(args[0].sender).to.be.eql(controller);
       expect(args[0].data).to.be.eql({
+        id: '1',
         value: '2.2',
         label: 'HDP-2.2',
         type: 'ROLLING',
@@ -1126,6 +1130,7 @@ describe('App.MainAdminStackAndUpgradeController', function() {
       controller.set('upgradeVersion', 'HDP-2.3');
       controller.set('upgradeType', 'NON_ROLLING');
       controller.startDowngrade(Em.Object.create({
+        id: '1',
         repository_version: '2.2',
         repository_name: 'HDP-2.2'
       }));
@@ -1139,6 +1144,7 @@ describe('App.MainAdminStackAndUpgradeController', function() {
     it('request-data is valid', function () {
       expect(this.callArgs.data).to.eql({
         from: '2.3',
+        id: '1',
         value: '2.2',
         label: 'HDP-2.2',
         isDowngrade: true,


[24/50] [abbrv] ambari git commit: AMBARI-21074 - Storm XML File has Invalid Characters Causing Exceptions on Server Startup (jonathanhurley)

Posted by ad...@apache.org.
AMBARI-21074 - Storm XML File has Invalid Characters Causing Exceptions on Server Startup (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f952c9ee
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f952c9ee
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f952c9ee

Branch: refs/heads/ambari-rest-api-explorer
Commit: f952c9eecdc8cba07117ecb39265f05e6ef47341
Parents: f2bbe47
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Fri May 19 10:19:49 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Fri May 19 10:19:49 2017 -0400

----------------------------------------------------------------------
 .../common-services/STORM/0.9.1/configuration/storm-env.xml      | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f952c9ee/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-env.xml b/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-env.xml
index cfa33e2..3d4edad 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-env.xml
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-env.xml
@@ -128,11 +128,11 @@ export STORM_HOME={{storm_component_home_dir}}
 
 #set storm-auto creds
 # check if storm_jaas.conf in config , only enable storm_auto_creds in secure mode.
-STORM_HOME="$(dirname $(cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ))"
+STORM_HOME="$(dirname $(cd "$( dirname "${BASH_SOURCE[0]}" )" &amp;&amp; pwd ))"
 STORM_JAAS_CONF=$STORM_HOME/config/storm_jaas.conf
 STORM_AUTOCREDS_LIB_DIR=/usr/hdp/current/storm-client/external/storm-autocreds
 
-if [ -f $STORM_JAAS_CONF ] && [ -d $STORM_AUTOCREDS_LIB_DIR ]; then
+if [ -f $STORM_JAAS_CONF ] &amp;&amp; [ -d $STORM_AUTOCREDS_LIB_DIR ]; then
     export STORM_EXT_CLASSPATH=$STORM_AUTOCREDS_LIB_DIR
 fi
 

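The root cause here is XML well-formedness: a bare "&" is not legal inside XML character data, so the shell operator "&&" embedded in the property value must be stored as "&amp;&amp;". Any conforming XML parser returns the literal "&&" when the value is read back, so the generated storm-env content is unchanged. A small sketch of both directions:

from xml.sax.saxutils import escape
import xml.etree.ElementTree as ET

shell_line = 'if [ -f $STORM_JAAS_CONF ] && [ -d $STORM_AUTOCREDS_LIB_DIR ]; then'

# Escaping for storage inside the XML value: "&&" becomes "&amp;&amp;".
print(escape(shell_line))

# Parsing restores the original shell text with a literal "&&".
doc = ET.fromstring('<value>%s</value>' % escape(shell_line))
assert doc.text == shell_line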

[25/50] [abbrv] ambari git commit: AMBARI-20973. Review the use/need of host clean up script. (Ishan Bhatt via Jaimin)

Posted by ad...@apache.org.
AMBARI-20973. Review the use/need of host clean up script. (Ishan Bhatt via Jaimin)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ae40bed9
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ae40bed9
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ae40bed9

Branch: refs/heads/ambari-rest-api-explorer
Commit: ae40bed9fc99965babaf3457bbd04758bba99023
Parents: f952c9e
Author: Jaimin Jetly <ja...@hortonworks.com>
Authored: Fri May 19 10:20:48 2017 -0700
Committer: Jaimin Jetly <ja...@hortonworks.com>
Committed: Fri May 19 10:20:48 2017 -0700

----------------------------------------------------------------------
 ambari-web/app/messages.js                                     | 6 +-----
 .../app/templates/wizard/step3/step3_host_warnings_popup.hbs   | 4 +---
 2 files changed, 2 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ae40bed9/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index f34cbdc..0c15a19 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -755,11 +755,7 @@ Em.I18n.translations = {
   'installer.step3.hostWarningsPopup.report.user': '<br><br>######################################<br># Users<br>#<br># A space delimited list of users who should not exist.<br># Provided so that administrators can easily copy paths into scripts, email etc.<br># Example: userdel hdfs<br>######################################<br>USERS<br>',
   'installer.step3.hostWarningsPopup.report.folder': '\\ /folder',
   'installer.step3.hostWarningsPopup.checks': 'Host Checks found',
-  'installer.step3.hostWarningsPopup.notice.beginning': 'After manually resolving the issues, click <b>Rerun Checks</b>.' +
-    '<br>To manually resolve issues on <b>each host</b> run the HostCleanup script (Python 2.6 or greater is required):<br>',
-  'installer.step3.hostWarningsPopup.notice.command': 'python /usr/lib/python2.6/site-packages/ambari_agent/HostCleanup.py --silent --skip=users',
-  'installer.step3.hostWarningsPopup.notice.end': '<div class="alert alert-warning"><b>Note</b>: Clean up of Firewall and Transparent Huge Page issues are not supported by the HostCleanup script.</div>' +
-    '<div class="alert alert-warning"><b>Note</b>: To clean up in interactive mode, remove <b>--silent</b> option. To clean up all resources, including <i>users</i>, remove <b>--skip=users</b> option. Use <b>--help</b> for a list of available options.</div>',
+  'installer.step3.hostWarningsPopup.notice.beginning': 'After manually resolving the issues, click <b>Rerun Checks</b>.',
   'installer.step3.hostWarningsPopup.summary':'{0} on {1}',
   'installer.step3.hostWarningsPopup.jdk':'JDK Issues',
   'installer.step3.hostWarningsPopup.jdk.name':'JDK not found at <i>{0}</i>',

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae40bed9/ambari-web/app/templates/wizard/step3/step3_host_warnings_popup.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/wizard/step3/step3_host_warnings_popup.hbs b/ambari-web/app/templates/wizard/step3/step3_host_warnings_popup.hbs
index 07ff31b..e234254 100644
--- a/ambari-web/app/templates/wizard/step3/step3_host_warnings_popup.hbs
+++ b/ambari-web/app/templates/wizard/step3/step3_host_warnings_popup.hbs
@@ -20,9 +20,7 @@
     <div id="host-warnings">
       <div class="notice">
         <span>{{t installer.step3.hostWarningsPopup.checks}} <b>{{view.warningsNotice}}</b>.<br>
-          {{t installer.step3.hostWarningsPopup.notice.beginning}}
-          <div class="code-snippet" {{QAAttr "host-cleanup-script"}}>{{t installer.step3.hostWarningsPopup.notice.command}}</div>
-          {{t installer.step3.hostWarningsPopup.notice.end}}</span>
+          {{t installer.step3.hostWarningsPopup.notice.beginning}}</span>
       </div>
       <div class="row">
         <form class="form-horizontal">


[30/50] [abbrv] ambari git commit: AMBARI-21080. Update shuffle timeout settings for hdp stack. (sseth via Swapan Shridhar).

Posted by ad...@apache.org.
AMBARI-21080. Update shuffle timeout settings for hdp stack. (sseth via Swapan Shridhar).


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c23602c9
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c23602c9
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c23602c9

Branch: refs/heads/ambari-rest-api-explorer
Commit: c23602c9f0412bba8375e7f8f28f05c970e04159
Parents: 55af336
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Sat May 20 01:01:07 2017 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Sat May 20 01:01:07 2017 -0700

----------------------------------------------------------------------
 .../stacks/HDP/2.5/upgrades/config-upgrade.xml          |  2 ++
 .../HIVE/configuration/tez-interactive-site.xml         | 12 ++++++++++++
 2 files changed, 14 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c23602c9/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
index 88b8a35..a29f74b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
@@ -495,6 +495,8 @@
             <set key="tez.session.am.dag.submit.timeout.secs" value="1209600"/>
             <set key="tez.runtime.enable.final-merge.in.output" value="false"/>
             <set key="tez.am.task.reschedule.higher.priority" value="false"/>
+            <set key="tez.runtime.shuffle.connect.timeout" value ="30000"/>
+            <set key="tez.runtime.shuffle.read.timeout" value="30000"/>
           </definition>
 
           <definition xsi:type="configure" id="hdp_2_6_0_0_copy_hive_tez_container_size_to_hiveInteractive">

http://git-wip-us.apache.org/repos/asf/ambari/blob/c23602c9/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/tez-interactive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/tez-interactive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/tez-interactive-site.xml
index 6752d65..2c9b272 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/tez-interactive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/tez-interactive-site.xml
@@ -126,5 +126,17 @@
     <description>Whether rescheduled tasks should be treated at higher priority</description>
     <on-ambari-upgrade add="false"/>
   </property>
+  <property>
+    <name>tez.runtime.shuffle.connect.timeout</name>
+    <value>30000</value>
+    <description>Shuffle connect timeouts (ms)</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>tez.runtime.shuffle.read.timeout</name>
+    <value>30000</value>
+    <description>Shuffle read timeout (ms)</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
 
 </configuration>

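For reference, a <set key="..." value="..."/> definition in config-upgrade.xml overwrites (or adds) the named key in the target config type during upgrade, which is how existing clusters pick up the same 30000 ms values that the stack XML above gives to new installs. A simplified model of that operation, starting from a hypothetical tez-interactive-site snapshot:

# Hypothetical existing config snapshot; only the keys matter here.
tez_interactive_site = {
    "tez.session.am.dag.submit.timeout.secs": "1209600",
}

# The two <set .../> definitions from config-upgrade.xml, as plain overrides.
upgrade_sets = {
    "tez.runtime.shuffle.connect.timeout": "30000",  # ms
    "tez.runtime.shuffle.read.timeout": "30000",     # ms
}

tez_interactive_site.update(upgrade_sets)
assert tez_interactive_site["tez.runtime.shuffle.read.timeout"] == "30000"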

[33/50] [abbrv] ambari git commit: Revert "ADDENDUM. AMBARI-21011. Upgrade Code. Append PATH to YARN config 'yarn.nodemanager.admin-env' for HDP 2.6."

Posted by ad...@apache.org.
Revert "ADDENDUM. AMBARI-21011. Upgrade Code. Append PATH to YARN config 'yarn.nodemanager.admin-env' for HDP 2.6."

This reverts commit d0a5cd4a6b22f0c8e02bb7ceb2d5de11314f542a.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1603cd68
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1603cd68
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1603cd68

Branch: refs/heads/ambari-rest-api-explorer
Commit: 1603cd6840852b69567523f74a4b708aa4bd6d73
Parents: ccd6b25
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Mon May 22 01:26:16 2017 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Mon May 22 01:26:22 2017 -0700

----------------------------------------------------------------------
 .../resources/stacks/HDP/2.3/upgrades/config-upgrade.xml     | 8 --------
 .../stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml       | 6 ------
 .../main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml   | 1 -
 .../resources/stacks/HDP/2.4/upgrades/config-upgrade.xml     | 6 +-----
 .../stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml       | 6 ------
 .../main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml   | 1 -
 .../resources/stacks/HDP/2.5/upgrades/config-upgrade.xml     | 8 --------
 .../stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml       | 6 ------
 .../main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml   | 4 ----
 .../resources/stacks/HDP/2.6/upgrades/config-upgrade.xml     | 4 ----
 .../stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml       | 7 -------
 .../main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml   | 1 -
 12 files changed, 1 insertion(+), 57 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/1603cd68/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
index 98bb056..8b5c07d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
@@ -546,14 +546,6 @@
           </definition>
         </changes>
       </component>
-      <component name="NODEMANAGER">
-        <changes>
-          <definition xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env">
-            <type>yarn-site</type>
-            <insert key="yarn.nodemanager.admin-env" value=",PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:$PATH" insert-type="append" newline-before="false" newline-after="false" />
-          </definition>
-        </changes>
-      </component>
     </service>
 
     <service name="MAPREDUCE2">

http://git-wip-us.apache.org/repos/asf/ambari/blob/1603cd68/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
index 4d2b3ec..5aa08c5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
@@ -353,12 +353,6 @@
         </task>
       </execute-stage>
 
-      <execute-stage service="YARN" component="NODEMANAGER" title="Apply config changes for YARN NM admin env">
-        <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env">
-          <summary>Updating YARN NodeManager admin env config</summary>
-        </task>
-      </execute-stage>
-
       <!--Yarn Apptimeline server-->
       <execute-stage service="YARN" component="APP_TIMELINE_SERVER" title="Apply config changes for App timeline server">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FixYarnWebServiceUrl">

http://git-wip-us.apache.org/repos/asf/ambari/blob/1603cd68/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
index f1dd943..d98bb53 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
@@ -789,7 +789,6 @@
       <component name="NODEMANAGER">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_5_0_0_add_spark2_yarn_shuffle"/>
-          <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env"/>
         </pre-upgrade>
 
         <pre-downgrade/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1603cd68/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
index b448a2d..b3d19d4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
@@ -332,11 +332,7 @@
             <set key="yarn.nodemanager.aux-services" value="mapreduce_shuffle,spark_shuffle,spark2_shuffle"/>
             <!-- Ideally we need to append spark2_shuffle to the existing value -->
           </definition>
-          <definition xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env">
-            <type>yarn-site</type>
-            <insert key="yarn.nodemanager.admin-env" value=",PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:$PATH" insert-type="append" newline-before="false" newline-after="false" />
-          </definition>
-      </changes>
+        </changes>
       </component>
     </service>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1603cd68/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
index 4920f12..4a2a502 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
@@ -331,12 +331,6 @@
         </task>
       </execute-stage>
 
-      <execute-stage service="YARN" component="NODEMANAGER" title="Apply config changes for YARN NM admin env">
-        <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env">
-          <summary>Updating YARN NodeManager admin env config</summary>
-        </task>
-      </execute-stage>
-
       <execute-stage service="MAPREDUCE2" component="MAPREDUCE2_CLIENT" title="Apply config changes for Mapreduce2 client">
         <task xsi:type="configure" id="hdp_2_6_0_0_mapreduce_job_queuename">
           <summary>Adding queue customization property</summary>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1603cd68/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
index 6acedc9..1eb9836 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
@@ -794,7 +794,6 @@
       <component name="NODEMANAGER">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_5_0_0_add_spark2_yarn_shuffle"/>
-          <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env"/>
         </pre-upgrade>
 
         <pre-downgrade/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1603cd68/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
index 4c6cb21..a29f74b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
@@ -206,14 +206,6 @@
           </definition>
         </changes>
       </component>
-      <component name="NODEMANAGER">
-        <changes>
-          <definition xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env">
-            <type>yarn-site</type>
-            <insert key="yarn.nodemanager.admin-env" value=",PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:$PATH" insert-type="append" newline-before="false" newline-after="false" />
-          </definition>
-        </changes>
-      </component>
     </service>
 
     <service name="MAPREDUCE2">

http://git-wip-us.apache.org/repos/asf/ambari/blob/1603cd68/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
index d617a31..8c659ee 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
@@ -345,12 +345,6 @@
         </task>
       </execute-stage>
 
-      <execute-stage service="YARN" component="NODEMANAGER" title="Apply config changes for YARN NM admin env">
-        <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env">
-          <summary>Updating YARN NM admin env config</summary>
-        </task>
-      </execute-stage>
-
       <execute-stage>
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FixCapacitySchedulerOrderingPolicy">
           <summary>Validate Root Queue Ordering Policy</summary>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1603cd68/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
index fb854b9..3054ca3 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
@@ -721,10 +721,6 @@
       </component>
 
       <component name="NODEMANAGER">
-        <pre-upgrade>
-          <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env"/>
-        </pre-upgrade>
-        <pre-downgrade />
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1603cd68/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
index a8ac1bc..1610bb5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
@@ -119,10 +119,6 @@
       </component>
       <component name="NODEMANAGER">
         <changes>
-          <definition xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env">
-            <type>yarn-site</type>
-            <insert key="yarn.nodemanager.admin-env" value=",PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:$PATH" insert-type="append" newline-before="false" newline-after="false" />
-          </definition>
           <definition xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_llap_mem">
             <type>yarn-site</type>
             <set key="yarn.nodemanager.kill-escape.launch-command-line" value="slider-agent,LLAP"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1603cd68/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
index ae7ffc5..1cdd184 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
@@ -328,13 +328,6 @@
       </execute-stage>
 
       <!-- YARN -->
-      <execute-stage service="YARN" component="NODEMANAGER" title="Apply config changes for YARN NM admin env">
-        <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env">
-          <summary>Updating YARN NodeManager admin env config</summary>
-        </task>
-      </execute-stage>
-
-      <!-- YARN -->
       <execute-stage service="YARN" component="NODEMANAGER" title="Apply config changes for YARN NM">
         <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_llap_mem">
           <summary>Updating YARN NodeManager config for LLAP</summary>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1603cd68/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
index c2ae825..3e7e3d7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
@@ -696,7 +696,6 @@
 
       <component name="NODEMANAGER">
         <pre-upgrade>
-          <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env"/>
           <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_llap_mem"/>
         </pre-upgrade>
         <pre-downgrade/> <!--  no-op to prevent config changes on downgrade -->

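The definition being reverted used insert-type="append", which tacks the given value onto the end of the existing property rather than replacing it. A model of that (now reverted) behavior, assuming a typical yarn.nodemanager.admin-env starting value:

# Hypothetical starting value; the Hadoop default is commonly
# MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX.
yarn_site = {"yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX"}

# insert-type="append" with newline-before/after "false" is a plain
# string concatenation onto the existing value.
yarn_site["yarn.nodemanager.admin-env"] += (
    ",PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:$PATH"
)

print(yarn_site["yarn.nodemanager.admin-env"])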

[08/50] [abbrv] ambari git commit: AMBARI-21043. Backport Ambari-17694 - Kafka listeners property does not show SASL_PLAINTEXT protocol when Kerberos is enabled (rlevas)

Posted by ad...@apache.org.
AMBARI-21043. Backport Ambari-17694 - Kafka listeners property does not show SASL_PLAINTEXT protocol when Kerberos is enabled (rlevas)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/bba703bc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/bba703bc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/bba703bc

Branch: refs/heads/ambari-rest-api-explorer
Commit: bba703bc600555e596c49aef8749df65c9ac918c
Parents: 8bf136a
Author: Robert Levas <rl...@hortonworks.com>
Authored: Wed May 17 14:33:03 2017 -0400
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Wed May 17 14:33:03 2017 -0400

----------------------------------------------------------------------
 .../server/upgrade/UpgradeCatalog251.java       | 47 +++++++++-
 .../stacks/HDP/2.3/services/stack_advisor.py    |  2 +-
 .../server/upgrade/UpgradeCatalog251Test.java   | 92 ++++++++++++++++++++
 3 files changed, 139 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/bba703bc/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
index 6f8f2a6..745890c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -18,9 +18,18 @@
 package org.apache.ambari.server.upgrade;
 
 import java.sql.SQLException;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Set;
 
 import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.SecurityType;
+import org.apache.commons.lang.StringUtils;
 
 import com.google.inject.Inject;
 import com.google.inject.Injector;
@@ -33,6 +42,8 @@ public class UpgradeCatalog251 extends AbstractUpgradeCatalog {
   static final String HOST_ROLE_COMMAND_TABLE = "host_role_command";
   static final String HRC_IS_BACKGROUND_COLUMN = "is_background";
 
+  protected static final String KAFKA_BROKER_CONFIG = "kafka-broker";
+
   /**
    * Constructor.
    *
@@ -79,6 +90,40 @@ public class UpgradeCatalog251 extends AbstractUpgradeCatalog {
    */
   @Override
   protected void executeDMLUpdates() throws AmbariException, SQLException {
+    updateKAFKAConfigs();
+  }
+
+  /**
+   * Ensure that the updates from Ambari 2.4.0 are applied in the event the initial version is
+   * Ambari 2.5.0, since this Kafka change failed to make it into Ambari 2.5.0.
+   *
+   * If the base version was before Ambari 2.5.0, this method should wind up doing nothing.
+   * @throws AmbariException
+   */
+  protected void updateKAFKAConfigs() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    Clusters clusters = ambariManagementController.getClusters();
+    if (clusters != null) {
+      Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
+      if (clusterMap != null && !clusterMap.isEmpty()) {
+        for (final Cluster cluster : clusterMap.values()) {
+          Set<String> installedServices = cluster.getServices().keySet();
+
+          if (installedServices.contains("KAFKA") && cluster.getSecurityType() == SecurityType.KERBEROS) {
+            Config kafkaBroker = cluster.getDesiredConfigByType(KAFKA_BROKER_CONFIG);
+            if (kafkaBroker != null) {
+              String listenersPropertyValue = kafkaBroker.getProperties().get("listeners");
+              if (StringUtils.isNotEmpty(listenersPropertyValue)) {
+                String newListenersPropertyValue = listenersPropertyValue.replaceAll("\\bPLAINTEXT\\b", "PLAINTEXTSASL");
+                if (!newListenersPropertyValue.equals(listenersPropertyValue)) {
+                  updateConfigurationProperties(KAFKA_BROKER_CONFIG, Collections.singletonMap("listeners", newListenersPropertyValue), true, false);
+                }
+              }
+            }
+          }
+        }
+      }
+    }
   }
 
   /**

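A note on the replacement above: the \b word boundaries are what make updateKAFKAConfigs() safe to run more than once, since the PLAINTEXT prefix inside an already-converted PLAINTEXTSASL token is not followed by a boundary and is left alone. A minimal standalone sketch of that behavior (illustrative class name, not Ambari code):

import java.util.Collections;
import java.util.Map;

public class ListenersRewriteSketch {
  public static void main(String[] args) {
    Map<String, String> props = Collections.singletonMap(
        "listeners", "PLAINTEXT://localhost:6667,SSL://localhost:6666");

    // \bPLAINTEXT\b matches PLAINTEXT:// but not the PLAINTEXT prefix of PLAINTEXTSASL.
    String updated = props.get("listeners").replaceAll("\\bPLAINTEXT\\b", "PLAINTEXTSASL");
    System.out.println(updated);
    // -> PLAINTEXTSASL://localhost:6667,SSL://localhost:6666

    // A second pass is a no-op, which is what the re-entrant test below verifies.
    System.out.println(updated.equals(updated.replaceAll("\\bPLAINTEXT\\b", "PLAINTEXTSASL")));
    // -> true
  }
}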
http://git-wip-us.apache.org/repos/asf/ambari/blob/bba703bc/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
index 8cefdac..9efcee0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
@@ -918,7 +918,7 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
       "HIVE": {"hiveserver2-site": self.validateHiveServer2Configurations,
                "hive-site": self.validateHiveConfigurations},
       "HBASE": {"hbase-site": self.validateHBASEConfigurations},
-      "KAKFA": {"kafka-broker": self.validateKAFKAConfigurations},
+      "KAFKA": {"kafka-broker": self.validateKAFKAConfigurations},
       "RANGER": {"admin-properties": self.validateRangerAdminConfigurations,
                  "ranger-env": self.validateRangerConfigurationsEnv}
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/bba703bc/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog251Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog251Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog251Test.java
index 4575998..d725ec4 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog251Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog251Test.java
@@ -20,21 +20,29 @@ package org.apache.ambari.server.upgrade;
 
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.createMockBuilder;
+import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.newCapture;
 import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.reset;
 import static org.easymock.EasyMock.verify;
 
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
 import java.sql.Connection;
 import java.sql.ResultSet;
 import java.sql.Statement;
+import java.util.Collections;
+import java.util.Map;
 
 import javax.persistence.EntityManager;
 
 import org.apache.ambari.server.actionmanager.ActionManager;
 import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.KerberosHelper;
 import org.apache.ambari.server.controller.MaintenanceStateHelper;
 import org.apache.ambari.server.orm.DBAccessor;
@@ -42,10 +50,12 @@ import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.stack.OsFamily;
 import org.easymock.Capture;
 import org.easymock.EasyMockRunner;
+import org.easymock.EasyMockSupport;
 import org.easymock.Mock;
 import org.easymock.MockType;
 import org.junit.After;
@@ -53,8 +63,10 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
+import org.springframework.security.crypto.password.PasswordEncoder;
 
 import com.google.gson.Gson;
+import com.google.inject.AbstractModule;
 import com.google.inject.Binder;
 import com.google.inject.Guice;
 import com.google.inject.Injector;
@@ -163,4 +175,84 @@ public class UpgradeCatalog251Test {
     Assert.assertEquals(Integer.valueOf(0), captured.getDefaultValue());
     Assert.assertEquals(Short.class, captured.getType());
   }
+
+  @Test
+  public void testExecuteDMLUpdates() throws Exception {
+    Method updateKAFKAConfigs = UpgradeCatalog251.class.getDeclaredMethod("updateKAFKAConfigs");
+
+    UpgradeCatalog251 upgradeCatalog251 = createMockBuilder(UpgradeCatalog251.class)
+        .addMockedMethod(updateKAFKAConfigs)
+        .createMock();
+
+    Field field = AbstractUpgradeCatalog.class.getDeclaredField("dbAccessor");
+    field.set(upgradeCatalog251, dbAccessor);
+
+    upgradeCatalog251.updateKAFKAConfigs();
+    expectLastCall().once();
+
+    replay(upgradeCatalog251, dbAccessor);
+
+    upgradeCatalog251.executeDMLUpdates();
+
+    verify(upgradeCatalog251, dbAccessor);
+  }
+
+
+  @Test
+  public void testUpdateKAFKAConfigs() throws Exception {
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
+    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
+    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
+
+    Map<String, String> initialProperties = Collections.singletonMap("listeners", "PLAINTEXT://localhost:6667,SSL://localhost:6666");
+    Map<String, String> expectedUpdates = Collections.singletonMap("listeners", "PLAINTEXTSASL://localhost:6667,SSL://localhost:6666");
+
+    final Config kafkaBroker = easyMockSupport.createNiceMock(Config.class);
+    expect(kafkaBroker.getProperties()).andReturn(initialProperties).times(1);
+    // Re-entrant test
+    expect(kafkaBroker.getProperties()).andReturn(expectedUpdates).times(1);
+
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
+        bind(Clusters.class).toInstance(mockClusters);
+        bind(EntityManager.class).toInstance(entityManager);
+        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
+        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+        bind(PasswordEncoder.class).toInstance(createNiceMock(PasswordEncoder.class));
+      }
+    });
+
+    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).atLeastOnce();
+    expect(mockClusters.getClusters()).andReturn(Collections.singletonMap("normal", mockClusterExpected)).atLeastOnce();
+    expect(mockClusterExpected.getDesiredConfigByType("kafka-broker")).andReturn(kafkaBroker).atLeastOnce();
+    expect(mockClusterExpected.getSecurityType()).andReturn(SecurityType.KERBEROS).atLeastOnce();
+    expect(mockClusterExpected.getServices()).andReturn(Collections.<String, Service>singletonMap("KAFKA", null)).atLeastOnce();
+
+    UpgradeCatalog251 upgradeCatalog251 = createMockBuilder(UpgradeCatalog251.class)
+        .withConstructor(Injector.class)
+        .withArgs(mockInjector)
+        .addMockedMethod("updateConfigurationProperties", String.class,
+            Map.class, boolean.class, boolean.class)
+        .createMock();
+
+
+    // upgradeCatalog251.updateConfigurationProperties is expected to execute only once, since no
+    // changes are needed once the relevant data have already been updated
+    upgradeCatalog251.updateConfigurationProperties("kafka-broker", expectedUpdates, true, false);
+    expectLastCall().once();
+
+    easyMockSupport.replayAll();
+    replay(upgradeCatalog251);
+
+    // Execute the first time... upgrading to Ambari 2.4.0
+    upgradeCatalog251.updateKAFKAConfigs();
+
+    // Test reentry... upgrading from Ambari 2.4.0
+    upgradeCatalog251.updateKAFKAConfigs();
+
+    easyMockSupport.verifyAll();
+  }
 }


[23/50] [abbrv] ambari git commit: AMBARI-21057. Change Storage of Data on Request/Stage/Task To Reduce Redundancy (dgrinenko via dlysnichenko)

Posted by ad...@apache.org.
AMBARI-21057. Change Storage of Data on Request/Stage/Task To Reduce Redundancy (dgrinenko via dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f2bbe478
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f2bbe478
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f2bbe478

Branch: refs/heads/ambari-rest-api-explorer
Commit: f2bbe4781841b871993b3e8dd88e5f56169cf1b8
Parents: f7a1d4e
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Fri May 19 11:52:12 2017 +0300
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Fri May 19 11:52:12 2017 +0300

----------------------------------------------------------------------
 .../server/actionmanager/ActionManager.java     |   4 +-
 .../server/actionmanager/ActionScheduler.java   |  25 +++--
 .../ambari/server/actionmanager/Request.java    |  18 +++-
 .../server/actionmanager/RequestFactory.java    |   4 +-
 .../ambari/server/actionmanager/Stage.java      |  14 ---
 .../server/actionmanager/StageFactory.java      |   1 -
 .../server/actionmanager/StageFactoryImpl.java  |   4 +-
 .../AmbariCustomCommandExecutionHelper.java     |  12 ++-
 .../AmbariManagementControllerImpl.java         |  15 ++-
 .../server/controller/KerberosHelperImpl.java   |  51 +++++----
 .../ClusterStackVersionResourceProvider.java    |   3 +-
 .../HostStackVersionResourceProvider.java       |   4 +-
 .../internal/RequestResourceProvider.java       |   9 +-
 .../internal/RequestStageContainer.java         |  11 +-
 .../internal/StageResourceProvider.java         |   9 +-
 .../internal/UpgradeResourceProvider.java       |  13 +--
 .../server/hooks/users/UserHookService.java     |   3 +-
 .../apache/ambari/server/orm/DBAccessor.java    |  24 +++++
 .../ambari/server/orm/DBAccessorImpl.java       |  44 ++++++++
 .../server/orm/entities/RequestEntity.java      |  19 ++++
 .../ambari/server/orm/entities/StageEntity.java |  18 ----
 .../server/orm/entities/StageEntity_.java       |   4 -
 .../server/orm/helpers/dbms/DbmsHelper.java     |  21 ++++
 .../orm/helpers/dbms/GenericDbmsHelper.java     |   8 ++
 .../server/orm/helpers/dbms/MySqlHelper.java    |  12 +++
 .../server/orm/helpers/dbms/OracleHelper.java   |  12 +++
 .../server/orm/helpers/dbms/PostgresHelper.java |  12 +++
 .../ambari/server/stageplanner/RoleGraph.java   |   2 +-
 .../server/upgrade/UpgradeCatalog251.java       |  29 +++++
 .../apache/ambari/server/utils/StageUtils.java  |   8 +-
 .../main/resources/Ambari-DDL-Derby-CREATE.sql  |   2 +-
 .../main/resources/Ambari-DDL-MySQL-CREATE.sql  |   2 +-
 .../main/resources/Ambari-DDL-Oracle-CREATE.sql |   2 +-
 .../resources/Ambari-DDL-Postgres-CREATE.sql    |   2 +-
 .../resources/Ambari-DDL-SQLAnywhere-CREATE.sql |   2 +-
 .../resources/Ambari-DDL-SQLServer-CREATE.sql   |   2 +-
 .../src/main/resources/properties.json          |   1 +
 .../ExecutionCommandWrapperTest.java            |   4 +-
 .../ambari/server/actionmanager/StageTest.java  |   2 +-
 .../actionmanager/TestActionDBAccessorImpl.java |  34 +++---
 .../server/actionmanager/TestActionManager.java |   8 +-
 .../actionmanager/TestActionScheduler.java      |  43 +++++---
 .../ambari/server/actionmanager/TestStage.java  |   5 +-
 .../server/agent/HeartbeatProcessorTest.java    |   4 +-
 .../server/agent/HeartbeatTestHelper.java       |   4 +-
 .../server/agent/TestHeartbeatHandler.java      |   4 +-
 .../AmbariManagementControllerTest.java         |  18 ++--
 .../server/controller/KerberosHelperTest.java   | 105 +++++++++++++++++--
 .../internal/CalculatedStatusTest.java          |   2 +-
 ...ClusterStackVersionResourceProviderTest.java |  10 +-
 .../internal/RequestStageContainerTest.java     |   3 +-
 .../server/hooks/users/UserHookServiceTest.java |   4 +-
 .../serveraction/ServerActionExecutorTest.java  |   9 +-
 .../server/stageplanner/TestStagePlanner.java   |   2 +-
 .../ambari/server/utils/StageUtilsTest.java     |   2 +-
 55 files changed, 493 insertions(+), 196 deletions(-)
----------------------------------------------------------------------
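In short, this change moves the serialized clusterHostInfo JSON off of each Stage and onto the owning Request, so the (often large) host map is stored once per request instead of once per stage. Pulled out of the diffs below, the new call shape is roughly this (a sketch assembled from the changed signatures, not a complete compile unit; the factories are injected as in the surrounding classes):

// clusterHostInfo no longer appears in the Stage factory signature...
Stage stage = stageFactory.createNew(requestId, logDir, clusterName, clusterId,
    requestContext, commandParamsStage, hostParamsStage);

// ...and is instead supplied once, at the request level, when stages are wrapped into a Request.
String clusterHostInfoJson = StageUtils.getGson().toJson(StageUtils.getClusterHostInfo(cluster));
Request request = requestFactory.createNewFromStages(stages, clusterHostInfoJson, actionRequest);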


http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionManager.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionManager.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionManager.java
index 398bc9d..13cdce1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionManager.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionManager.java
@@ -77,8 +77,8 @@ public class ActionManager {
     scheduler.stop();
   }
 
-  public void sendActions(List<Stage> stages, ExecuteActionRequest actionRequest) throws AmbariException {
-    Request request = requestFactory.createNewFromStages(stages, actionRequest);
+  public void sendActions(List<Stage> stages, String clusterHostInfo, ExecuteActionRequest actionRequest) throws AmbariException {
+    Request request = requestFactory.createNewFromStages(stages, clusterHostInfo, actionRequest);
     sendActions(request, actionRequest);
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionScheduler.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionScheduler.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionScheduler.java
index 316f2bd..d3157e2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionScheduler.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionScheduler.java
@@ -465,9 +465,10 @@ class ActionScheduler implements Runnable {
 
         //Schedule what we have so far
 
+
         for (ExecutionCommand cmd : commandsToSchedule) {
           ConfigHelper.processHiddenAttribute(cmd.getConfigurations(), cmd.getConfigurationAttributes(), cmd.getRole(), false);
-          processHostRole(stage, cmd, commandsToStart, commandsToUpdate);
+          processHostRole(request, stage, cmd, commandsToStart, commandsToUpdate);
         }
 
         LOG.debug("==> Commands to start: {}", commandsToStart.size());
@@ -1090,7 +1091,7 @@ class ActionScheduler implements Runnable {
     return serviceEventMap;
   }
 
-  private void processHostRole(Stage s, ExecutionCommand cmd, List<ExecutionCommand> commandsToStart,
+  private void processHostRole(RequestEntity r, Stage s, ExecutionCommand cmd, List<ExecutionCommand> commandsToStart,
                                List<ExecutionCommand> commandsToUpdate)
     throws AmbariException {
     long now = System.currentTimeMillis();
@@ -1106,23 +1107,23 @@ class ActionScheduler implements Runnable {
     }
     s.setLastAttemptTime(hostname, roleStr, now);
     s.incrementAttemptCount(hostname, roleStr);
-    /** change the hostname in the command for the host itself **/
-    cmd.setHostname(hostsMap.getHostMap(hostname));
 
 
-    //Try to get clusterHostInfo from cache
+    String requestPK = r.getRequestId().toString();
     String stagePk = s.getStageId() + "-" + s.getRequestId();
-    Map<String, Set<String>> clusterHostInfo = clusterHostInfoCache.getIfPresent(stagePk);
+
+    // Try to get clusterHostInfo from cache
+    Map<String, Set<String>> clusterHostInfo = clusterHostInfoCache.getIfPresent(requestPK);
 
     if (clusterHostInfo == null) {
       Type type = new TypeToken<Map<String, Set<String>>>() {}.getType();
-      clusterHostInfo = StageUtils.getGson().fromJson(s.getClusterHostInfo(), type);
-      clusterHostInfoCache.put(stagePk, clusterHostInfo);
+      clusterHostInfo = StageUtils.getGson().fromJson(r.getClusterHostInfo(), type);
+      clusterHostInfoCache.put(requestPK, clusterHostInfo);
     }
 
     cmd.setClusterHostInfo(clusterHostInfo);
 
-    //Try to get commandParams from cache and merge them with command-level parameters
+    // Try to get commandParams from cache and merge them with command-level parameters
     Map<String, String> commandParams = commandParamsStageCache.getIfPresent(stagePk);
 
     if (commandParams == null){
@@ -1143,10 +1144,10 @@ class ActionScheduler implements Runnable {
         }
       }
     } catch (ClusterNotFoundException cnfe) {
-      //NOP
+      // NOP
     }
 
-    //Try to get hostParams from cache and merge them with command-level parameters
+    // Try to get hostParams from cache and merge them with command-level parameters
     Map<String, String> hostParams = hostParamsStageCache.getIfPresent(stagePk);
     if (hostParams == null) {
       Type type = new TypeToken<Map<String, String>>() {}.getType();
@@ -1157,6 +1158,8 @@ class ActionScheduler implements Runnable {
     hostParamsCmd.putAll(hostParams);
     cmd.setHostLevelParams(hostParamsCmd);
 
+    // change the hostname in the command for the host itself
+    cmd.setHostname(hostsMap.getHostMap(hostname));
 
     commandsToUpdate.add(cmd);
   }

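The scheduler's cache is now keyed by request id rather than stage id, since every stage in a request shares one clusterHostInfo value; the JSON is parsed once per request and reused. A self-contained sketch of that caching pattern using the same Guava and Gson types (class, method, and expiry settings here are illustrative, not the scheduler's actual configuration):

import java.lang.reflect.Type;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;

public class ClusterHostInfoCacheSketch {
  // Expiry is an assumption for the sketch; the real cache is configured in ActionScheduler.
  private final Cache<String, Map<String, Set<String>>> clusterHostInfoCache =
      CacheBuilder.newBuilder().expireAfterAccess(5, TimeUnit.MINUTES).build();

  private final Gson gson = new Gson();

  public Map<String, Set<String>> getClusterHostInfo(long requestId, String clusterHostInfoJson) {
    String requestPK = Long.toString(requestId);
    Map<String, Set<String>> clusterHostInfo = clusterHostInfoCache.getIfPresent(requestPK);
    if (clusterHostInfo == null) {
      // Parse once per request; all of its stages share the same JSON.
      Type type = new TypeToken<Map<String, Set<String>>>() {}.getType();
      clusterHostInfo = gson.fromJson(clusterHostInfoJson, type);
      clusterHostInfoCache.put(requestPK, clusterHostInfo);
    }
    return clusterHostInfo;
  }
}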
http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Request.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Request.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Request.java
index 10e0d57..baf67fe 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Request.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Request.java
@@ -58,6 +58,7 @@ public class Request {
   private long createTime;
   private long startTime;
   private long endTime;
+  private String clusterHostInfo;
 
   /**
    * If true, this request can not be executed in parallel with any another
@@ -94,6 +95,7 @@ public class Request {
     this.startTime = -1;
     this.endTime = -1;
     this.exclusive = false;
+    this.clusterHostInfo = "{}";
 
     if (-1L != this.clusterId) {
       try {
@@ -110,7 +112,7 @@ public class Request {
    * Construct new entity from stages provided
    */
   //TODO remove when not needed
-  public Request(@Assisted Collection<Stage> stages, Clusters clusters){
+  public Request(@Assisted Collection<Stage> stages, @Assisted String clusterHostInfo, Clusters clusters){
     if (stages != null && !stages.isEmpty()) {
       this.stages.addAll(stages);
       Stage stage = stages.iterator().next();
@@ -129,6 +131,7 @@ public class Request {
       this.createTime = System.currentTimeMillis();
       this.startTime = -1;
       this.endTime = -1;
+      this.clusterHostInfo = clusterHostInfo;
       this.requestType = RequestType.INTERNAL_REQUEST;
       this.exclusive = false;
     } else {
@@ -143,9 +146,9 @@ public class Request {
    * Construct new entity from stages provided
    */
   //TODO remove when not needed
-  public Request(@Assisted Collection<Stage> stages, @Assisted ExecuteActionRequest actionRequest,
+  public Request(@Assisted Collection<Stage> stages, @Assisted String clusterHostInfo, @Assisted ExecuteActionRequest actionRequest,
                  Clusters clusters, Gson gson) throws AmbariException {
-    this(stages, clusters);
+    this(stages, clusterHostInfo, clusters);
     if (actionRequest != null) {
       this.resourceFilters = actionRequest.getResourceFilters();
       this.operationLevel = actionRequest.getOperationLevel();
@@ -183,6 +186,7 @@ public class Request {
     this.exclusive = entity.isExclusive();
     this.requestContext = entity.getRequestContext();
     this.inputs = entity.getInputs();
+    this.clusterHostInfo = entity.getClusterHostInfo();
 
     this.requestType = entity.getRequestType();
     this.commandName = entity.getCommandName();
@@ -245,6 +249,7 @@ public class Request {
     requestEntity.setRequestScheduleId(requestScheduleId);
     requestEntity.setStatus(status);
     requestEntity.setDisplayStatus(displayStatus);
+    requestEntity.setClusterHostInfo(clusterHostInfo);
     //TODO set all fields
 
     if (resourceFilters != null) {
@@ -281,6 +286,13 @@ public class Request {
     return requestEntity;
   }
 
+  public String getClusterHostInfo() {
+    return clusterHostInfo;
+  }
+
+  public void setClusterHostInfo(String clusterHostInfo) {
+    this.clusterHostInfo = clusterHostInfo;
+  }
 
   public Long getClusterId() {
     return Long.valueOf(clusterId);

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/RequestFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/RequestFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/RequestFactory.java
index bc0223c..8a22796 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/RequestFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/RequestFactory.java
@@ -30,9 +30,9 @@ public interface RequestFactory {
 
   Request createNew(long requestId, @Assisted("clusterId") Long clusterName) throws AmbariException;
 
-  Request createNewFromStages(Collection<Stage> stages);
+  Request createNewFromStages(Collection<Stage> stages, String clusterHostInfo);
 
-  Request createNewFromStages(Collection<Stage> stages, ExecuteActionRequest actionRequest);
+  Request createNewFromStages(Collection<Stage> stages, String clusterHostInfo, ExecuteActionRequest actionRequest);
 
   Request createExisting(RequestEntity entity);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java
index 574afa1..5295536 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java
@@ -76,7 +76,6 @@ public class Stage {
   private final String requestContext;
   private HostRoleStatus status = HostRoleStatus.PENDING;
   private HostRoleStatus displayStatus = HostRoleStatus.PENDING;
-  private String clusterHostInfo;
   private String commandParamsStage;
   private String hostParamsStage;
 
@@ -110,7 +109,6 @@ public class Stage {
       @Assisted("clusterName") @Nullable String clusterName,
       @Assisted("clusterId") long clusterId,
       @Assisted("requestContext") @Nullable String requestContext,
-      @Assisted("clusterHostInfo") String clusterHostInfo,
       @Assisted("commandParamsStage") String commandParamsStage,
       @Assisted("hostParamsStage") String hostParamsStage,
       HostRoleCommandFactory hostRoleCommandFactory, ExecutionCommandWrapperFactory ecwFactory) {
@@ -120,7 +118,6 @@ public class Stage {
     this.clusterName = clusterName;
     this.clusterId = clusterId;
     this.requestContext = requestContext == null ? "" : requestContext;
-    this.clusterHostInfo = clusterHostInfo;
     this.commandParamsStage = commandParamsStage;
     this.hostParamsStage = hostParamsStage;
 
@@ -155,7 +152,6 @@ public class Stage {
     }
 
     requestContext = stageEntity.getRequestContext();
-    clusterHostInfo = stageEntity.getClusterHostInfo();
     commandParamsStage = stageEntity.getCommandParamsStage();
     hostParamsStage = stageEntity.getHostParamsStage();
     commandExecutionType = stageEntity.getCommandExecutionType();
@@ -197,7 +193,6 @@ public class Stage {
     stageEntity.setRequestContext(requestContext);
     stageEntity.setHostRoleCommands(new ArrayList<HostRoleCommandEntity>());
     stageEntity.setRoleSuccessCriterias(new ArrayList<RoleSuccessCriteriaEntity>());
-    stageEntity.setClusterHostInfo(clusterHostInfo);
     stageEntity.setCommandParamsStage(commandParamsStage);
     stageEntity.setHostParamsStage(hostParamsStage);
     stageEntity.setCommandExecutionType(commandExecutionType);
@@ -264,14 +259,6 @@ public class Stage {
     return commandsToScheduleSet;
   }
 
-  public String getClusterHostInfo() {
-    return clusterHostInfo;
-  }
-
-  public void setClusterHostInfo(String clusterHostInfo) {
-    this.clusterHostInfo = clusterHostInfo;
-  }
-
   public String getCommandParamsStage() {
     return commandParamsStage;
   }
@@ -935,7 +922,6 @@ public class Stage {
     builder.append("clusterName=").append(clusterName).append("\n");
     builder.append("logDir=").append(logDir).append("\n");
     builder.append("requestContext=").append(requestContext).append("\n");
-    builder.append("clusterHostInfo=").append(clusterHostInfo).append("\n");
     builder.append("commandParamsStage=").append(commandParamsStage).append("\n");
     builder.append("hostParamsStage=").append(hostParamsStage).append("\n");
     builder.append("status=").append(status).append("\n");

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/StageFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/StageFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/StageFactory.java
index a88558c..0d1a326 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/StageFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/StageFactory.java
@@ -29,7 +29,6 @@ public interface StageFactory {
       @Assisted("clusterName") String clusterName,
       @Assisted("clusterId") long clusterId,
       @Assisted("requestContext") String requestContext,
-      @Assisted("clusterHostInfo") String clusterHostInfo,
       @Assisted("commandParamsStage") String commandParamsStage,
       @Assisted("hostParamsStage") String hostParamsStage);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/StageFactoryImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/StageFactoryImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/StageFactoryImpl.java
index 3cad82d..0827639 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/StageFactoryImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/StageFactoryImpl.java
@@ -43,7 +43,6 @@ public class StageFactoryImpl implements StageFactory {
    * @param clusterName Cluster name
    * @param clusterId Cluster ID
    * @param requestContext Information about the context of the request
-   * @param clusterHostInfo Information about the host
    * @param commandParamsStage Information about the command parameters
    * @param hostParamsStage Information about the host parameters for the stage
    * @return An instance of a Stage with the provided params.
@@ -54,10 +53,9 @@ public class StageFactoryImpl implements StageFactory {
                          @Assisted("clusterName") String clusterName,
                          @Assisted("clusterId") long clusterId,
                          @Assisted("requestContext") String requestContext,
-                         @Assisted("clusterHostInfo") String clusterHostInfo,
                          @Assisted("commandParamsStage") String commandParamsStage,
                          @Assisted("hostParamsStage") String hostParamsStage) {
-    return new Stage(requestId, logDir, clusterName, clusterId, requestContext, clusterHostInfo, commandParamsStage, hostParamsStage,
+    return new Stage(requestId, logDir, clusterName, clusterId, requestContext, commandParamsStage, hostParamsStage,
         injector.getInstance(HostRoleCommandFactory.class),
         injector.getInstance(ExecutionCommandWrapperFactory.class));
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index ab8b659..520dcab 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -75,10 +75,12 @@ import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.metadata.ActionMetadata;
 import org.apache.ambari.server.orm.dao.ClusterVersionDAO;
 import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
+import org.apache.ambari.server.orm.dao.RequestDAO;
 import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
 import org.apache.ambari.server.orm.entities.OperatingSystemEntity;
 import org.apache.ambari.server.orm.entities.RepositoryEntity;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.RequestEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.CommandScriptDefinition;
@@ -176,6 +178,9 @@ public class AmbariCustomCommandExecutionHelper {
   private ClusterVersionDAO clusterVersionDAO;
 
   @Inject
+  private RequestDAO requestDAO;
+
+  @Inject
   private HostRoleCommandDAO hostRoleCommandDAO;
 
   private Map<String, Map<String, Map<String, String>>> configCredentialsForService = new HashMap<>();
@@ -1014,7 +1019,12 @@ public class AmbariCustomCommandExecutionHelper {
           StageUtils.getClusterHostInfo(cluster));
 
       // Reset cluster host info as it has changed
-      stage.setClusterHostInfo(clusterHostInfoJson);
+      RequestEntity requestEntity = requestDAO.findByPK(stage.getRequestId());
+
+      if (requestEntity != null) {
+        requestEntity.setClusterHostInfo(clusterHostInfoJson);
+        requestDAO.merge(requestEntity);
+      }
 
       Map<String, String> commandParams = new HashMap<>();
       if (serviceName.equals(Service.Type.HBASE.name())) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index f9375aa..25b12de 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -1042,14 +1042,14 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
   }
 
   private Stage createNewStage(long id, Cluster cluster, long requestId,
-                               String requestContext, String clusterHostInfo,
+                               String requestContext,
                                String commandParamsStage, String hostParamsStage) {
     String logDir = BASE_LOG_DIR + File.pathSeparator + requestId;
     Stage stage =
         stageFactory.createNew(requestId, logDir,
           null == cluster ? null : cluster.getClusterName(),
           null == cluster ? -1L : cluster.getClusterId(),
-          requestContext, clusterHostInfo, commandParamsStage,
+          requestContext, commandParamsStage,
           hostParamsStage);
     stage.setStageId(id);
     return stage;
@@ -2631,8 +2631,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
           customCommandExecutionHelper.createDefaultHostParams(cluster));
 
       Stage stage = createNewStage(requestStages.getLastStageId(), cluster,
-          requestStages.getId(), requestProperties.get(REQUEST_CONTEXT_PROPERTY),
-          clusterHostInfoJson, "{}", hostParamsJson);
+          requestStages.getId(), requestProperties.get(REQUEST_CONTEXT_PROPERTY), "{}", hostParamsJson);
       boolean skipFailure = false;
       if (requestProperties.containsKey(Setting.SETTING_NAME_SKIP_FAILURE) && requestProperties.get(Setting.SETTING_NAME_SKIP_FAILURE).equalsIgnoreCase("true")) {
         skipFailure = true;
@@ -2982,6 +2981,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
         rg.setCommandExecutionType(CommandExecutionType.DEPENDENCY_ORDERED);
       }
       rg.build(stage);
+      requestStages.setClusterHostInfo(clusterHostInfoJson);
       requestStages.addStages(rg.getStages());
 
       if (!componentsToEnableKerberos.isEmpty()) {
@@ -3067,9 +3067,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     Map<String, Set<String>> clusterHostInfo = StageUtils.getClusterHostInfo(cluster);
     String clusterHostInfoJson = StageUtils.getGson().toJson(clusterHostInfo);
     Map<String, String> hostParamsCmd = customCommandExecutionHelper.createDefaultHostParams(cluster);
-    Stage stage = createNewStage(0, cluster,
-                                 1, "",
-                                 clusterHostInfoJson, "{}", "");
+    Stage stage = createNewStage(0, cluster, 1, "", "{}", "");
 
 
     Map<String, Map<String, String>> configTags = configHelper.getEffectiveDesiredTags(cluster, scHost.getHostName());
@@ -4037,7 +4035,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     commandParamsForStage = gson.toJson(commandParamsStage);
 
     Stage stage = createNewStage(requestStageContainer.getLastStageId(), cluster, requestId, requestContext,
-        jsons.getClusterHostInfo(), commandParamsForStage, jsons.getHostParamsForStage());
+        commandParamsForStage, jsons.getHostParamsForStage());
 
     if (actionRequest.isCommand()) {
       customCommandExecutionHelper.addExecutionCommandsToStage(actionExecContext, stage,
@@ -4058,6 +4056,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     List<Stage> stages = rg.getStages();
 
     if (stages != null && !stages.isEmpty()) {
+      requestStageContainer.setClusterHostInfo(jsons.getClusterHostInfo());
       requestStageContainer.addStages(stages);
     }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
index 6687942..5c4728a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
@@ -2186,14 +2186,13 @@ public class KerberosHelperImpl implements KerberosHelper {
    * @return a newly created Stage
    */
   private Stage createNewStage(long id, Cluster cluster, long requestId,
-                               String requestContext, String clusterHostInfo,
-                               String commandParams, String hostParams) {
+                               String requestContext, String commandParams, String hostParams) {
+
     Stage stage = stageFactory.createNew(requestId,
         BASE_LOG_DIR + File.pathSeparator + requestId,
         cluster.getClusterName(),
         cluster.getClusterId(),
         requestContext,
-        clusterHostInfo,
         commandParams,
         hostParams);
 
@@ -2221,14 +2220,14 @@ public class KerberosHelperImpl implements KerberosHelper {
    * @param timeout           the timeout for the task/action  @return a newly created Stage
    */
   private Stage createServerActionStage(long id, Cluster cluster, long requestId,
-                                        String requestContext, String clusterHostInfo,
+                                        String requestContext,
                                         String commandParams, String hostParams,
                                         Class<? extends ServerAction> actionClass,
                                         ServiceComponentHostServerActionEvent event,
                                         Map<String, String> commandParameters, String commandDetail,
                                         Integer timeout) throws AmbariException {
 
-    Stage stage = createNewStage(id, cluster, requestId, requestContext, clusterHostInfo, commandParams, hostParams);
+    Stage stage = createNewStage(id, cluster, requestId, requestContext, commandParams, hostParams);
     stage.addServerActionCommand(actionClass.getName(), null, Role.AMBARI_SERVER_ACTION,
         RoleCommand.EXECUTE, cluster.getClusterName(), event, commandParameters, commandDetail,
         ambariManagementController.findConfigurationTagsWithOverrides(cluster, null), timeout,
@@ -2769,7 +2768,6 @@ public class KerberosHelperImpl implements KerberosHelper {
           cluster,
           requestStageContainer.getId(),
           "Preparing Operations",
-          clusterHostInfoJson,
           "{}",
           hostParamsJson,
           PrepareEnableKerberosServerAction.class,
@@ -2780,6 +2778,8 @@ public class KerberosHelperImpl implements KerberosHelper {
 
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -2792,7 +2792,6 @@ public class KerberosHelperImpl implements KerberosHelper {
           cluster,
           requestStageContainer.getId(),
           "Preparing Operations",
-          clusterHostInfoJson,
           "{}",
           hostParamsJson,
           PrepareKerberosIdentitiesServerAction.class,
@@ -2803,6 +2802,8 @@ public class KerberosHelperImpl implements KerberosHelper {
 
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -2815,7 +2816,6 @@ public class KerberosHelperImpl implements KerberosHelper {
           cluster,
           requestStageContainer.getId(),
           "Preparing Operations",
-          clusterHostInfoJson,
           "{}",
           hostParamsJson,
           PrepareDisableKerberosServerAction.class,
@@ -2826,6 +2826,8 @@ public class KerberosHelperImpl implements KerberosHelper {
 
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -2838,7 +2840,6 @@ public class KerberosHelperImpl implements KerberosHelper {
           cluster,
           requestStageContainer.getId(),
           "Create Principals",
-          clusterHostInfoJson,
           "{}",
           hostParamsJson,
           CreatePrincipalsServerAction.class,
@@ -2849,6 +2850,8 @@ public class KerberosHelperImpl implements KerberosHelper {
 
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -2861,7 +2864,6 @@ public class KerberosHelperImpl implements KerberosHelper {
           cluster,
           requestStageContainer.getId(),
           "Destroy Principals",
-          clusterHostInfoJson,
           "{}",
           hostParamsJson,
           DestroyPrincipalsServerAction.class,
@@ -2872,6 +2874,8 @@ public class KerberosHelperImpl implements KerberosHelper {
 
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -2884,7 +2888,6 @@ public class KerberosHelperImpl implements KerberosHelper {
           cluster,
           requestStageContainer.getId(),
           "Configure Ambari Identity",
-          clusterHostInfoJson,
           "{}",
           hostParamsJson,
           ConfigureAmbariIdentitiesServerAction.class,
@@ -2895,6 +2898,8 @@ public class KerberosHelperImpl implements KerberosHelper {
 
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -2907,7 +2912,6 @@ public class KerberosHelperImpl implements KerberosHelper {
           cluster,
           requestStageContainer.getId(),
           "Create Keytabs",
-          clusterHostInfoJson,
           "{}",
           hostParamsJson,
           CreateKeytabFilesServerAction.class,
@@ -2918,6 +2922,8 @@ public class KerberosHelperImpl implements KerberosHelper {
 
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -2933,7 +2939,6 @@ public class KerberosHelperImpl implements KerberosHelper {
           cluster,
           requestStageContainer.getId(),
           "Distribute Keytabs",
-          clusterHostInfoJson,
           StageUtils.getGson().toJson(commandParameters),
           hostParamsJson);
 
@@ -2958,6 +2963,8 @@ public class KerberosHelperImpl implements KerberosHelper {
 
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -2999,12 +3006,13 @@ public class KerberosHelperImpl implements KerberosHelper {
         cluster,
         requestStageContainer.getId(),
         "Disable security",
-        clusterHostInfoJson,
         StageUtils.getGson().toJson(commandParameters),
         hostParamsJson);
       addDisableSecurityCommandToAllServices(cluster, stage);
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -3042,7 +3050,6 @@ public class KerberosHelperImpl implements KerberosHelper {
         cluster,
         requestStageContainer.getId(),
         "Stopping ZooKeeper",
-        clusterHostInfoJson,
         StageUtils.getGson().toJson(commandParameters),
         hostParamsJson);
       for (ServiceComponent component : zookeeper.getServiceComponents().values()) {
@@ -3056,6 +3063,8 @@ public class KerberosHelperImpl implements KerberosHelper {
       }
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -3071,7 +3080,6 @@ public class KerberosHelperImpl implements KerberosHelper {
           cluster,
           requestStageContainer.getId(),
           "Delete Keytabs",
-          clusterHostInfoJson,
           StageUtils.getGson().toJson(commandParameters),
           hostParamsJson);
 
@@ -3099,6 +3107,8 @@ public class KerberosHelperImpl implements KerberosHelper {
 
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -3111,7 +3121,6 @@ public class KerberosHelperImpl implements KerberosHelper {
           cluster,
           requestStageContainer.getId(),
           "Update Configurations",
-          clusterHostInfoJson,
           "{}",
           hostParamsJson,
           UpdateKerberosConfigsServerAction.class,
@@ -3122,6 +3131,8 @@ public class KerberosHelperImpl implements KerberosHelper {
 
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -3145,7 +3156,6 @@ public class KerberosHelperImpl implements KerberosHelper {
           cluster,
           requestStageContainer.getId(),
           "Finalize Operations",
-          clusterHostInfoJson,
           "{}",
           hostParamsJson,
           FinalizeKerberosServerAction.class,
@@ -3155,6 +3165,8 @@ public class KerberosHelperImpl implements KerberosHelper {
 
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -3167,7 +3179,6 @@ public class KerberosHelperImpl implements KerberosHelper {
           cluster,
           requestStageContainer.getId(),
           "Kerberization Clean Up",
-          clusterHostInfoJson,
           "{}",
           hostParamsJson,
           CleanupServerAction.class,
@@ -3178,6 +3189,8 @@ public class KerberosHelperImpl implements KerberosHelper {
 
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index a8bb696..f8016a5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -542,7 +542,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
       }
 
       Stage stage = stageFactory.createNew(req.getId(), "/tmp/ambari", cluster.getClusterName(),
-          cluster.getClusterId(), stageName, clusterHostInfoJson, "{}", hostParamsJson);
+          cluster.getClusterId(), stageName, "{}", hostParamsJson);
 
       // if you have 1000 hosts (10 stages with 100 installs), we want to ensure
       // that a single failure doesn't cause all other stages to abort; set the
@@ -590,6 +590,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
               repoVersionEnt.getDisplayName()));
     }
 
+    req.setClusterHostInfo(clusterHostInfoJson);
     req.addStages(stages);
     req.persist();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
index 811ce9b..92edeb8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
@@ -513,7 +513,6 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
             cluster.getClusterName(),
             cluster.getClusterId(),
             caption,
-            clusterHostInfoJson,
             "{}",
             StageUtils.getGson().toJson(hostLevelParams));
 
@@ -522,6 +521,7 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
       stageId = 1L;
     }
     stage.setStageId(stageId);
+    req.setClusterHostInfo(clusterHostInfoJson);
     req.addStages(Collections.singletonList(stage));
 
     try {
@@ -561,7 +561,6 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
       cluster.getClusterName(),
       cluster.getClusterId(),
       caption,
-      clusterHostInfoJson,
       StageUtils.getGson().toJson(commandParams),
       StageUtils.getGson().toJson(hostLevelParams));
 
@@ -570,6 +569,7 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
       stageId = 1L;
     }
     stage.setStageId(stageId);
+    req.setClusterHostInfo(clusterHostInfoJson);
     req.addStages(Collections.singletonList(stage));
 
     actionContext = new ActionExecutionContext(

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
index c405995..d82ff25 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
@@ -103,6 +103,7 @@ public class RequestResourceProvider extends AbstractControllerResourceProvider
   public static final String REQUEST_SOURCE_SCHEDULE_HREF = "Requests/request_schedule/href";
   protected static final String REQUEST_TYPE_ID = "Requests/type";
   protected static final String REQUEST_INPUTS_ID = "Requests/inputs";
+  protected static final String REQUEST_CLUSTER_HOST_INFO_ID = "Requests/cluster_host_info";
   protected static final String REQUEST_RESOURCE_FILTER_ID = "Requests/resource_filters";
   protected static final String REQUEST_OPERATION_LEVEL_ID = "Requests/operation_level";
   protected static final String REQUEST_CREATE_TIME_ID = "Requests/create_time";
@@ -158,7 +159,9 @@ public class RequestResourceProvider extends AbstractControllerResourceProvider
     REQUEST_QUEUED_TASK_CNT_ID,
     REQUEST_PROGRESS_PERCENT_ID,
     REQUEST_REMOVE_PENDING_HOST_REQUESTS_ID,
-    REQUEST_PENDING_HOST_REQUEST_COUNT_ID);
+    REQUEST_PENDING_HOST_REQUEST_COUNT_ID,
+    REQUEST_CLUSTER_HOST_INFO_ID
+  );
 
   // ----- Constructors ----------------------------------------------------
 
@@ -752,6 +755,10 @@ public class RequestResourceProvider extends AbstractControllerResourceProvider
       resource.setProperty(REQUEST_INPUTS_ID, value);
     }
 
+    if (isPropertyRequested(REQUEST_CLUSTER_HOST_INFO_ID, requestedPropertyIds)) {
+      resource.setProperty(REQUEST_CLUSTER_HOST_INFO_ID, entity.getClusterHostInfo());
+    }
+
     setResourceProperty(resource, REQUEST_RESOURCE_FILTER_ID,
         org.apache.ambari.server.actionmanager.Request.filtersFromEntity(entity),
         requestedPropertyIds);

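With REQUEST_CLUSTER_HOST_INFO_ID registered, the request-level map becomes queryable through the Requests endpoint. Assuming the usual Ambari REST conventions (the cluster name and request id below are placeholders), a query along these lines should return it:

GET /api/v1/clusters/c1/requests/42?fields=Requests/cluster_host_info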
http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestStageContainer.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestStageContainer.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestStageContainer.java
index c37be91..3f67704 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestStageContainer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestStageContainer.java
@@ -65,6 +65,8 @@ public class RequestStageContainer {
 
   private ExecuteActionRequest actionRequest = null;
 
+  private String clusterHostInfo = null;
+
   /**
    * Logger
    */
@@ -99,6 +101,7 @@ public class RequestStageContainer {
     this.requestFactory = factory;
     this.actionManager = manager;
     this.actionRequest = actionRequest;
+    this.clusterHostInfo = "{}";
   }
 
   /**
@@ -110,6 +113,10 @@ public class RequestStageContainer {
     return id;
   }
 
+  public void setClusterHostInfo(String clusterHostInfo){
+    this.clusterHostInfo = clusterHostInfo;
+  }
+
   /**
    * Add stages to request.
    *
@@ -202,8 +209,8 @@ public class RequestStageContainer {
   public void persist() throws AmbariException {
     if (!stages.isEmpty()) {
       Request request = (null == actionRequest)
-          ? requestFactory.createNewFromStages(stages)
-          : requestFactory.createNewFromStages(stages, actionRequest);
+          ? requestFactory.createNewFromStages(stages, clusterHostInfo)
+          : requestFactory.createNewFromStages(stages, clusterHostInfo, actionRequest);
 
       if (null != requestContext) {
         request.setRequestContext(requestContext);

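Callers that assemble requests through RequestStageContainer now set the request-level value before persisting, as the KerberosHelperImpl and controller diffs above do repeatedly. A condensed sketch of that flow (assuming a container already wired with its RequestFactory and ActionManager, and a cluster in scope):

// Serialize the topology once for the whole request.
String clusterHostInfoJson = StageUtils.getGson().toJson(StageUtils.getClusterHostInfo(cluster));

requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
requestStageContainer.addStages(roleGraph.getStages());

// persist() forwards clusterHostInfo to requestFactory.createNewFromStages(...),
// storing the JSON on the request row rather than on every stage.
requestStageContainer.persist();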
http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
index ec3688d..06aa68b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
@@ -47,6 +47,7 @@ import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
 import org.apache.ambari.server.controller.utilities.PredicateHelper;
 import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
 import org.apache.ambari.server.orm.dao.HostRoleCommandStatusSummaryDTO;
+import org.apache.ambari.server.orm.dao.RequestDAO;
 import org.apache.ambari.server.orm.dao.StageDAO;
 import org.apache.ambari.server.orm.entities.StageEntity;
 import org.apache.ambari.server.state.Cluster;
@@ -91,7 +92,6 @@ public class StageResourceProvider extends AbstractControllerResourceProvider im
   public static final String STAGE_REQUEST_ID = "Stage/request_id";
   public static final String STAGE_LOG_INFO = "Stage/log_info";
   public static final String STAGE_CONTEXT = "Stage/context";
-  public static final String STAGE_CLUSTER_HOST_INFO = "Stage/cluster_host_info";
   public static final String STAGE_COMMAND_PARAMS = "Stage/command_params";
   public static final String STAGE_HOST_PARAMS = "Stage/host_params";
   public static final String STAGE_SKIPPABLE = "Stage/skippable";
@@ -119,7 +119,6 @@ public class StageResourceProvider extends AbstractControllerResourceProvider im
     PROPERTY_IDS.add(STAGE_REQUEST_ID);
     PROPERTY_IDS.add(STAGE_LOG_INFO);
     PROPERTY_IDS.add(STAGE_CONTEXT);
-    PROPERTY_IDS.add(STAGE_CLUSTER_HOST_INFO);
     PROPERTY_IDS.add(STAGE_COMMAND_PARAMS);
     PROPERTY_IDS.add(STAGE_HOST_PARAMS);
     PROPERTY_IDS.add(STAGE_SKIPPABLE);
@@ -307,12 +306,6 @@ public class StageResourceProvider extends AbstractControllerResourceProvider im
     setResourceProperty(resource, STAGE_REQUEST_ID, entity.getRequestId(), requestedIds);
     setResourceProperty(resource, STAGE_CONTEXT, entity.getRequestContext(), requestedIds);
 
-    // this property is lazy loaded in JPA; don't use it unless requested
-    if (isPropertyRequested(STAGE_CLUSTER_HOST_INFO, requestedIds)) {
-      resource.setProperty(STAGE_CLUSTER_HOST_INFO, entity.getClusterHostInfo());
-    }
-
-    // this property is lazy loaded in JPA; don't use it unless requested
     if (isPropertyRequested(STAGE_COMMAND_PARAMS, requestedIds)) {
       String value = entity.getCommandParamsStage();
       if (!StringUtils.isBlank(value)) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 623851a..0ebf3aa 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -1371,8 +1371,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
         cluster, context.getEffectiveStackId());
 
     Stage stage = s_stageFactory.get().createNew(request.getId().longValue(), "/tmp/ambari",
-        cluster.getClusterName(), cluster.getClusterId(), entity.getText(),
-        jsons.getClusterHostInfo(), jsons.getCommandParamsForStage(),
+        cluster.getClusterName(), cluster.getClusterId(), entity.getText(), jsons.getCommandParamsForStage(),
         jsons.getHostParamsForStage());
 
     stage.setSkippable(skippable);
@@ -1454,8 +1453,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
         cluster, context.getEffectiveStackId());
 
     Stage stage = s_stageFactory.get().createNew(request.getId().longValue(), "/tmp/ambari",
-        cluster.getClusterName(), cluster.getClusterId(), entity.getText(),
-        jsons.getClusterHostInfo(), jsons.getCommandParamsForStage(),
+        cluster.getClusterName(), cluster.getClusterId(), entity.getText(), jsons.getCommandParamsForStage(),
         jsons.getHostParamsForStage());
 
     stage.setSkippable(skippable);
@@ -1516,8 +1514,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
         cluster, context.getEffectiveStackId());
 
     Stage stage = s_stageFactory.get().createNew(request.getId().longValue(), "/tmp/ambari",
-        cluster.getClusterName(), cluster.getClusterId(), entity.getText(),
-        jsons.getClusterHostInfo(), jsons.getCommandParamsForStage(),
+        cluster.getClusterName(), cluster.getClusterId(), entity.getText(), jsons.getCommandParamsForStage(),
         jsons.getHostParamsForStage());
 
     stage.setSkippable(skippable);
@@ -1648,8 +1645,8 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
         cluster, context.getEffectiveStackId());
 
     Stage stage = s_stageFactory.get().createNew(request.getId().longValue(), "/tmp/ambari",
-        cluster.getClusterName(), cluster.getClusterId(), stageText, jsons.getClusterHostInfo(),
-        jsons.getCommandParamsForStage(), jsons.getHostParamsForStage());
+        cluster.getClusterName(), cluster.getClusterId(), stageText, jsons.getCommandParamsForStage(),
+        jsons.getHostParamsForStage());
 
     stage.setSkippable(skippable);
     stage.setAutoSkipFailureSupported(supportsAutoSkipOnFailure);

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/hooks/users/UserHookService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/hooks/users/UserHookService.java b/ambari-server/src/main/java/org/apache/ambari/server/hooks/users/UserHookService.java
index 69463ab..149e2f0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/hooks/users/UserHookService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/hooks/users/UserHookService.java
@@ -139,7 +139,7 @@ public class UserHookService implements HookService {
       String stageContextText = String.format(POST_USER_CREATION_REQUEST_CONTEXT, ctx.getUserGroups().size());
 
       Stage stage = stageFactory.createNew(requestStageContainer.getId(), configuration.getServerTempDir() + File.pathSeparatorChar + requestStageContainer.getId(), clsData.getClusterName(),
-          clsData.getClusterId(), stageContextText, "{}", "{}", "{}");
+          clsData.getClusterId(), stageContextText, "{}", "{}");
       stage.setStageId(requestStageContainer.getLastStageId());
 
       ServiceComponentHostServerActionEvent serverActionEvent = new ServiceComponentHostServerActionEvent("ambari-server-host", System.currentTimeMillis());
@@ -148,6 +148,7 @@ public class UserHookService implements HookService {
       stage.addServerActionCommand(PostUserCreationHookServerAction.class.getName(), "ambari", Role.AMBARI_SERVER_ACTION,
           RoleCommand.EXECUTE, clsData.getClusterName(), serverActionEvent, commandParams, stageContextText, null, null, false, false);
 
+      requestStageContainer.setClusterHostInfo("{}");
       requestStageContainer.addStages(Collections.singletonList(stage));
       requestStageContainer.persist();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessor.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessor.java
index bf8ff48..4f29d61 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessor.java
@@ -622,6 +622,30 @@ public interface DBAccessor {
    */
   void addDefaultConstraint(String tableName, DBColumnInfo column) throws SQLException;
 
+  /**
+   * Moves column data from {@code sourceTableName} to {@code targetTableName}, using the {@code sourceIDFieldName} and
+   * {@code targetIDFieldName} keys to match the corresponding rows.
+   *
+   * @param sourceTableName
+   *          the source table name
+   * @param sourceColumn
+   *          the source column name
+   * @param sourceIDFieldName
+   *          the source id key field name, matched against {@code targetIDFieldName}
+   * @param targetTableName
+   *          the target table name
+   * @param targetColumn
+   *          the target column name
+   * @param targetIDFieldName
+   *          the target id key name, matched against {@code sourceIDFieldName}
+   * @param isColumnNullable
+   *          whether the target column should be nullable
+   *
+   * @throws SQLException if the underlying DDL or DML statements fail
+   */
+  void moveColumnToAnotherTable(String sourceTableName, DBColumnInfo sourceColumn, String sourceIDFieldName,
+       String targetTableName, DBColumnInfo targetColumn, String targetIDFieldName, boolean isColumnNullable) throws SQLException;
+
   enum DbType {
     ORACLE,
     MYSQL,

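A hedged usage sketch of the new method; the table, column, and key names mirror the UpgradeCatalog251 change later in this patch:

    DBColumnInfo sourceColumn = new DBColumnInfo("cluster_host_info", byte[].class, null, null, false);
    DBColumnInfo targetColumn = new DBColumnInfo("cluster_host_info", byte[].class, null, null, false);
    // Copies stage.cluster_host_info into request.cluster_host_info keyed on request_id,
    // then drops the column from stage; the final argument keeps the target column NOT NULL.
    dbAccessor.moveColumnToAnotherTable("stage", sourceColumn, "request_id",
        "request", targetColumn, "request_id", false);
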
http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
index c11589d..9c6425c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
@@ -1304,4 +1304,48 @@ public class DBAccessorImpl implements DBAccessor {
 
     return valueString;
   }
+
+  /**
+   * Moves column data from {@code sourceTableName} to {@code targetTableName}, using the {@code sourceIDFieldName} and
+   * {@code targetIDFieldName} keys to match the corresponding rows.
+   *
+   * @param sourceTableName
+   *          the source table name
+   * @param sourceColumn
+   *          the source column name
+   * @param sourceIDFieldName
+   *          the source id key field name, matched against {@code targetIDFieldName}
+   * @param targetTableName
+   *          the target table name
+   * @param targetColumn
+   *          the target column name
+   * @param targetIDFieldName
+   *          the target id key name, matched against {@code sourceIDFieldName}
+   * @param isColumnNullable
+   *          whether the target column should be nullable
+   *
+   * @throws SQLException if the underlying DDL or DML statements fail
+   */
+  @Override
+  public void moveColumnToAnotherTable(String sourceTableName, DBColumnInfo sourceColumn, String sourceIDFieldName,
+              String targetTableName, DBColumnInfo targetColumn, String targetIDFieldName, boolean isColumnNullable) throws SQLException {
+
+    if (this.tableHasColumn(sourceTableName, sourceIDFieldName)) {
+
+      final String moveSQL = dbmsHelper.getCopyColumnToAnotherTableStatement(sourceTableName, sourceColumn.getName(),
+        sourceIDFieldName, targetTableName, targetColumn.getName(), targetIDFieldName);
+
+      targetColumn.setNullable(true);  // add the column as nullable first so existing rows can be populated
+
+      this.addColumn(targetTableName, targetColumn);
+      this.executeUpdate(moveSQL, false);
+
+      if (!isColumnNullable) {
+        // this call will trigger an exception if some record is null
+        // ToDo: add default option
+        this.setColumnNullable(targetTableName, targetColumn.getName(), false);
+      }
+      this.dropColumn(sourceTableName, sourceColumn.getName());
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestEntity.java
index 099d08f..adf6647 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestEntity.java
@@ -26,6 +26,7 @@ import javax.persistence.Column;
 import javax.persistence.Entity;
 import javax.persistence.EnumType;
 import javax.persistence.Enumerated;
+import javax.persistence.FetchType;
 import javax.persistence.Id;
 import javax.persistence.JoinColumn;
 import javax.persistence.Lob;
@@ -67,6 +68,16 @@ public class RequestEntity {
   @Basic
   private String commandName;
 
+  /**
+   * On large clusters, this value can run to tens of thousands of kilobytes. During
+   * an upgrade, all stages are loaded in memory for every request, which can
+   * lead to an OOM. As a result, lazy load this since it's barely ever
+   * requested or used.
+   */
+  @Column(name = "cluster_host_info")
+  @Basic(fetch = FetchType.LAZY)
+  private byte[] clusterHostInfo;
+
   @Column(name = "inputs")
   @Lob
   private byte[] inputs = new byte[0];
@@ -151,6 +162,14 @@ public class RequestEntity {
     this.stages = stages;
   }
 
+  public String getClusterHostInfo() {
+    return clusterHostInfo == null ? "{}" : new String(clusterHostInfo);
+  }
+
+  public void setClusterHostInfo(String clusterHostInfo) {
+    this.clusterHostInfo = clusterHostInfo.getBytes();
+  }
+
   public Long getCreateTime() {
     return createTime;
   }

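A short round-trip sketch for the new accessors (the JSON value is illustrative):

    RequestEntity entity = new RequestEntity();
    entity.setClusterHostInfo("{\"all_hosts\":[\"host1\",\"host2\"]}"); // persisted as bytes
    String json = entity.getClusterHostInfo(); // returns "{}" when nothing was ever set
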
http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntity.java
index f688412..6ee0a3b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntity.java
@@ -96,16 +96,6 @@ public class StageEntity {
    * lead to an OOM. As a result, lazy load this since it's barely ever
    * requested or used.
    */
-  @Column(name = "cluster_host_info")
-  @Basic(fetch = FetchType.LAZY)
-  private byte[] clusterHostInfo;
-
-  /**
-   * On large clusters, this value can be in the 10,000's of kilobytes. During
-   * an upgrade, all stages are loaded in memory for every request, which can
-   * lead to an OOM. As a result, lazy load this since it's barely ever
-   * requested or used.
-   */
   @Column(name = "command_params")
   @Basic(fetch = FetchType.LAZY)
   private byte[] commandParamsStage;
@@ -187,14 +177,6 @@ public class StageEntity {
     return defaultString(requestContext);
   }
 
-  public String getClusterHostInfo() {
-    return clusterHostInfo == null ? new String() : new String(clusterHostInfo);
-  }
-
-  public void setClusterHostInfo(String clusterHostInfo) {
-    this.clusterHostInfo = clusterHostInfo.getBytes();
-  }
-
   public String getCommandParamsStage() {
     return commandParamsStage == null ? new String() : new String(commandParamsStage);
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntity_.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntity_.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntity_.java
index dc39e55..637a18b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntity_.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntity_.java
@@ -41,7 +41,6 @@ public class StageEntity_ {
   public static volatile SingularAttribute<StageEntity, String> logInfo;
   public static volatile SingularAttribute<StageEntity, String> requestContext;
 
-  public static volatile SingularAttribute<StageEntity, byte[]> clusterHostInfo;
   public static volatile SingularAttribute<StageEntity, byte[]> commandParamsStage;
   public static volatile SingularAttribute<StageEntity, byte[]> hostParamsStage;
 
@@ -74,9 +73,6 @@ public class StageEntity_ {
     mapping.put(StageResourceProvider.STAGE_CONTEXT,
         Collections.singletonList(requestContext));
 
-    mapping.put(StageResourceProvider.STAGE_CLUSTER_HOST_INFO,
-        Collections.singletonList(clusterHostInfo));
-
     mapping.put(StageResourceProvider.STAGE_COMMAND_PARAMS,
         Collections.singletonList(commandParamsStage));
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/DbmsHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/DbmsHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/DbmsHelper.java
index d374ddc..7f74bb0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/DbmsHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/DbmsHelper.java
@@ -127,6 +127,27 @@ public interface DbmsHelper {
   String getSetNullableStatement(String tableName, DBAccessor.DBColumnInfo columnInfo, boolean nullable);
 
   /**
+   * Gets the {@code UPDATE} statement that copies a column from {@code sourceTable} to {@code targetTable}, matching
+   * rows on the {@code sourceIDColumnName} and {@code targetIDColumnName} table keys.
+   *
+   * @param sourceTable
+   *          the source table name
+   * @param sourceColumnName
+   *          the source column name
+   * @param sourceIDColumnName
+   *          the source key id column, used to match the corresponding rows in {@code targetTable}
+   * @param targetTable
+   *          the destination table name
+   * @param targetColumnName
+   *          the destination column name
+   * @param targetIDColumnName
+   *          the destination key id column name, which should match {@code sourceIDColumnName}
+   * @return the dialect-specific copy statement
+   */
+  String getCopyColumnToAnotherTableStatement(String sourceTable, String sourceColumnName, String sourceIDColumnName,
+                                              String targetTable, String targetColumnName, String targetIDColumnName);
+
+  /**
    * Gets whether the database platform supports adding constraints after the
    * {@code NULL} constraint. Some databases, such as Oracle, don't allow this.
    * Unfortunately, EclipseLink hard codes the order of constraints.

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/GenericDbmsHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/GenericDbmsHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/GenericDbmsHelper.java
index f60c138..7e3092d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/GenericDbmsHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/GenericDbmsHelper.java
@@ -78,6 +78,14 @@ public class GenericDbmsHelper implements DbmsHelper {
     return stringBuilder.toString();
   }
 
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String getCopyColumnToAnotherTableStatement(String sourceTable, String sourceColumnName, String sourceIDColumnName, String targetTable, String targetColumnName, String targetIDColumnName) {
+    throw new UnsupportedOperationException("Column copy is not supported for generic DB");
+  }
+
   public StringBuilder writeAlterTableClause(StringBuilder builder, String tableName) {
     builder.append("ALTER TABLE ").append(tableName).append(" ");
     return builder;

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/MySqlHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/MySqlHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/MySqlHelper.java
index c693be5..0daea72 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/MySqlHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/MySqlHelper.java
@@ -93,4 +93,16 @@ public class MySqlHelper extends GenericDbmsHelper {
     }
     return defaultWriter;
   }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String getCopyColumnToAnotherTableStatement(String sourceTable, String sourceColumnName,
+         String sourceIDColumnName, String targetTable, String targetColumnName, String targetIDColumnName) {
+
+    return String.format("UPDATE %1$s AS a INNER JOIN %2$s AS b ON a.%5$s = b.%6$s SET a.%3$s = b.%4$s",
+      targetTable, sourceTable, targetColumnName, sourceColumnName, targetIDColumnName, sourceIDColumnName);
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/OracleHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/OracleHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/OracleHelper.java
index b5955b4..73356d1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/OracleHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/OracleHelper.java
@@ -79,4 +79,16 @@ public class OracleHelper extends GenericDbmsHelper {
   public boolean isConstraintSupportedAfterNullability() {
     return false;
   }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String getCopyColumnToAnotherTableStatement(String sourceTable, String sourceColumnName,
+         String sourceIDColumnName, String targetTable, String targetColumnName, String targetIDColumnName) {
+
+    // the sub-query should return only one value; ROWNUM is a safeguard for this
+    return String.format("UPDATE %1$s a SET (a.%3$s) = (SELECT b.%4$s FROM %2$s b WHERE b.%6$s = a.%5$s and ROWNUM < 2)",
+      targetTable, sourceTable, targetColumnName, sourceColumnName, targetIDColumnName, sourceIDColumnName);
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/PostgresHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/PostgresHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/PostgresHelper.java
index 2237f86..37c1184 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/PostgresHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/PostgresHelper.java
@@ -44,6 +44,18 @@ public class PostgresHelper extends GenericDbmsHelper {
     return builder;
   }
 
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String getCopyColumnToAnotherTableStatement(String sourceTable, String sourceColumnName,
+         String sourceIDColumnName, String targetTable, String targetColumnName, String targetIDColumnName) {
+
+    return String.format("UPDATE %1$s AS a SET %3$s = b.%4$s FROM %2$s AS b WHERE a.%5$s = b.%6$s",
+      targetTable, sourceTable, targetColumnName, sourceColumnName, targetIDColumnName, sourceIDColumnName);
+  }
+
+
   @Override
   public StringBuilder writeSetNullableString(StringBuilder builder,
       String tableName, DBAccessor.DBColumnInfo columnInfo, boolean nullable) {

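For reference, substituting the stage -> request arguments used by UpgradeCatalog251 below into the format strings above yields the following statements (derived for illustration, not part of the patch):

    -- MySQL
    UPDATE request AS a INNER JOIN stage AS b ON a.request_id = b.request_id SET a.cluster_host_info = b.cluster_host_info
    -- Oracle (ROWNUM < 2 guards the single-value sub-query)
    UPDATE request a SET (a.cluster_host_info) = (SELECT b.cluster_host_info FROM stage b WHERE b.request_id = a.request_id and ROWNUM < 2)
    -- PostgreSQL
    UPDATE request AS a SET cluster_host_info = b.cluster_host_info FROM stage AS b WHERE a.request_id = b.request_id
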
http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/stageplanner/RoleGraph.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stageplanner/RoleGraph.java b/ambari-server/src/main/java/org/apache/ambari/server/stageplanner/RoleGraph.java
index b54c7c7..b6b756b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stageplanner/RoleGraph.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stageplanner/RoleGraph.java
@@ -283,7 +283,7 @@ public class RoleGraph {
     Stage newStage = stageFactory.createNew(origStage.getRequestId(),
         origStage.getLogDir(), origStage.getClusterName(),
         origStage.getClusterId(),
-        origStage.getRequestContext(), origStage.getClusterHostInfo(),
+        origStage.getRequestContext(),
         origStage.getCommandParamsStage(), origStage.getHostParamsStage());
     newStage.setSuccessFactors(origStage.getSuccessFactors());
     newStage.setSkippable(origStage.isSkippable());

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
index 745890c..5ed33a8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
@@ -33,6 +33,8 @@ import org.apache.commons.lang.StringUtils;
 
 import com.google.inject.Inject;
 import com.google.inject.Injector;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The {@link UpgradeCatalog251} upgrades Ambari from 2.5.0 to 2.5.1.
@@ -44,6 +46,17 @@ public class UpgradeCatalog251 extends AbstractUpgradeCatalog {
 
   protected static final String KAFKA_BROKER_CONFIG = "kafka-broker";
 
+  private static final String STAGE_TABLE = "stage";
+  private static final String REQUEST_TABLE = "request";
+  private static final String CLUSTER_HOST_INFO_COLUMN = "cluster_host_info";
+  private static final String REQUEST_ID_COLUMN = "request_id";
+
+
+  /**
+   * Logger.
+   */
+  private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog251.class);
+
   /**
    * Constructor.
    *
@@ -76,6 +89,7 @@ public class UpgradeCatalog251 extends AbstractUpgradeCatalog {
   @Override
   protected void executeDDLUpdates() throws AmbariException, SQLException {
     addBackgroundColumnToHostRoleCommand();
+    moveClusterHostColumnFromStageToRequest();
   }
 
   /**
@@ -136,4 +150,19 @@ public class UpgradeCatalog251 extends AbstractUpgradeCatalog {
     dbAccessor.addColumn(HOST_ROLE_COMMAND_TABLE,
         new DBColumnInfo(HRC_IS_BACKGROUND_COLUMN, Short.class, null, 0, false));
   }
+
+  /**
+   * Moves the {@value #CLUSTER_HOST_INFO_COLUMN} column from the {@value #STAGE_TABLE} table to the
+   * {@value #REQUEST_TABLE} table.
+   *
+   * @throws SQLException if the column migration fails
+   */
+  private void moveClusterHostColumnFromStageToRequest() throws SQLException {
+    DBColumnInfo sourceColumn = new DBColumnInfo(CLUSTER_HOST_INFO_COLUMN, byte[].class, null, null, false);
+    DBColumnInfo targetColumn = new DBColumnInfo(CLUSTER_HOST_INFO_COLUMN, byte[].class, null, null, false);
+
+    dbAccessor.moveColumnToAnotherTable(STAGE_TABLE, sourceColumn, REQUEST_ID_COLUMN, REQUEST_TABLE, targetColumn,
+      REQUEST_ID_COLUMN, false);
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java b/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
index 6a88aea..f184f37 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
@@ -194,20 +194,20 @@ public class StageUtils {
     return requestStageIds;
   }
 
-  public static Stage getATestStage(long requestId, long stageId, String clusterHostInfo, String commandParamsStage, String hostParamsStage) {
+  public static Stage getATestStage(long requestId, long stageId, String commandParamsStage, String hostParamsStage) {
     String hostname;
     try {
       hostname = InetAddress.getLocalHost().getHostName();
     } catch (UnknownHostException e) {
       hostname = "host-dummy";
     }
-    return getATestStage(requestId, stageId, hostname, clusterHostInfo, commandParamsStage, hostParamsStage);
+    return getATestStage(requestId, stageId, hostname, commandParamsStage, hostParamsStage);
   }
 
   //For testing only
   @Inject
-  public static Stage getATestStage(long requestId, long stageId, String hostname, String clusterHostInfo, String commandParamsStage, String hostParamsStage) {
-    Stage s = stageFactory.createNew(requestId, "/tmp", "cluster1", 1L, "context", clusterHostInfo, commandParamsStage, hostParamsStage);
+  public static Stage getATestStage(long requestId, long stageId, String hostname, String commandParamsStage, String hostParamsStage) {
+    Stage s = stageFactory.createNew(requestId, "/tmp", "cluster1", 1L, "context", commandParamsStage, hostParamsStage);
     s.setStageId(stageId);
     long now = System.currentTimeMillis();
     s.addHostRoleExecutionCommand(hostname, Role.NAMENODE, RoleCommand.INSTALL,

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
index ece6600..15670f3 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
@@ -341,6 +341,7 @@ CREATE TABLE request (
   start_time BIGINT NOT NULL,
   status VARCHAR(255) NOT NULL DEFAULT 'PENDING',
   display_status VARCHAR(255) NOT NULL DEFAULT 'PENDING',
+  cluster_host_info BLOB NOT NULL,
   CONSTRAINT PK_request PRIMARY KEY (request_id),
   CONSTRAINT FK_request_schedule_id FOREIGN KEY (request_schedule_id) REFERENCES requestschedule (schedule_id));
 
@@ -352,7 +353,6 @@ CREATE TABLE stage (
   supports_auto_skip_failure SMALLINT DEFAULT 0 NOT NULL,
   log_info VARCHAR(255) NOT NULL,
   request_context VARCHAR(255),
-  cluster_host_info BLOB NOT NULL,
   command_params BLOB,
   host_params BLOB,
   command_execution_type VARCHAR(32) NOT NULL DEFAULT 'STAGE',

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
index e0f2ef9..7e41399 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
@@ -362,6 +362,7 @@ CREATE TABLE request (
   start_time BIGINT NOT NULL,
   status VARCHAR(255) NOT NULL DEFAULT 'PENDING',
   display_status VARCHAR(255) NOT NULL DEFAULT 'PENDING',
+  cluster_host_info LONGBLOB,
   CONSTRAINT PK_request PRIMARY KEY (request_id),
   CONSTRAINT FK_request_schedule_id FOREIGN KEY (request_schedule_id) REFERENCES requestschedule (schedule_id));
 
@@ -373,7 +374,6 @@ CREATE TABLE stage (
   supports_auto_skip_failure SMALLINT DEFAULT 0 NOT NULL,
   log_info VARCHAR(255) NOT NULL,
   request_context VARCHAR(255),
-  cluster_host_info LONGBLOB,
   command_params LONGBLOB,
   host_params LONGBLOB,
   command_execution_type VARCHAR(32) NOT NULL DEFAULT 'STAGE',

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
index 6d0f856..4d0274f 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
@@ -342,6 +342,7 @@ CREATE TABLE request (
   start_time NUMBER(19) NOT NULL,
   status VARCHAR(255) NOT NULL DEFAULT 'PENDING',
   display_status VARCHAR(255) NOT NULL DEFAULT 'PENDING',
+  cluster_host_info BLOB NOT NULL,
   CONSTRAINT PK_request PRIMARY KEY (request_id),
   CONSTRAINT FK_request_schedule_id FOREIGN KEY (request_schedule_id) REFERENCES requestschedule (schedule_id));
 
@@ -353,7 +354,6 @@ CREATE TABLE stage (
   supports_auto_skip_failure NUMBER(1) DEFAULT 0 NOT NULL,
   log_info VARCHAR2(255) NULL,
   request_context VARCHAR2(255) NULL,
-  cluster_host_info BLOB NOT NULL,
   command_params BLOB,
   host_params BLOB,
   command_execution_type VARCHAR2(32) DEFAULT 'STAGE' NOT NULL,

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
index 49b956b..cc933fa 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
@@ -341,6 +341,7 @@ CREATE TABLE request (
   start_time BIGINT NOT NULL,
   status VARCHAR(255) NOT NULL DEFAULT 'PENDING',
   display_status VARCHAR(255) NOT NULL DEFAULT 'PENDING',
+  cluster_host_info BYTEA NOT NULL,
   CONSTRAINT PK_request PRIMARY KEY (request_id),
   CONSTRAINT FK_request_schedule_id FOREIGN KEY (request_schedule_id) REFERENCES requestschedule (schedule_id));
 
@@ -352,7 +353,6 @@ CREATE TABLE stage (
   supports_auto_skip_failure SMALLINT DEFAULT 0 NOT NULL,
   log_info VARCHAR(255) NOT NULL,
   request_context VARCHAR(255),
-  cluster_host_info BYTEA NOT NULL,
   command_params BYTEA,
   host_params BYTEA,
   command_execution_type VARCHAR(32) DEFAULT 'STAGE' NOT NULL,

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
index c594a2e..5fc14d4 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
@@ -340,6 +340,7 @@ CREATE TABLE request (
   start_time NUMERIC(19) NOT NULL,
   status VARCHAR(255) NOT NULL DEFAULT 'PENDING',
   display_status VARCHAR(255) NOT NULL DEFAULT 'PENDING',
+  cluster_host_info IMAGE,
   CONSTRAINT PK_request PRIMARY KEY (request_id),
   CONSTRAINT FK_request_schedule_id FOREIGN KEY (request_schedule_id) REFERENCES requestschedule (schedule_id));
 
@@ -351,7 +352,6 @@ CREATE TABLE stage (
   supports_auto_skip_failure SMALLINT DEFAULT 0 NOT NULL,
   log_info VARCHAR(255) NOT NULL,
   request_context VARCHAR(255),
-  cluster_host_info IMAGE,
   command_params IMAGE,
   host_params IMAGE,
   command_execution_type VARCHAR(32) NOT NULL DEFAULT 'STAGE',

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
index 77459a6..12e66f9 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
@@ -346,6 +346,7 @@ CREATE TABLE request (
   start_time BIGINT NOT NULL,
   status VARCHAR(255) NOT NULL DEFAULT 'PENDING',
   display_status VARCHAR(255) NOT NULL DEFAULT 'PENDING',
+  cluster_host_info VARBINARY(MAX) NOT NULL,
   CONSTRAINT PK_request PRIMARY KEY CLUSTERED (request_id),
   CONSTRAINT FK_request_schedule_id FOREIGN KEY (request_schedule_id) REFERENCES requestschedule (schedule_id));
 
@@ -357,7 +358,6 @@ CREATE TABLE stage (
   supports_auto_skip_failure SMALLINT DEFAULT 0 NOT NULL,
   log_info VARCHAR(255) NOT NULL,
   request_context VARCHAR(255),
-  cluster_host_info VARBINARY(MAX) NOT NULL,
   command_params VARBINARY(MAX),
   host_params VARBINARY(MAX),
   command_execution_type VARCHAR(32) NOT NULL DEFAULT 'STAGE',

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/main/resources/properties.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/properties.json b/ambari-server/src/main/resources/properties.json
index e536d05..e0f81e6 100644
--- a/ambari-server/src/main/resources/properties.json
+++ b/ambari-server/src/main/resources/properties.json
@@ -139,6 +139,7 @@
         "Requests/abort_reason",
         "Requests/remove_pending_host_requests",
         "Requests/pending_host_request_count",
+        "Requests/cluster_host_info",
         "_"
     ],
     "RequestSchedule" : [

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
index 89ec32b..8165da5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
@@ -149,7 +149,7 @@ public class ExecutionCommandWrapperTest {
 
   private static void createTask(ActionDBAccessor db, long requestId, long stageId, String hostName, String clusterName) throws AmbariException {
 
-    Stage s = stageFactory.createNew(requestId, "/var/log", clusterName, 1L, "execution command wrapper test", "clusterHostInfo", "commandParamsStage", "hostParamsStage");
+    Stage s = stageFactory.createNew(requestId, "/var/log", clusterName, 1L, "execution command wrapper test", "commandParamsStage", "hostParamsStage");
     s.setStageId(stageId);
     s.addHostRoleExecutionCommand(hostName, Role.NAMENODE,
         RoleCommand.START,
@@ -157,7 +157,7 @@ public class ExecutionCommandWrapperTest {
             hostName, System.currentTimeMillis()), clusterName, "HDFS", false, false);
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "clusterHostInfo", clusters);
     db.persistActions(request);
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/StageTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/StageTest.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/StageTest.java
index 89627f7..214aee1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/StageTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/StageTest.java
@@ -66,7 +66,7 @@ public class StageTest {
 
   @Test
   public void testAddServerActionCommand_userName() throws Exception {
-    final Stage stage = stageFactory.createNew(1, "/tmp", "cluster1", 978, "context", CLUSTER_HOST_INFO,
+    final Stage stage = stageFactory.createNew(1, "/tmp", "cluster1", 978, "context",
         "{\"host_param\":\"param_value\"}", "{\"stage_param\":\"param_value\"}");
 
     stage.addServerActionCommand(ConfigureAction.class.getName(),


[39/50] [abbrv] ambari git commit: AMBARI-21067. Atlas config values not getting populated on BP cluster install with strategy : NEVER_APPLY (magyari_sandor)

Posted by ad...@apache.org.
AMBARI-21067. Atlas config values not getting populated on BP cluster install with strategy : NEVER_APPLY (magyari_sandor)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/74972de5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/74972de5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/74972de5

Branch: refs/heads/ambari-rest-api-explorer
Commit: 74972de559b21633288c03785dc672e792a172bb
Parents: fd4a7a4
Author: Sandor Magyari <sm...@hortonworks.com>
Authored: Fri May 19 09:06:24 2017 +0200
Committer: Sandor Magyari <sm...@hortonworks.com>
Committed: Mon May 22 14:27:38 2017 +0200

----------------------------------------------------------------------
 .../0.7.0.2.5/configuration/application-properties.xml    | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/74972de5/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.2.5/configuration/application-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.2.5/configuration/application-properties.xml b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.2.5/configuration/application-properties.xml
index 70af02c..f34d8be 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.2.5/configuration/application-properties.xml
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.2.5/configuration/application-properties.xml
@@ -24,7 +24,7 @@
   <!-- Misc properties -->
   <property>
     <name>atlas.audit.hbase.zookeeper.quorum</name>
-    <value/>
+    <value>localhost</value>
     <description/>
     <on-ambari-upgrade add="false"/>
   </property>
@@ -38,7 +38,7 @@
   </property>
   <property>
     <name>atlas.graph.storage.hostname</name>
-    <value/>
+    <value>localhost</value>
     <description/>
     <on-ambari-upgrade add="false"/>
   </property>
@@ -66,7 +66,7 @@
   </property>
   <property>
     <name>atlas.graph.index.search.solr.zookeeper-url</name>
-    <value/>
+    <value>localhost:2181/infra-solr</value>
     <description>The ZooKeeper quorum setup for Solr as comma separated value.</description>
     <on-ambari-upgrade add="false"/>
   </property>
@@ -145,7 +145,7 @@
   </property>
   <property>
     <name>atlas.kafka.bootstrap.servers</name>
-    <value/>
+    <value>localhost:6667</value>
     <description>Comma separated list of Kafka broker endpoints in host:port form</description>
     <depends-on>
      <property>
@@ -157,7 +157,7 @@
   </property>
   <property>
     <name>atlas.kafka.zookeeper.connect</name>
-    <value/>
+    <value>localhost:2181</value>
     <description>Comma separated list of servers forming Zookeeper quorum used by Kafka.</description>
     <on-ambari-upgrade add="false"/>
   </property>


[05/50] [abbrv] ambari git commit: AMBARI-20758 Aggregate local metrics for minute aggregation time window (dsen)

Posted by ad...@apache.org.
AMBARI-20758 Aggregate local metrics for minute aggregation time window (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/041d353b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/041d353b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/041d353b

Branch: refs/heads/ambari-rest-api-explorer
Commit: 041d353b0d75b20b0322097e13a1701226e6fc97
Parents: 772be78
Author: Dmytro Sen <ds...@apache.org>
Authored: Wed May 17 19:38:29 2017 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Wed May 17 19:38:29 2017 +0300

----------------------------------------------------------------------
 .../logfeeder/metrics/LogFeederAMSClient.java   |  12 +-
 ambari-metrics/ambari-metrics-assembly/pom.xml  |  20 +++
 .../src/main/assembly/monitor-windows.xml       |   7 +
 .../src/main/assembly/monitor.xml               |   9 +-
 .../timeline/AbstractTimelineMetricsSink.java   |  24 ++-
 .../sink/timeline/AggregationResult.java        |  60 +++++++
 .../metrics2/sink/timeline/MetricAggregate.java | 110 ++++++++++++
 .../sink/timeline/MetricClusterAggregate.java   |  73 ++++++++
 .../sink/timeline/MetricHostAggregate.java      |  81 +++++++++
 .../metrics2/sink/timeline/TimelineMetric.java  |   6 +-
 .../TimelineMetricWithAggregatedValues.java     |  65 +++++++
 .../AbstractTimelineMetricSinkTest.java         |  10 ++
 .../availability/MetricCollectorHATest.java     |  10 ++
 .../cache/HandleConnectExceptionTest.java       |  10 ++
 .../sink/flume/FlumeTimelineMetricsSink.java    |  16 ++
 .../timeline/HadoopTimelineMetricsSink.java     |  20 ++-
 .../conf/unix/log4j.properties                  |  31 ++++
 .../conf/windows/log4j.properties               |  29 +++
 .../ambari-metrics-host-aggregator/pom.xml      | 120 +++++++++++++
 .../AbstractMetricPublisherThread.java          | 134 ++++++++++++++
 .../aggregator/AggregatedMetricsPublisher.java  | 101 +++++++++++
 .../host/aggregator/AggregatorApplication.java  | 180 +++++++++++++++++++
 .../host/aggregator/AggregatorWebService.java   |  56 ++++++
 .../host/aggregator/RawMetricsPublisher.java    |  60 +++++++
 .../host/aggregator/TimelineMetricsHolder.java  |  98 ++++++++++
 .../conf/unix/ambari-metrics-monitor            |   2 +-
 .../src/main/python/core/aggregator.py          | 110 ++++++++++++
 .../src/main/python/core/config_reader.py       |  35 +++-
 .../src/main/python/core/controller.py          |  28 +++
 .../src/main/python/core/emitter.py             |   8 +-
 .../src/main/python/core/stop_handler.py        |   3 +-
 .../src/main/python/main.py                     |   6 +-
 .../kafka/KafkaTimelineMetricsReporter.java     |  17 ++
 .../storm/StormTimelineMetricsReporter.java     |  14 ++
 .../sink/storm/StormTimelineMetricsSink.java    |  14 ++
 .../storm/StormTimelineMetricsReporter.java     |  16 ++
 .../sink/storm/StormTimelineMetricsSink.java    |  16 ++
 .../timeline/HBaseTimelineMetricStore.java      |  29 ++-
 .../metrics/timeline/PhoenixHBaseAccessor.java  |   4 +-
 .../timeline/TimelineMetricConfiguration.java   |   2 +
 .../metrics/timeline/TimelineMetricStore.java   |   2 +
 .../timeline/TimelineMetricsAggregatorSink.java |   4 +-
 .../timeline/aggregators/MetricAggregate.java   | 110 ------------
 .../aggregators/MetricClusterAggregate.java     |  73 --------
 .../aggregators/MetricHostAggregate.java        |  81 ---------
 .../TimelineMetricAppAggregator.java            |   1 +
 .../TimelineMetricClusterAggregator.java        |   2 +
 .../TimelineMetricClusterAggregatorSecond.java  |   1 +
 .../TimelineMetricHostAggregator.java           |   1 +
 .../aggregators/TimelineMetricReadHelper.java   |   2 +
 .../webapp/TimelineWebServices.java             |  31 ++++
 .../timeline/ITPhoenixHBaseAccessor.java        |   4 +-
 .../metrics/timeline/MetricTestHelper.java      |   2 +-
 .../timeline/PhoenixHBaseAccessorTest.java      |   4 +-
 .../timeline/TestMetricHostAggregate.java       |   8 +-
 .../timeline/TestTimelineMetricStore.java       |   6 +
 .../TimelineMetricsAggregatorMemorySink.java    |   4 +-
 .../aggregators/ITClusterAggregator.java        |   4 +-
 .../aggregators/ITMetricAggregator.java         |  13 +-
 ...melineMetricClusterAggregatorSecondTest.java |   1 +
 ambari-metrics/pom.xml                          |   1 +
 .../system/impl/AmbariMetricSinkImpl.java       |  10 ++
 .../1.6.1.2.2.0/package/scripts/params.py       |   2 +
 .../hadoop-metrics2-accumulo.properties.j2      |   3 +
 .../0.1.0/configuration/ams-env.xml             |   8 +
 .../0.1.0/configuration/ams-site.xml            |  11 ++
 .../AMBARI_METRICS/0.1.0/metainfo.xml           |   3 +
 .../AMBARI_METRICS/0.1.0/package/scripts/ams.py |  30 ++++
 .../0.1.0/package/scripts/params.py             |   5 +
 .../hadoop-metrics2-hbase.properties.j2         |   3 +
 .../package/templates/metric_monitor.ini.j2     |   7 +
 .../FLUME/1.4.0.2.0/package/scripts/params.py   |   3 +
 .../templates/flume-metrics2.properties.j2      |   2 +
 .../0.96.0.2.0/package/scripts/params_linux.py  |   3 +
 ...-metrics2-hbase.properties-GANGLIA-MASTER.j2 |   2 +
 ...doop-metrics2-hbase.properties-GANGLIA-RS.j2 |   2 +
 .../hadoop-metrics2.properties.xml              |   2 +
 .../0.12.0.2.0/package/scripts/params_linux.py  |   2 +
 .../hadoop-metrics2-hivemetastore.properties.j2 |   2 +
 .../hadoop-metrics2-hiveserver2.properties.j2   |   2 +
 .../templates/hadoop-metrics2-llapdaemon.j2     |   2 +
 .../hadoop-metrics2-llaptaskscheduler.j2        |   2 +
 .../2.1.0.3.0/package/scripts/params_linux.py   |   3 +
 .../hadoop-metrics2-hivemetastore.properties.j2 |   2 +
 .../hadoop-metrics2-hiveserver2.properties.j2   |   2 +
 .../templates/hadoop-metrics2-llapdaemon.j2     |   2 +
 .../hadoop-metrics2-llaptaskscheduler.j2        |   2 +
 .../KAFKA/0.8.1/configuration/kafka-broker.xml  |  11 ++
 .../KAFKA/0.8.1/package/scripts/params.py       |   3 +
 .../STORM/0.9.1/package/scripts/params_linux.py |   2 +
 .../0.9.1/package/templates/config.yaml.j2      |   2 +
 .../templates/storm-metrics2.properties.j2      |   2 +
 .../2.0.6/hooks/before-START/scripts/params.py  |   3 +
 .../templates/hadoop-metrics2.properties.j2     |   2 +
 .../hadoop-metrics2.properties.xml              |   2 +
 .../3.0/hooks/before-START/scripts/params.py    |   2 +
 .../templates/hadoop-metrics2.properties.j2     |   2 +
 .../system/impl/TestAmbariMetricsSinkImpl.java  |  10 ++
 .../2.0/hooks/before-START/scripts/params.py    |   2 +
 99 files changed, 1854 insertions(+), 307 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/metrics/LogFeederAMSClient.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/metrics/LogFeederAMSClient.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/metrics/LogFeederAMSClient.java
index 2d1bf40..39526a5 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/metrics/LogFeederAMSClient.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/metrics/LogFeederAMSClient.java
@@ -89,6 +89,16 @@ public class LogFeederAMSClient extends AbstractTimelineMetricsSink {
   }
 
   @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return false;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return 0;
+  }
+
+  @Override
   protected boolean emitMetrics(TimelineMetrics metrics) {
     return super.emitMetrics(metrics);
   }
@@ -103,4 +113,4 @@ public class LogFeederAMSClient extends AbstractTimelineMetricsSink {
     return collectorPort;
   }
 
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-assembly/pom.xml b/ambari-metrics/ambari-metrics-assembly/pom.xml
index a4b87de..6b81de5 100644
--- a/ambari-metrics/ambari-metrics-assembly/pom.xml
+++ b/ambari-metrics/ambari-metrics-assembly/pom.xml
@@ -35,6 +35,7 @@
   <properties>
     <collector.dir>${project.basedir}/../ambari-metrics-timelineservice</collector.dir>
     <monitor.dir>${project.basedir}/../ambari-metrics-host-monitoring</monitor.dir>
+    <aggregator.dir>${project.basedir}/../ambari-metrics-host-aggregator</aggregator.dir>
     <grafana.dir>${project.basedir}/../ambari-metrics-grafana</grafana.dir>
     <hadoop-sink.dir>${project.basedir}/../ambari-metrics-hadoop-sink</hadoop-sink.dir>
     <storm-sink.dir>${project.basedir}/../ambari-metrics-storm-sink</storm-sink.dir>
@@ -599,6 +600,19 @@
                       </sources>
                     </mapping>
                     <mapping>
+                      <directory>/var/lib/ambari-metrics-monitor/lib</directory>
+                      <sources>
+                        <source>
+                          <location>
+                            ${aggregator.dir}/target/
+                          </location>
+                          <includes>
+                            <include>ambari-metrics-host-aggregator-${project.version}.jar</include>
+                          </includes>
+                        </source>
+                      </sources>
+                    </mapping>
+                    <mapping>
                       <directory>/etc/ambari-metrics-monitor/conf</directory>
                       <configuration>true</configuration>
                     </mapping>
@@ -744,6 +758,7 @@
                     <path>/var/run/ambari-metrics-grafana</path>
                     <path>/var/log/ambari-metrics-grafana</path>
                     <path>/var/lib/ambari-metrics-collector</path>
+                    <path>/var/lib/ambari-metrics-monitor/lib</path>
                     <path>/var/lib/ambari-metrics-grafana</path>
                     <path>/usr/lib/ambari-metrics-hadoop-sink</path>
                     <path>/usr/lib/ambari-metrics-kafka-sink</path>
@@ -1331,6 +1346,11 @@
       <type>pom</type>
       <optional>true</optional>
     </dependency>
+    <dependency>
+      <groupId>org.apache.ambari</groupId>
+      <artifactId>ambari-metrics-host-aggregator</artifactId>
+      <version>${project.version}</version>
+    </dependency>
   </dependencies>
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor-windows.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor-windows.xml b/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor-windows.xml
index ab309a1..d015d31 100644
--- a/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor-windows.xml
+++ b/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor-windows.xml
@@ -64,6 +64,13 @@
       </includes>
     </fileSet>
     <fileSet>
+      <directory>${aggregator.dir}/conf/windows</directory>
+      <outputDirectory>conf</outputDirectory>
+      <includes>
+        <include>log4j.properties</include>
+      </includes>
+    </fileSet>
+    <fileSet>
       <directory>${monitor.dir}/conf/windows</directory>
       <outputDirectory>/</outputDirectory>
       <includes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor.xml b/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor.xml
index 99a41c3..448fe62 100644
--- a/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor.xml
+++ b/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor.xml
@@ -46,6 +46,13 @@
       </includes>
     </fileSet>
     <fileSet>
+      <directory>${aggregator.dir}/conf/unix</directory>
+      <outputDirectory>conf</outputDirectory>
+      <includes>
+        <include>log4j.properties</include>
+      </includes>
+    </fileSet>
+    <fileSet>
       <directory>${monitor.dir}/conf/unix</directory>
       <outputDirectory>bin</outputDirectory>
       <includes>
@@ -68,4 +75,4 @@
 
 
 
-</assembly>
\ No newline at end of file
+</assembly>

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
index 2c6fae2..a8dc571 100644
--- a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
@@ -78,6 +78,8 @@ public abstract class AbstractTimelineMetricsSink {
   public static final String SSL_KEYSTORE_PATH_PROPERTY = "truststore.path";
   public static final String SSL_KEYSTORE_TYPE_PROPERTY = "truststore.type";
   public static final String SSL_KEYSTORE_PASSWORD_PROPERTY = "truststore.password";
+  public static final String HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY = "host_in_memory_aggregation";
+  public static final String HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY = "host_in_memory_aggregation_port";
   public static final String COLLECTOR_LIVE_NODES_PATH = "/ws/v1/timeline/metrics/livenodes";
   public static final String INSTANCE_ID_PROPERTY = "instanceId";
   public static final String SET_INSTANCE_ID_PROPERTY = "set.instanceId";
@@ -241,8 +243,14 @@ public abstract class AbstractTimelineMetricsSink {
   }
 
   protected boolean emitMetrics(TimelineMetrics metrics) {
-    String collectorHost = getCurrentCollectorHost();
-    String connectUrl = getCollectorUri(collectorHost);
+    String connectUrl;
+    if (isHostInMemoryAggregationEnabled()) {
+      connectUrl = constructTimelineMetricUri("http", "localhost", String.valueOf(getHostInMemoryAggregationPort()));
+    } else {
+      String collectorHost = getCurrentCollectorHost();
+      connectUrl = getCollectorUri(collectorHost);
+    }
+
     String jsonData = null;
     LOG.debug("EmitMetrics connectUrl = "  + connectUrl);
     try {
@@ -562,4 +570,16 @@ public abstract class AbstractTimelineMetricsSink {
    * @return String "host1"
    */
   abstract protected String getHostname();
+
+  /**
+   * Check if host-level in-memory aggregation is enabled for this sink.
+   * @return true if metrics should be posted to the local in-memory aggregator
+   */
+  abstract protected boolean isHostInMemoryAggregationEnabled();
+
+  /**
+   * Port the local in-memory aggregation application listens on.
+   * @return the aggregator port, e.g. 61888
+   */
+  abstract protected int getHostInMemoryAggregationPort();
 }

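With this change every concrete sink only has to answer the two questions above; emitMetrics() then decides whether to post to the collector or to the local aggregator. A minimal sketch of that decision, assuming a plain java.util.Properties config and the property names introduced in this patch (the class and method names here are illustrative, not part of the patch):

    import java.util.Properties;

    public final class SinkTargetResolver {
      // Mirrors the branch added to emitMetrics(): local aggregator when enabled, collector otherwise.
      public static String resolveConnectUrl(Properties conf, String collectorUri) {
        boolean aggregationEnabled = Boolean.parseBoolean(
            conf.getProperty("host_in_memory_aggregation", "false"));
        int aggregationPort = Integer.parseInt(
            conf.getProperty("host_in_memory_aggregation_port", "61888"));
        if (aggregationEnabled) {
          // the aggregator exposes the same /ws/v1/timeline/metrics resource on localhost
          return "http://localhost:" + aggregationPort + "/ws/v1/timeline/metrics";
        }
        return collectorUri;
      }

      public static void main(String[] args) {
        Properties conf = new Properties();
        conf.setProperty("host_in_memory_aggregation", "true");
        System.out.println(resolveConnectUrl(conf,
            "http://collector.example.com:6188/ws/v1/timeline/metrics"));
      }
    }
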
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AggregationResult.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AggregationResult.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AggregationResult.java
new file mode 100644
index 0000000..c903e3d
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AggregationResult.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.util.Set;
+
+@XmlRootElement(name="AggregationResult")
+public class AggregationResult {
+    protected Set<TimelineMetricWithAggregatedValues> result;
+    protected Long timeInMilis;
+
+    @Override
+    public String toString() {
+        return "AggregationResult{" +
+                "result=" + result +
+                ", timeInMilis=" + timeInMilis +
+                '}';
+    }
+
+    public AggregationResult() {
+    }
+
+    public AggregationResult(Set<TimelineMetricWithAggregatedValues> result, Long timeInMilis) {
+        this.result = result;
+        this.timeInMilis = timeInMilis;
+    }
+    @XmlElement
+    public Set<TimelineMetricWithAggregatedValues> getResult() {
+        return result;
+    }
+
+    public void setResult(Set<TimelineMetricWithAggregatedValues> result) {
+        this.result = result;
+    }
+    @XmlElement
+    public Long getTimeInMilis() {
+        return timeInMilis;
+    }
+
+    public void setTimeInMilis(Long timeInMilis) {
+        this.timeInMilis = timeInMilis;
+    }
+}

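Serialized with Jackson's JAXB introspector, as the publisher threads later in this patch do, an AggregationResult becomes a JSON object with a "result" array and a "timeInMilis" field (that spelling is part of the wire format). A sketch, assuming Jackson 1.x (org.codehaus.jackson) on the classpath as in ambari-metrics-common and the usual TimelineMetric setters; the demo class itself is illustrative:

    import org.codehaus.jackson.map.ObjectMapper;
    import org.codehaus.jackson.xc.JaxbAnnotationIntrospector;

    import java.util.Collections;

    public final class AggregationResultJsonDemo {
      public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        mapper.setAnnotationIntrospector(new JaxbAnnotationIntrospector());

        TimelineMetric metric = new TimelineMetric();
        metric.setMetricName("cpu_user");
        // sum=6.0 over 3 samples, deviation 0, max 3.0, min 1.0
        MetricHostAggregate aggregate = new MetricHostAggregate(6.0, 3, 0.0, 3.0, 1.0);
        AggregationResult result = new AggregationResult(
            Collections.singleton(new TimelineMetricWithAggregatedValues(metric, aggregate)),
            System.currentTimeMillis());
        System.out.println(mapper.writeValueAsString(result));
      }
    }
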
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricAggregate.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricAggregate.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricAggregate.java
new file mode 100644
index 0000000..84cba0e
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricAggregate.java
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.codehaus.jackson.annotate.JsonProperty;
+import org.codehaus.jackson.annotate.JsonSubTypes;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import java.io.IOException;
+
+/**
+ * Base class for aggregated metric values: running sum, deviation, max and min.
+ */
+@JsonSubTypes({@JsonSubTypes.Type(value = MetricClusterAggregate.class),
+  @JsonSubTypes.Type(value = MetricHostAggregate.class)})
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public class MetricAggregate {
+  private static final ObjectMapper mapper = new ObjectMapper();
+
+  protected Double sum = 0.0;
+  protected Double deviation;
+  protected Double max = -Double.MAX_VALUE; // Double.MIN_VALUE is the smallest positive double, not the most negative value
+  protected Double min = Double.MAX_VALUE;
+
+  public MetricAggregate() {
+  }
+
+  MetricAggregate(Double sum, Double deviation, Double max,
+                  Double min) {
+    this.sum = sum;
+    this.deviation = deviation;
+    this.max = max;
+    this.min = min;
+  }
+
+  public void updateSum(Double sum) {
+    this.sum += sum;
+  }
+
+  public void updateMax(Double max) {
+    if (max > this.max) {
+      this.max = max;
+    }
+  }
+
+  public void updateMin(Double min) {
+    if (min < this.min) {
+      this.min = min;
+    }
+  }
+
+  @JsonProperty("sum")
+  public Double getSum() {
+    return sum;
+  }
+
+  @JsonProperty("deviation")
+  public Double getDeviation() {
+    return deviation;
+  }
+
+  @JsonProperty("max")
+  public Double getMax() {
+    return max;
+  }
+
+  @JsonProperty("min")
+  public Double getMin() {
+    return min;
+  }
+
+  public void setSum(Double sum) {
+    this.sum = sum;
+  }
+
+  public void setDeviation(Double deviation) {
+    this.deviation = deviation;
+  }
+
+  public void setMax(Double max) {
+    this.max = max;
+  }
+
+  public void setMin(Double min) {
+    this.min = min;
+  }
+
+  public String toJSON() throws IOException {
+    return mapper.writeValueAsString(this);
+  }
+}

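The update* methods fold one observation at a time into the running aggregate, which is how the host aggregator accumulates values further down in this patch. A small illustrative usage (demo class name is not part of the patch):

    public final class MetricAggregateDemo {
      public static void main(String[] args) {
        MetricAggregate agg = new MetricAggregate();
        for (double v : new double[] {4.0, 1.5, 9.0}) {
          agg.updateSum(v);
          agg.updateMax(v);
          agg.updateMin(v);
        }
        // sum=14.5, max=9.0, min=1.5; deviation stays null until set explicitly
        System.out.println(agg.getSum() + " / " + agg.getMax() + " / " + agg.getMin());
      }
    }
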
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricClusterAggregate.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricClusterAggregate.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricClusterAggregate.java
new file mode 100644
index 0000000..7ef2c1d
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricClusterAggregate.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+
+import org.codehaus.jackson.annotate.JsonCreator;
+import org.codehaus.jackson.annotate.JsonProperty;
+
+/**
+ * Cluster-level aggregate that also tracks how many hosts reported the metric.
+ */
+public class MetricClusterAggregate extends MetricAggregate {
+  private int numberOfHosts;
+
+  @JsonCreator
+  public MetricClusterAggregate() {
+  }
+
+  public MetricClusterAggregate(Double sum, int numberOfHosts, Double deviation,
+                         Double max, Double min) {
+    super(sum, deviation, max, min);
+    this.numberOfHosts = numberOfHosts;
+  }
+
+  @JsonProperty("numberOfHosts")
+  public int getNumberOfHosts() {
+    return numberOfHosts;
+  }
+
+  public void updateNumberOfHosts(int count) {
+    this.numberOfHosts += count;
+  }
+
+  public void setNumberOfHosts(int numberOfHosts) {
+    this.numberOfHosts = numberOfHosts;
+  }
+
+  /**
+   * Find and update min, max and avg for a minute
+   */
+  public void updateAggregates(MetricClusterAggregate hostAggregate) {
+    updateMax(hostAggregate.getMax());
+    updateMin(hostAggregate.getMin());
+    updateSum(hostAggregate.getSum());
+    updateNumberOfHosts(hostAggregate.getNumberOfHosts());
+  }
+
+  @Override
+  public String toString() {
+    return "MetricAggregate{" +
+      "sum=" + sum +
+      ", numberOfHosts=" + numberOfHosts +
+      ", deviation=" + deviation +
+      ", max=" + max +
+      ", min=" + min +
+      '}';
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricHostAggregate.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricHostAggregate.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricHostAggregate.java
new file mode 100644
index 0000000..e190913
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricHostAggregate.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+
+import org.codehaus.jackson.annotate.JsonCreator;
+import org.codehaus.jackson.annotate.JsonProperty;
+
+/**
+ * Represents a collection of minute based aggregation of values for
+ * resolution greater than a minute.
+ */
+public class MetricHostAggregate extends MetricAggregate {
+
+  private long numberOfSamples = 0;
+
+  @JsonCreator
+  public MetricHostAggregate() {
+    super(0.0, 0.0, -Double.MAX_VALUE, Double.MAX_VALUE);
+  }
+
+  public MetricHostAggregate(Double sum, int numberOfSamples,
+                             Double deviation,
+                             Double max, Double min) {
+    super(sum, deviation, max, min);
+    this.numberOfSamples = numberOfSamples;
+  }
+
+  @JsonProperty("numberOfSamples")
+  public long getNumberOfSamples() {
+    return numberOfSamples == 0 ? 1 : numberOfSamples;
+  }
+
+  public void updateNumberOfSamples(long count) {
+    this.numberOfSamples += count;
+  }
+
+  public void setNumberOfSamples(long numberOfSamples) {
+    this.numberOfSamples = numberOfSamples;
+  }
+
+  public double calculateAverage() {
+    return sum / getNumberOfSamples(); // the guarded getter avoids division by zero for an empty aggregate
+  }
+
+  /**
+   * Find and update min, max and avg for a minute
+   */
+  public void updateAggregates(MetricHostAggregate hostAggregate) {
+    updateMax(hostAggregate.getMax());
+    updateMin(hostAggregate.getMin());
+    updateSum(hostAggregate.getSum());
+    updateNumberOfSamples(hostAggregate.getNumberOfSamples());
+  }
+
+  @Override
+  public String toString() {
+    return "MetricHostAggregate{" +
+      "sum=" + sum +
+      ", numberOfSamples=" + numberOfSamples +
+      ", deviation=" + deviation +
+      ", max=" + max +
+      ", min=" + min +
+      '}';
+  }
+}

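updateAggregates() makes host aggregates mergeable across windows: sums and sample counts add up, while max and min widen. A worked example (illustrative class name):

    public final class HostAggregateDemo {
      public static void main(String[] args) {
        MetricHostAggregate total = new MetricHostAggregate();
        total.updateAggregates(new MetricHostAggregate(6.0, 3, 0.0, 3.0, 1.0));
        total.updateAggregates(new MetricHostAggregate(10.0, 2, 0.0, 7.0, 3.0));
        // sum=16.0 over 5 samples -> average 3.2; max=7.0, min=1.0
        System.out.println(total.calculateAverage());
      }
    }
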
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java
index 44c9d4a..edace52 100644
--- a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java
@@ -45,7 +45,7 @@ public class TimelineMetric implements Comparable<TimelineMetric> {
   private String type;
   private String units;
   private TreeMap<Long, Double> metricValues = new TreeMap<Long, Double>();
-  private Map<String, String> metadata = new HashMap<>();
+  private HashMap<String, String> metadata = new HashMap<>();
 
   // default
   public TimelineMetric() {
@@ -151,11 +151,11 @@ public class TimelineMetric implements Comparable<TimelineMetric> {
   }
 
   @XmlElement(name = "metadata")
-  public Map<String,String> getMetadata () {
+  public HashMap<String,String> getMetadata () {
     return metadata;
   }
 
-  public void setMetadata (Map<String,String> metadata) {
+  public void setMetadata (HashMap<String,String> metadata) {
     this.metadata = metadata;
   }
 

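Narrowing the metadata field from Map to HashMap keeps the declared type concrete and serializable end to end, which is presumably why the Hadoop sink change below now builds the map explicitly instead of using Collections.singletonMap. Illustrative usage:

    import java.util.HashMap;

    public final class MetadataDemo {
      public static void main(String[] args) {
        TimelineMetric metric = new TimelineMetric();
        HashMap<String, String> metadata = new HashMap<>();
        metadata.put("skipAggregation", "true"); // the flag the Hadoop sink sets below
        metric.setMetadata(metadata);
        System.out.println(metric.getMetadata());
      }
    }
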
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetricWithAggregatedValues.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetricWithAggregatedValues.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetricWithAggregatedValues.java
new file mode 100644
index 0000000..626ac5f
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetricWithAggregatedValues.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+
+@XmlRootElement(name = "TimelineMetricWithAggregatedValues")
+@XmlAccessorType(XmlAccessType.NONE)
+public class TimelineMetricWithAggregatedValues {
+    private TimelineMetric timelineMetric;
+    private MetricHostAggregate metricAggregate;
+
+    public TimelineMetricWithAggregatedValues() {
+    }
+
+    public TimelineMetricWithAggregatedValues(TimelineMetric metric, MetricHostAggregate metricAggregate) {
+        timelineMetric = metric;
+        this.metricAggregate = metricAggregate;
+    }
+
+    @XmlElement
+    public MetricHostAggregate getMetricAggregate() {
+        return metricAggregate;
+    }
+    @XmlElement
+    public TimelineMetric getTimelineMetric() {
+        return timelineMetric;
+    }
+
+    public void setTimelineMetric(TimelineMetric timelineMetric) {
+        this.timelineMetric = timelineMetric;
+    }
+
+    public void setMetricAggregate(MetricHostAggregate metricAggregate) {
+        this.metricAggregate = metricAggregate;
+    }
+
+    @Override
+    public String toString() {
+        return "TimelineMetricWithAggregatedValues{" +
+                "timelineMetric=" + timelineMetric +
+                ", metricAggregate=" + metricAggregate +
+                '}';
+    }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/AbstractTimelineMetricSinkTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/AbstractTimelineMetricSinkTest.java b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/AbstractTimelineMetricSinkTest.java
index 9b0cdbe..ce2cf79 100644
--- a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/AbstractTimelineMetricSinkTest.java
+++ b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/AbstractTimelineMetricSinkTest.java
@@ -90,6 +90,16 @@ public class AbstractTimelineMetricSinkTest {
     }
 
     @Override
+    protected boolean isHostInMemoryAggregationEnabled() {
+      return true;
+    }
+
+    @Override
+    protected int getHostInMemoryAggregationPort() {
+      return 61888;
+    }
+
+    @Override
     public boolean emitMetrics(TimelineMetrics metrics) {
       super.init();
       return super.emitMetrics(metrics);

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java
index a393a96..f0174d5 100644
--- a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java
+++ b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java
@@ -192,5 +192,15 @@ public class MetricCollectorHATest {
     protected String getHostname() {
       return "h1";
     }
+
+    @Override
+    protected boolean isHostInMemoryAggregationEnabled() {
+      return true;
+    }
+
+    @Override
+    protected int getHostInMemoryAggregationPort() {
+      return 61888;
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/HandleConnectExceptionTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/HandleConnectExceptionTest.java b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/HandleConnectExceptionTest.java
index 32fe32e..4eb75eb 100644
--- a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/HandleConnectExceptionTest.java
+++ b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/HandleConnectExceptionTest.java
@@ -125,6 +125,16 @@ public class HandleConnectExceptionTest {
     }
 
     @Override
+    protected boolean isHostInMemoryAggregationEnabled() {
+      return false;
+    }
+
+    @Override
+    protected int getHostInMemoryAggregationPort() {
+      return 61888;
+    }
+
+    @Override
     public boolean emitMetrics(TimelineMetrics metrics) {
       super.init();
       return super.emitMetrics(metrics);

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-flume-sink/src/main/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-flume-sink/src/main/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-flume-sink/src/main/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSink.java
index 904c916..6277907 100644
--- a/ambari-metrics/ambari-metrics-flume-sink/src/main/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-flume-sink/src/main/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSink.java
@@ -63,6 +63,9 @@ public class FlumeTimelineMetricsSink extends AbstractTimelineMetricsSink implem
   private int timeoutSeconds = 10;
   private boolean setInstanceId;
   private String instanceId;
+  private boolean hostInMemoryAggregationEnabled;
+  private int hostInMemoryAggregationPort;
+
 
   @Override
   public void start() {
@@ -110,6 +113,9 @@ public class FlumeTimelineMetricsSink extends AbstractTimelineMetricsSink implem
     port = configuration.getProperty(COLLECTOR_PORT, "6188");
     setInstanceId = Boolean.valueOf(configuration.getProperty(SET_INSTANCE_ID_PROPERTY, "false"));
     instanceId = configuration.getProperty(INSTANCE_ID_PROPERTY, "");
+
+    hostInMemoryAggregationEnabled = Boolean.parseBoolean(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY, "false"));
+    hostInMemoryAggregationPort = Integer.parseInt(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY, "61888"));
     // Initialize the collector write strategy
     super.init();
 
@@ -162,6 +168,16 @@ public class FlumeTimelineMetricsSink extends AbstractTimelineMetricsSink implem
     return hostname;
   }
 
+  @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return hostInMemoryAggregationEnabled;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return hostInMemoryAggregationPort;
+  }
+
   public void setPollFrequency(long pollFrequency) {
     this.pollFrequency = pollFrequency;
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
index 11e16c2..c235c7c 100644
--- a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
@@ -75,6 +75,8 @@ public class HadoopTimelineMetricsSink extends AbstractTimelineMetricsSink imple
       return t;
     }
   });
+  private int hostInMemoryAggregationPort;
+  private boolean hostInMemoryAggregationEnabled;
 
   @Override
   public void init(SubsetConfiguration conf) {
@@ -107,7 +109,8 @@ public class HadoopTimelineMetricsSink extends AbstractTimelineMetricsSink imple
     protocol = conf.getString(COLLECTOR_PROTOCOL, "http");
     collectorHosts = parseHostsStringArrayIntoCollection(conf.getStringArray(COLLECTOR_HOSTS_PROPERTY));
     port = conf.getString(COLLECTOR_PORT, "6188");
-
+    hostInMemoryAggregationEnabled = conf.getBoolean(HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY, false);
+    hostInMemoryAggregationPort = conf.getInt(HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY, 61888);
     if (collectorHosts.isEmpty()) {
       LOG.error("No Metric collector configured.");
     } else {
@@ -249,6 +252,16 @@ public class HadoopTimelineMetricsSink extends AbstractTimelineMetricsSink imple
   }
 
   @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return hostInMemoryAggregationEnabled;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return hostInMemoryAggregationPort;
+  }
+
+  @Override
   public void putMetrics(MetricsRecord record) {
     try {
       String recordName = record.name();
@@ -308,9 +321,10 @@ public class HadoopTimelineMetricsSink extends AbstractTimelineMetricsSink imple
 
       int sbBaseLen = sb.length();
       List<TimelineMetric> metricList = new ArrayList<TimelineMetric>();
-      Map<String, String> metadata = null;
+      HashMap<String, String> metadata = null;
       if (skipAggregation) {
-        metadata = Collections.singletonMap("skipAggregation", "true");
+        metadata = new HashMap<>();
+        metadata.put("skipAggregation", "true");
       }
       long startTime = record.timestamp();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-aggregator/conf/unix/log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/conf/unix/log4j.properties b/ambari-metrics/ambari-metrics-host-aggregator/conf/unix/log4j.properties
new file mode 100644
index 0000000..d7ceedd
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/conf/unix/log4j.properties
@@ -0,0 +1,31 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Define some default values that can be overridden by system properties
+# Root logger option
+log4j.rootLogger=INFO,file
+
+# Direct log messages to a log file
+log4j.appender.file=org.apache.log4j.RollingFileAppender
+log4j.appender.file.File=/var/log/ambari-metrics-monitor/ambari-metrics-aggregator.log
+log4j.appender.file.MaxFileSize=80MB
+log4j.appender.file.MaxBackupIndex=60
+log4j.appender.file.layout=org.apache.log4j.PatternLayout
+log4j.appender.file.layout.ConversionPattern=%d{ABSOLUTE} %5p [%t] %c{1}:%L - %m%n
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-aggregator/conf/windows/log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/conf/windows/log4j.properties b/ambari-metrics/ambari-metrics-host-aggregator/conf/windows/log4j.properties
new file mode 100644
index 0000000..d9aabab
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/conf/windows/log4j.properties
@@ -0,0 +1,29 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Define some default values that can be overridden by system properties
+# Root logger option
+log4j.rootLogger=INFO,file
+
+# Direct log messages to a log file
+log4j.appender.file=org.apache.log4j.RollingFileAppender
+log4j.appender.file.File=\\var\\log\\ambari-metrics-monitor\\ambari-metrics-aggregator.log
+log4j.appender.file.MaxFileSize=80MB
+log4j.appender.file.MaxBackupIndex=60
+log4j.appender.file.layout=org.apache.log4j.PatternLayout
+log4j.appender.file.layout.ConversionPattern=%d{ABSOLUTE} %5p [%t] %c{1}:%L - %m%n

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-aggregator/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/pom.xml b/ambari-metrics/ambari-metrics-host-aggregator/pom.xml
new file mode 100644
index 0000000..c2c7897
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/pom.xml
@@ -0,0 +1,120 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>ambari-metrics</artifactId>
+        <groupId>org.apache.ambari</groupId>
+        <version>2.0.0.0-SNAPSHOT</version>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>ambari-metrics-host-aggregator</artifactId>
+    <packaging>jar</packaging>
+
+    <name>ambari-metrics-host-aggregator</name>
+    <url>http://maven.apache.org</url>
+
+    <properties>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>3.8.1</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+            <version>14.0.1</version>
+        </dependency>
+        <dependency>
+              <groupId>org.apache.ambari</groupId>
+              <artifactId>ambari-metrics-common</artifactId>
+              <version>2.0.0.0-SNAPSHOT</version>
+        </dependency>
+        <dependency>
+            <groupId>javax.servlet</groupId>
+            <artifactId>servlet-api</artifactId>
+            <version>2.5</version>
+        </dependency>
+        <dependency>
+            <groupId>com.sun.jersey</groupId>
+            <artifactId>jersey-json</artifactId>
+            <version>1.11</version>
+        </dependency>
+        <dependency>
+            <groupId>com.sun.jersey</groupId>
+            <artifactId>jersey-server</artifactId>
+            <version>1.11</version>
+        </dependency>
+        <dependency>
+            <groupId>javax.xml.bind</groupId>
+            <artifactId>jaxb-api</artifactId>
+            <version>2.2.2</version>
+        </dependency>
+        <dependency>
+            <groupId>com.sun.jersey</groupId>
+            <artifactId>jersey-core</artifactId>
+            <version>1.11</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-common</artifactId>
+            <version>2.7.1.2.3.4.0-3347</version>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <version>1.6</version>
+                <configuration>
+                    <createDependencyReducedPom>true</createDependencyReducedPom>
+                    <filters>
+                        <filter>
+                            <artifact>*:*</artifact>
+                            <excludes>
+                                <exclude>META-INF/*.SF</exclude>
+                                <exclude>META-INF/*.DSA</exclude>
+                                <exclude>META-INF/*.RSA</exclude>
+                            </excludes>
+                        </filter>
+                    </filters>
+                </configuration>
+
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+
+</project>

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AbstractMetricPublisherThread.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AbstractMetricPublisherThread.java b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AbstractMetricPublisherThread.java
new file mode 100644
index 0000000..b1f60fa
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AbstractMetricPublisherThread.java
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.host.aggregator;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+import org.codehaus.jackson.map.AnnotationIntrospector;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+import org.codehaus.jackson.xc.JaxbAnnotationIntrospector;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.util.Map;
+
+/**
+ * Abstract class that runs a thread that publishes metrics data to AMS collector in specified intervals.
+ */
+public abstract class AbstractMetricPublisherThread extends Thread {
+    protected int publishIntervalInSeconds;
+    protected String publishURL;
+    protected ObjectMapper objectMapper;
+    private Log LOG;
+    protected TimelineMetricsHolder timelineMetricsHolder;
+
+    public AbstractMetricPublisherThread(TimelineMetricsHolder timelineMetricsHolder, String publishURL, int publishIntervalInSeconds) {
+        LOG = LogFactory.getLog(this.getClass());
+        this.publishURL = publishURL;
+        this.publishIntervalInSeconds = publishIntervalInSeconds;
+        this.timelineMetricsHolder = timelineMetricsHolder;
+        objectMapper = new ObjectMapper();
+        AnnotationIntrospector introspector = new JaxbAnnotationIntrospector();
+        objectMapper.setAnnotationIntrospector(introspector);
+        objectMapper.getSerializationConfig()
+                .withSerializationInclusion(JsonSerialize.Inclusion.NON_NULL);
+    }
+
+    /**
+     * Publishes metrics to collector in specified intervals while not interrupted.
+     */
+    @Override
+    public void run() {
+        while (!isInterrupted()) {
+            try {
+                sleep(this.publishIntervalInSeconds * 1000);
+            } catch (InterruptedException e) {
+                interrupt(); // restore the interrupt status cleared by sleep() so the loop condition can see it and exit
+            }
+            try {
+                processAndPublishMetrics(getMetricsFromCache());
+            } catch (Exception e) {
+                LOG.error("Couldn't process and send metrics: ", e);
+            }
+        }
+    }
+
+    /**
+     * Processes and sends metrics to the collector.
+     * @param metricsFromCache metrics drained from the local cache
+     * @throws Exception if processing or publishing fails
+     */
+    protected void processAndPublishMetrics(Map<Long, TimelineMetrics> metricsFromCache) throws Exception {
+        if (metricsFromCache.isEmpty()) return;
+
+        LOG.info(String.format("Preparing %s timeline metrics for publishing", metricsFromCache.size()));
+        publishMetricsJson(processMetrics(metricsFromCache));
+    }
+
+    /**
+     * Returns the metrics map to publish; the source depends on the implementation.
+     * @return map of cache timestamps to collected metrics
+     */
+    protected abstract Map<Long, TimelineMetrics> getMetricsFromCache();
+
+    /**
+     * Processes the given metrics (aggregates or merges them) and converts them into the JSON string that will be sent to the collector.
+     * @param metricValues metrics drained from the local cache
+     * @return JSON payload to post, or null if serialization fails
+     */
+    protected abstract String processMetrics(Map<Long, TimelineMetrics> metricValues);
+
+    protected void publishMetricsJson(String jsonData) throws Exception {
+        int timeout = 5 * 1000;
+        HttpURLConnection connection = null;
+        if (this.publishURL == null) {
+            throw new IOException("Unknown URL. Unable to connect to metrics collector.");
+        }
+        LOG.info("Collector URL : " + publishURL);
+        connection = (HttpURLConnection) new URL(this.publishURL).openConnection();
+
+        connection.setRequestMethod("POST");
+        connection.setRequestProperty("Content-Type", "application/json");
+        connection.setRequestProperty("Connection", "Keep-Alive");
+        connection.setConnectTimeout(timeout);
+        connection.setReadTimeout(timeout);
+        connection.setDoOutput(true);
+
+        if (jsonData != null) {
+            try (OutputStream os = connection.getOutputStream()) {
+                os.write(jsonData.getBytes("UTF-8"));
+            }
+        }
+        int responseCode = connection.getResponseCode();
+        if (responseCode != 200) {
+            throw new Exception("responseCode is " + responseCode);
+        }
+        LOG.info("Successfully sent metrics.");
+    }
+
+    /**
+     * Interrupts the thread.
+     */
+    protected void stopPublisher() {
+        this.interrupt();
+    }
+}

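A publisher's lifecycle is plain Thread semantics: start() begins the periodic loop, stopPublisher() interrupts it, and the restored interrupt status above lets the loop flush its current cycle and exit. A sketch using the RawMetricsPublisher defined further down; the collector URL and timings are placeholders:

    public final class PublisherLifecycleDemo {
      public static void main(String[] args) throws Exception {
        // raw interval 60s, aggregation interval 300s - the defaults AggregatorApplication reads
        TimelineMetricsHolder holder = TimelineMetricsHolder.getInstance(60, 300);
        AbstractMetricPublisherThread publisher =
            new RawMetricsPublisher(holder, "http://localhost:6188/ws/v1/timeline/metrics", 60);
        publisher.start();           // publishes every 60 seconds
        Thread.sleep(65000L);        // let one cycle run
        publisher.stopPublisher();   // interrupt; the loop exits after the current cycle
        publisher.join(2000L);
      }
    }
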
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatedMetricsPublisher.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatedMetricsPublisher.java b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatedMetricsPublisher.java
new file mode 100644
index 0000000..0540ec9
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatedMetricsPublisher.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.host.aggregator;
+
+
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.sink.timeline.AggregationResult;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricWithAggregatedValues;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+/**
+ * Thread that aggregates and publishes metrics to collector on specified interval.
+ */
+public class AggregatedMetricsPublisher extends AbstractMetricPublisherThread {
+
+    private Log LOG;
+
+    public AggregatedMetricsPublisher(TimelineMetricsHolder timelineMetricsHolder, String collectorURL, int interval) {
+        super(timelineMetricsHolder, collectorURL, interval);
+        LOG = LogFactory.getLog(this.getClass());
+    }
+
+    /**
+     * Gets the metrics map from the TimelineMetricsHolder aggregation cache.
+     * @return map of cache timestamps to collected metrics
+     */
+    @Override
+    protected Map<Long, TimelineMetrics> getMetricsFromCache() {
+        return timelineMetricsHolder.extractMetricsForAggregationPublishing();
+    }
+
+    /**
+     * Aggregates the given metrics and converts them into the JSON string that will be sent to the collector.
+     * @param metricForAggregationValues metrics drained from the aggregation cache
+     * @return JSON payload with one aggregate per metric name, or null if serialization fails
+     */
+    @Override
+    protected String processMetrics(Map<Long, TimelineMetrics> metricForAggregationValues) {
+        HashMap<String, TimelineMetrics> nameToMetricMap = new HashMap<>();
+        for (TimelineMetrics timelineMetrics : metricForAggregationValues.values()) {
+            for (TimelineMetric timelineMetric : timelineMetrics.getMetrics()) {
+                if (!nameToMetricMap.containsKey(timelineMetric.getMetricName())) {
+                    nameToMetricMap.put(timelineMetric.getMetricName(), new TimelineMetrics());
+                }
+                nameToMetricMap.get(timelineMetric.getMetricName()).addOrMergeTimelineMetric(timelineMetric);
+            }
+        }
+        Set<TimelineMetricWithAggregatedValues> metricAggregateMap = new HashSet<>();
+        for (TimelineMetrics metrics : nameToMetricMap.values()) {
+            double sum = 0;
+            double max = Double.NEGATIVE_INFINITY; // Integer limits would mis-handle double-valued metrics outside the int range
+            double min = Double.POSITIVE_INFINITY;
+            int count = 0;
+            for (TimelineMetric metric : metrics.getMetrics()) {
+                for (Double value : metric.getMetricValues().values()) {
+                    sum += value;
+                    max = Math.max(max, value);
+                    min = Math.min(min, value);
+                    count++;
+                }
+            }
+            TimelineMetric tmpMetric = new TimelineMetric(metrics.getMetrics().get(0));
+            tmpMetric.setMetricValues(new TreeMap<Long, Double>());
+            metricAggregateMap.add(new TimelineMetricWithAggregatedValues(tmpMetric, new MetricHostAggregate(sum, count, 0d, max, min)));
+        }
+        String json = null;
+        try {
+            json = objectMapper.writeValueAsString(new AggregationResult(metricAggregateMap, System.currentTimeMillis()));
+            LOG.debug(json);
+        } catch (Exception e) {
+            LOG.error("Failed to convert result into json", e);
+        }
+
+        return json;
+    }
+}

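For one metric name the loop above reduces every datapoint in the window to a single MetricHostAggregate. For values 1, 2 and 3 that is sum=6, min=1, max=3 and count=3 (deviation is always posted as 0). The same arithmetic in isolation, as an illustrative standalone class:

    import java.util.TreeMap;

    public final class WindowAggregationDemo {
      public static void main(String[] args) {
        TreeMap<Long, Double> values = new TreeMap<>();
        values.put(1000L, 1.0);
        values.put(2000L, 2.0);
        values.put(3000L, 3.0);

        double sum = 0;
        double max = Double.NEGATIVE_INFINITY;
        double min = Double.POSITIVE_INFINITY;
        int count = 0;
        for (double v : values.values()) {
          sum += v;
          max = Math.max(max, v);
          min = Math.min(min, v);
          count++;
        }
        // feeds MetricHostAggregate(6.0, 3, 0d, 3.0, 1.0)
        System.out.printf("sum=%s max=%s min=%s count=%d%n", sum, max, min, count);
      }
    }
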
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatorApplication.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatorApplication.java b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatorApplication.java
new file mode 100644
index 0000000..c6b703b
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatorApplication.java
@@ -0,0 +1,180 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.host.aggregator;
+
+import com.sun.jersey.api.container.httpserver.HttpServerFactory;
+import com.sun.jersey.api.core.PackagesResourceConfig;
+import com.sun.jersey.api.core.ResourceConfig;
+import com.sun.net.httpserver.HttpServer;
+
+import javax.ws.rs.core.UriBuilder;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.URI;
+import java.net.URL;
+import java.net.UnknownHostException;
+import java.util.HashMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * WEB application with 2 publisher threads that processes received metrics and submits results to the collector
+ */
+public class AggregatorApplication
+{
+    private static final int STOP_SECONDS_DELAY = 0;
+    private static final int JOIN_SECONDS_TIMEOUT = 2;
+    private static final String BASE_POST_URL = "%s://%s:%s/ws/v1/timeline/metrics";
+    private static final String AGGREGATED_POST_PREFIX = "/aggregated";
+    private static final String METRICS_SITE_CONFIGURATION_FILE = "ams-site.xml";
+    private static final Log LOG = LogFactory.getLog(AggregatorApplication.class);
+    private final int webApplicationPort;
+    private final int rawPublishingInterval;
+    private final int aggregationInterval;
+    private Configuration configuration;
+    private String [] collectorHosts;
+    private AggregatedMetricsPublisher aggregatePublisher;
+    private RawMetricsPublisher rawPublisher;
+    private TimelineMetricsHolder timelineMetricsHolder;
+    private HttpServer httpServer;
+
+    public AggregatorApplication(String collectorHosts) {
+        initConfiguration();
+        this.collectorHosts = collectorHosts.split(",");
+        this.aggregationInterval = configuration.getInt("timeline.metrics.host.aggregator.minute.interval", 300);
+        this.rawPublishingInterval = configuration.getInt("timeline.metrics.sink.report.interval", 60);
+        this.webApplicationPort = configuration.getInt("timeline.metrics.host.inmemory.aggregation.port", 61888);
+        this.timelineMetricsHolder = TimelineMetricsHolder.getInstance(rawPublishingInterval, aggregationInterval);
+        try {
+            this.httpServer = createHttpServer();
+        } catch (IOException e) {
+            LOG.error("Exception while starting HTTP server. Exiting", e);
+            System.exit(1);
+        }
+    }
+
+    private void initConfiguration() {
+        ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+        if (classLoader == null) {
+            classLoader = getClass().getClassLoader();
+        }
+
+        URL amsResUrl = classLoader.getResource(METRICS_SITE_CONFIGURATION_FILE);
+        if (amsResUrl == null) {
+            throw new IllegalStateException("Unable to initialize the metrics " +
+                    "subsystem. No ams-site present in the classpath.");
+        }
+        LOG.info("Found metric service configuration: " + amsResUrl);
+        configuration = new Configuration(true);
+        try {
+            configuration.addResource(amsResUrl.toURI().toURL());
+        } catch (Exception e) {
+            LOG.error("Couldn't init configuration. ", e);
+            System.exit(1);
+        }
+    }
+
+    private String getHostName() {
+        String hostName = "localhost";
+        try {
+            hostName = InetAddress.getLocalHost().getCanonicalHostName();
+        } catch (UnknownHostException e) {
+            LOG.error(e);
+        }
+        return hostName;
+    }
+
+    private URI getURI() {
+        URI uri = UriBuilder.fromUri("http://" + getHostName() + "/").port(this.webApplicationPort).build();
+        LOG.info(String.format("Web server at %s", uri));
+        return uri;
+    }
+
+    private HttpServer createHttpServer() throws IOException {
+        ResourceConfig resourceConfig = new PackagesResourceConfig("org.apache.hadoop.metrics2.host.aggregator");
+        HashMap<String, Object> params = new HashMap();
+        params.put("com.sun.jersey.api.json.POJOMappingFeature", "true");
+        resourceConfig.setPropertiesAndFeatures(params);
+        return HttpServerFactory.create(getURI(), resourceConfig);
+    }
+
+    private void startWebServer() {
+        LOG.info("Starting web server.");
+        this.httpServer.start();
+    }
+
+    private void startAggregatePublisherThread() {
+        LOG.info("Starting aggregated metrics publisher.");
+        String collectorURL = buildBasicCollectorURL(collectorHosts[0]) + AGGREGATED_POST_PREFIX;
+        aggregatePublisher = new AggregatedMetricsPublisher(timelineMetricsHolder, collectorURL, aggregationInterval);
+        aggregatePublisher.start();
+    }
+
+    private void startRawPublisherThread() {
+        LOG.info("Starting raw metrics publisher.");
+        String collectorURL = buildBasicCollectorURL(collectorHosts[0]);
+        rawPublisher = new RawMetricsPublisher(timelineMetricsHolder, collectorURL, rawPublishingInterval);
+        rawPublisher.start();
+    }
+
+
+
+    private void stop() {
+        aggregatePublisher.stopPublisher();
+        rawPublisher.stopPublisher();
+        httpServer.stop(STOP_SECONDS_DELAY);
+        LOG.info("Stopped web server.");
+        try {
+            LOG.info("Waiting for threads to join.");
+            aggregatePublisher.join(JOIN_SECONDS_TIMEOUT * 1000);
+            rawPublisher.join(JOIN_SECONDS_TIMEOUT * 1000);
+            LOG.info("Gracefully stopped Aggregator Application.");
+        } catch (InterruptedException e) {
+            LOG.error("Received exception during stop : ", e);
+
+        }
+
+    }
+
+    private String buildBasicCollectorURL(String host) {
+        String port = configuration.get("timeline.metrics.service.webapp.address", "0.0.0.0:6188").split(":")[1];
+        String protocol = configuration.get("timeline.metrics.service.http.policy", "HTTP_ONLY").equalsIgnoreCase("HTTP_ONLY") ? "http" : "https";
+        return String.format(BASE_POST_URL, protocol, host, port);
+    }
+
+    public static void main(String[] args) throws Exception {
+        LOG.info("Starting aggregator application");
+        if (args.length != 1) {
+            throw new Exception("This jar should be run with exactly one argument - a comma-separated list of collector hosts");
+        }
+
+        final AggregatorApplication app = new AggregatorApplication(args[0]);
+        app.startAggregatePublisherThread();
+        app.startRawPublisherThread();
+        app.startWebServer();
+
+        Runtime.getRuntime().addShutdownHook(new Thread() {
+            public void run() {
+                LOG.info("Stopping aggregator application");
+                app.stop();
+            }
+        });
+    }
+}

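The application reads its intervals and port from ams-site.xml (timeline.metrics.sink.report.interval, timeline.metrics.host.aggregator.minute.interval, timeline.metrics.host.inmemory.aggregation.port) and takes exactly one argument. Launching it in-process looks like the sketch below; the host names are placeholders:

    public final class AggregatorLauncherDemo {
      public static void main(String[] args) throws Exception {
        // equivalent to: java -jar ambari-metrics-host-aggregator-<version>.jar "c1.example.com,c2.example.com"
        AggregatorApplication.main(new String[] {"c1.example.com,c2.example.com"});
      }
    }
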
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatorWebService.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatorWebService.java b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatorWebService.java
new file mode 100644
index 0000000..f96d0ed
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatorWebService.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.host.aggregator;
+
+
+
+import com.sun.jersey.spi.resource.Singleton;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import java.io.IOException;
+
+@Singleton
+@Path("/ws/v1/timeline")
+public class AggregatorWebService {
+    TimelineMetricsHolder metricsHolder = TimelineMetricsHolder.getInstance();
+
+    @GET
+    @Produces("text/json")
+    @Path("/metrics")
+    public Response helloWorld() throws IOException {
+        return Response.ok().build();
+    }
+
+    @POST
+    @Produces(MediaType.TEXT_PLAIN)
+    @Consumes({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
+    @Path("/metrics")
+    public Response postMetrics(
+            TimelineMetrics metrics) {
+        metricsHolder.putMetricsForAggregationPublishing(metrics);
+        metricsHolder.putMetricsForRawPublishing(metrics);
+        return Response.ok().build();
+    }
+}
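
For reference, the service above accepts a TimelineMetrics payload as JSON on POST /ws/v1/timeline/metrics. A minimal client sketch follows; the localhost port and the JSON field names are illustrative assumptions, not taken from this commit:

// Hedged sketch: post one metric batch to the aggregator's REST endpoint.
// The port (61888) and the JSON field names below are assumptions.
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class AggregatorClientSketch {
    public static void main(String[] args) throws Exception {
        long now = System.currentTimeMillis();
        String json = "{\"metrics\":[{\"metricname\":\"cpu_user\",\"appid\":\"HOST\","
                + "\"hostname\":\"host1\",\"starttime\":" + now + ","
                + "\"metrics\":{\"" + now + "\":1.0}}]}";
        URL url = new URL("http://localhost:61888/ws/v1/timeline/metrics");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setRequestProperty("Content-Type", "application/json");
        conn.setDoOutput(true);
        try (OutputStream out = conn.getOutputStream()) {
            out.write(json.getBytes(StandardCharsets.UTF_8));
        }
        // postMetrics() buffers the batch for both raw and aggregated publishing.
        System.out.println("HTTP " + conn.getResponseCode());
        conn.disconnect();
    }
}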

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/RawMetricsPublisher.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/RawMetricsPublisher.java b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/RawMetricsPublisher.java
new file mode 100644
index 0000000..f317ed9
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/RawMetricsPublisher.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.host.aggregator;
+
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+
+import java.util.Map;
+
+public class RawMetricsPublisher extends AbstractMetricPublisherThread {
+    private final Log LOG;
+
+    public RawMetricsPublisher(TimelineMetricsHolder timelineMetricsHolder, String collectorURL, int interval) {
+        super(timelineMetricsHolder, collectorURL, interval);
+        LOG = LogFactory.getLog(this.getClass());
+    }
+
+    @Override
+    protected Map<Long, TimelineMetrics> getMetricsFromCache() {
+        return timelineMetricsHolder.extractMetricsForRawPublishing();
+    }
+
+    @Override
+    protected String processMetrics(Map<Long, TimelineMetrics> metricValues) {
+        //merge everything in one TimelineMetrics object
+        TimelineMetrics timelineMetrics = new TimelineMetrics();
+        for (TimelineMetrics metrics : metricValues.values()) {
+            for (TimelineMetric timelineMetric : metrics.getMetrics()) {
+                timelineMetrics.addOrMergeTimelineMetric(timelineMetric);
+            }
+        }
+        //map TimelineMetrics to json string
+        String json = null;
+        try {
+            json = objectMapper.writeValueAsString(timelineMetrics);
+            LOG.debug(json);
+        } catch (Exception e) {
+            LOG.error("Failed to convert result into json", e);
+        }
+        return json;
+    }
+}
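
The processMetrics() override above folds every cached batch into a single TimelineMetrics object before serializing it, so datapoints for the same metric key are merged rather than posted twice. The same pattern as a standalone sketch; it assumes only getMetrics() and addOrMergeTimelineMetric(), both of which appear in this commit:

import java.util.Map;

import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;

public class MergeSketch {
    // Folds every cached batch into one TimelineMetrics object; datapoints of
    // metrics sharing the same key are merged instead of being sent twice.
    static TimelineMetrics merge(Map<Long, TimelineMetrics> cached) {
        TimelineMetrics merged = new TimelineMetrics();
        for (TimelineMetrics batch : cached.values()) {
            for (TimelineMetric metric : batch.getMetrics()) {
                merged.addOrMergeTimelineMetric(metric);
            }
        }
        return merged;
    }
}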

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/TimelineMetricsHolder.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/TimelineMetricsHolder.java b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/TimelineMetricsHolder.java
new file mode 100644
index 0000000..b355c97
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/TimelineMetricsHolder.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.host.aggregator;
+
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+/**
+ * Singleton holding two Guava caches that buffer raw and aggregated metrics between collection and publishing.
+ */
+public class TimelineMetricsHolder {
+    private static final int DEFAULT_RAW_CACHE_EXPIRE_TIME = 60;
+    private static final int DEFAULT_AGGREGATION_CACHE_EXPIRE_TIME = 300;
+    private Cache<Long, TimelineMetrics> aggregationMetricsCache;
+    private Cache<Long, TimelineMetrics> rawMetricsCache;
+    private static TimelineMetricsHolder instance = null;
+    //to ensure no metric values are expired
+    private static final int EXPIRE_DELAY = 30;
+    private final ReadWriteLock aggregationCacheLock = new ReentrantReadWriteLock();
+    private final ReadWriteLock rawCacheLock = new ReentrantReadWriteLock();
+
+    private TimelineMetricsHolder(int rawCacheExpireTime, int aggregationCacheExpireTime) {
+        this.rawMetricsCache = CacheBuilder.newBuilder().expireAfterWrite(rawCacheExpireTime + EXPIRE_DELAY, TimeUnit.SECONDS).build();
+        this.aggregationMetricsCache = CacheBuilder.newBuilder().expireAfterWrite(aggregationCacheExpireTime + EXPIRE_DELAY, TimeUnit.SECONDS).build();
+    }
+
+    public static synchronized TimelineMetricsHolder getInstance(int rawCacheExpireTime, int aggregationCacheExpireTime) {
+        if (instance == null) {
+            instance = new TimelineMetricsHolder(rawCacheExpireTime, aggregationCacheExpireTime);
+        }
+        return instance;
+    }
+
+    /**
+     * Initializes the caches with the default expiration times if they have not been initialized yet.
+     * @return the singleton TimelineMetricsHolder instance
+     */
+    public static TimelineMetricsHolder getInstance() {
+        return getInstance(DEFAULT_RAW_CACHE_EXPIRE_TIME, DEFAULT_AGGREGATION_CACHE_EXPIRE_TIME);
+    }
+
+    public void putMetricsForAggregationPublishing(TimelineMetrics timelineMetrics) {
+        aggregationCacheLock.writeLock().lock();
+        try {
+            aggregationMetricsCache.put(System.currentTimeMillis(), timelineMetrics);
+        } finally {
+            aggregationCacheLock.writeLock().unlock();
+        }
+    }
+
+    public Map<Long, TimelineMetrics> extractMetricsForAggregationPublishing() {
+        return extractMetricsFromCacheWithLock(aggregationMetricsCache, aggregationCacheLock);
+    }
+
+    public void putMetricsForRawPublishing(TimelineMetrics metrics) {
+        rawCacheLock.writeLock().lock();
+        try {
+            rawMetricsCache.put(System.currentTimeMillis(), metrics);
+        } finally {
+            rawCacheLock.writeLock().unlock();
+        }
+    }
+
+    public Map<Long, TimelineMetrics> extractMetricsForRawPublishing() {
+        return extractMetricsFromCacheWithLock(rawMetricsCache, rawCacheLock);
+    }
+
+    /**
+     * Returns a snapshot of the given cache and clears it.
+     * @param cache the cache to drain
+     * @param lock the lock guarding the cache
+     * @return the drained entries, keyed by insertion timestamp
+     */
+    private Map<Long, TimelineMetrics> extractMetricsFromCacheWithLock(Cache<Long, TimelineMetrics> cache, ReadWriteLock lock) {
+        lock.writeLock().lock();
+        try {
+            Map<Long, TimelineMetrics> metricsMap = new TreeMap<>(cache.asMap());
+            cache.invalidateAll();
+            return metricsMap;
+        } finally {
+            lock.writeLock().unlock();
+        }
+    }
+
+}
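
A minimal sketch of the holder's put/extract cycle, using only the methods defined above (in this commit the web service performs the puts and the publisher threads the extracts):

import java.util.Map;

import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;

public class HolderUsageSketch {
    public static void main(String[] args) {
        TimelineMetricsHolder holder = TimelineMetricsHolder.getInstance();

        // Producer side (AggregatorWebService.postMetrics): buffer a batch.
        holder.putMetricsForRawPublishing(new TimelineMetrics());

        // Consumer side (RawMetricsPublisher): drain everything buffered so far.
        Map<Long, TimelineMetrics> drained = holder.extractMetricsForRawPublishing();
        System.out.println("drained batches: " + drained.size());   // 1

        // The extract cleared the cache, so a second call returns an empty map.
        System.out.println("after drain: " + holder.extractMetricsForRawPublishing().size());   // 0
    }
}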

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/ambari-metrics-monitor
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/ambari-metrics-monitor b/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/ambari-metrics-monitor
index 967e133..9bbb271 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/ambari-metrics-monitor
+++ b/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/ambari-metrics-monitor
@@ -24,7 +24,7 @@ METRIC_MONITOR_PY_SCRIPT=${RESOURCE_MONITORING_DIR}/main.py
 PIDFILE=/var/run/ambari-metrics-monitor/ambari-metrics-monitor.pid
 OUTFILE=/var/log/ambari-metrics-monitor/ambari-metrics-monitor.out
 
-STOP_TIMEOUT=5
+STOP_TIMEOUT=10
 
 OK=0
 NOTOK=1

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/aggregator.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/aggregator.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/aggregator.py
new file mode 100644
index 0000000..2249e53
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/aggregator.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import threading
+import subprocess
+import logging
+import urllib2
+
+logger = logging.getLogger()
+
+class Aggregator(threading.Thread):
+  def __init__(self, config, stop_handler):
+    threading.Thread.__init__(self)
+    self._config = config
+    self._stop_handler = stop_handler
+    self._aggregator_process = None
+    self._sleep_interval = config.get_collector_sleep_interval()
+    self.stopped = False
+
+  def run(self):
+    java_home = self._config.get_java_home()
+    collector_hosts = self._config.get_metrics_collector_hosts_as_string()
+    jvm_args = self._config.get_aggregator_jvm_args()
+    config_dir = self._config.get_config_dir()
+    class_name = "org.apache.hadoop.metrics2.host.aggregator.AggregatorApplication"
+    ams_log_file = "ambari-metrics-aggregator.log"
+    additional_classpath = ':{0}'.format(config_dir)
+    ams_log_dir = self._config.ams_monitor_log_dir()
+    logger.info('Starting Aggregator thread.')
+    cmd = "{0}/bin/java {1} -Dams.log.dir={2} -Dams.log.file={3} -cp /var/lib/ambari-metrics-monitor/lib/*{4} {5} {6}"\
+      .format(java_home, jvm_args, ams_log_dir, ams_log_file, additional_classpath, class_name, collector_hosts)
+
+    logger.info("Executing : {0}".format(cmd))
+
+    self._aggregator_process = subprocess.Popen(cmd, stdout=None, stderr=None, shell=True)
+    while not self.stopped:
+      if 0 == self._stop_handler.wait(self._sleep_interval):
+        break
+    pass
+    self.stop()
+
+  def stop(self):
+    self.stopped = True
+    if self._aggregator_process:
+      logger.info('Stopping Aggregator thread.')
+      self._aggregator_process.terminate()
+
+class AggregatorWatchdog(threading.Thread):
+  SLEEP_TIME = 30
+  CONNECTION_TIMEOUT = 5
+  AMS_AGGREGATOR_METRICS_CHECK_URL = "/ws/v1/timeline/metrics/"
+  def __init__(self, config, stop_handler):
+    threading.Thread.__init__(self)
+    self._config = config
+    self._stop_handler = stop_handler
+    self.URL = 'http://localhost:' + self._config.get_inmemory_aggregation_port() + self.AMS_AGGREGATOR_METRICS_CHECK_URL
+    self._is_ok = threading.Event()
+    self.set_is_ok(True)
+    self.stopped = False
+
+  def run(self):
+    logger.info('Starting Aggregator Watchdog thread.')
+    while not self.stopped:
+      if 0 == self._stop_handler.wait(self.SLEEP_TIME):
+        break
+      try:
+        conn = urllib2.urlopen(self.URL, timeout=self.CONNECTION_TIMEOUT)
+        code = conn.code
+        conn.close()
+        self.set_is_ok(code == 200)
+      except (KeyboardInterrupt, SystemExit):
+        raise
+      except Exception:
+        self.set_is_ok(False)
+
+  def is_ok(self):
+    return self._is_ok.is_set()
+
+  def set_is_ok(self, value):
+    if value:
+      self._is_ok.set()
+    else:
+      if self.is_ok():
+        logger.warning("Watcher couldn't connect to aggregator.")
+      self._is_ok.clear()
+
+  def stop(self):
+    logger.info('Stopping watcher thread.')
+    self.stopped = True

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
index 2670e76..d1429ed 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
@@ -30,6 +30,8 @@ from ambari_commons.os_family_impl import OsFamilyImpl
 # Abstraction for OS-dependent configuration defaults
 #
 class ConfigDefaults(object):
+  def get_config_dir(self):
+    pass
   def get_config_file_path(self):
     pass
   def get_metric_file_path(self):
@@ -40,11 +42,14 @@ class ConfigDefaults(object):
 @OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
 class ConfigDefaultsWindows(ConfigDefaults):
   def __init__(self):
+    self._CONFIG_DIR = "conf"
     self._CONFIG_FILE_PATH = "conf\\metric_monitor.ini"
     self._METRIC_FILE_PATH = "conf\\metric_groups.conf"
     self._METRIC_FILE_PATH = "conf\\ca.pem"
     pass
 
+  def get_config_dir(self):
+    return self._CONFIG_DIR
   def get_config_file_path(self):
     return self._CONFIG_FILE_PATH
   def get_metric_file_path(self):
@@ -55,11 +60,13 @@ class ConfigDefaultsWindows(ConfigDefaults):
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class ConfigDefaultsLinux(ConfigDefaults):
   def __init__(self):
+    self._CONFIG_DIR = "/etc/ambari-metrics-monitor/conf/"
     self._CONFIG_FILE_PATH = "/etc/ambari-metrics-monitor/conf/metric_monitor.ini"
     self._METRIC_FILE_PATH = "/etc/ambari-metrics-monitor/conf/metric_groups.conf"
     self._CA_CERTS_FILE_PATH = "/etc/ambari-metrics-monitor/conf/ca.pem"
     pass
-
+  def get_config_dir(self):
+    return self._CONFIG_DIR
   def get_config_file_path(self):
     return self._CONFIG_FILE_PATH
   def get_metric_file_path(self):
@@ -71,6 +78,7 @@ configDefaults = ConfigDefaults()
 
 config = ConfigParser.RawConfigParser()
 
+CONFIG_DIR = configDefaults.get_config_dir()
 CONFIG_FILE_PATH = configDefaults.get_config_file_path()
 METRIC_FILE_PATH = configDefaults.get_metric_file_path()
 CA_CERTS_FILE_PATH = configDefaults.get_ca_certs_file_path()
@@ -191,6 +199,8 @@ class Configuration:
         # No hostname script identified in the ambari agent conf
         pass
     pass
+  def get_config_dir(self):
+    return CONFIG_DIR
 
   def getConfig(self):
     return self.config
@@ -214,10 +224,14 @@ class Configuration:
   def get_hostname_config(self):
     return self.get("default", "hostname", None)
 
-  def get_metrics_collector_hosts(self):
+  def get_metrics_collector_hosts_as_list(self):
     hosts = self.get("default", "metrics_servers", "localhost")
     return hosts.split(",")
 
+  def get_metrics_collector_hosts_as_string(self):
+    return self.get("default", "metrics_servers", "localhost")
+
   def get_failover_strategy(self):
     return self.get("collector", "failover_strategy", ROUND_ROBIN_FAILOVER_STRATEGY)
 
@@ -239,6 +253,23 @@ class Configuration:
   def is_server_https_enabled(self):
     return "true" == str(self.get("collector", "https_enabled")).lower()
 
+  def get_java_home(self):
+    return self.get("aggregation", "java_home")
+
+  def is_inmemory_aggregation_enabled(self):
+    return "true" == str(self.get("aggregation", "host_in_memory_aggregation")).lower()
+
+  def get_inmemory_aggregation_port(self):
+    return self.get("aggregation", "host_in_memory_aggregation_port")
+
+  def get_aggregator_jvm_args(self):
+    return self.get("aggregation", "jvm_arguments", "-Xmx256m -Xms128m -XX:PermSize=68m")
+
+  def ams_monitor_log_dir(self):
+    return self.get("aggregation", "ams_monitor_log_dir", "/var/log/ambari-metrics-monitor")
+
   def is_set_instanceid(self):
     return "true" == str(self.get("default", "set.instanceId", 'false')).lower()
 


[44/50] [abbrv] ambari git commit: AMBARI-21033 ADDENDUM - Log Search use POJOs for input configuration (oleewere)

Posted by ad...@apache.org.
AMBARI-21033 ADDENDUM - Log Search use POJOs for input configuration (oleewere)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0626b789
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0626b789
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0626b789

Branch: refs/heads/ambari-rest-api-explorer
Commit: 0626b789ba477c494dc5a260b2bca61e3d906690
Parents: 6c68321
Author: oleewere <ol...@gmail.com>
Authored: Mon May 22 20:31:16 2017 +0200
Committer: oleewere <ol...@gmail.com>
Committed: Mon May 22 20:31:24 2017 +0200

----------------------------------------------------------------------
 .../config/api/LogSearchConfigFactoryTest.java      | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0626b789/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigFactoryTest.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigFactoryTest.java b/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigFactoryTest.java
index 8e7154e..425694f 100644
--- a/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigFactoryTest.java
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigFactoryTest.java
@@ -20,14 +20,12 @@
 package org.apache.ambari.logsearch.config.api;
 
 import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
 
-import org.apache.ambari.logsearch.config.api.LogSearchConfig;
-import org.apache.ambari.logsearch.config.api.LogSearchConfigFactory;
 import org.apache.ambari.logsearch.config.api.LogSearchConfig.Component;
 import org.junit.Test;
 
-import com.google.common.collect.ImmutableMap;
-
 import junit.framework.Assert;
 
 public class LogSearchConfigFactoryTest {
@@ -42,17 +40,19 @@ public class LogSearchConfigFactoryTest {
 
   @Test
   public void testCustomConfig() throws Exception {
+    Map<String, String> logsearchConfClassMap = new HashMap<>();
+    logsearchConfClassMap.put("logsearch.config.class", "org.apache.ambari.logsearch.config.api.LogSearchConfigClass2");
     LogSearchConfig config = LogSearchConfigFactory.createLogSearchConfig(Component.SERVER,
-        ImmutableMap.of("logsearch.config.class", "org.apache.ambari.logsearch.config.api.LogSearchConfigClass2"),
-        LogSearchConfigClass1.class);
+      logsearchConfClassMap, LogSearchConfigClass1.class);
     
     Assert.assertSame(config.getClass(), LogSearchConfigClass2.class);
   }
   
   @Test(expected = IllegalArgumentException.class)
   public void testNonConfigClass() throws Exception {
+    Map<String, String> logsearchConfClassMap = new HashMap<>();
+    logsearchConfClassMap.put("logsearch.config.class", "org.apache.ambari.logsearch.config.api.NonLogSearchConfigClass");
     LogSearchConfigFactory.createLogSearchConfig(Component.SERVER,
-        ImmutableMap.of("logsearch.config.class", "org.apache.ambari.logsearch.config.api.NonLogSearchConfigClass"),
-        LogSearchConfigClass1.class);
+      logsearchConfClassMap, LogSearchConfigClass1.class);
   }
 }


[34/50] [abbrv] ambari git commit: AMBARI-21057. Change Storage of Data on Request/Stage/Task To Reduce Redundancy (dgrinenko via aonishuk)

Posted by ad...@apache.org.
AMBARI-21057. Change Storage of Data on Request/Stage/Task To Reduce Redundancy (dgrinenko via aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/cbb1e905
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/cbb1e905
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/cbb1e905

Branch: refs/heads/ambari-rest-api-explorer
Commit: cbb1e9059669b2af7d63323321980d8cc0f9203f
Parents: 1603cd6
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Mon May 22 12:03:00 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon May 22 12:03:00 2017 +0300

----------------------------------------------------------------------
 .../org/apache/ambari/server/checks/RangerSSLConfigCheck.java    | 1 -
 .../ambari/server/controller/internal/StageResourceProvider.java | 1 -
 .../ambari/server/metrics/system/impl/AmbariMetricSinkImpl.java  | 1 -
 .../java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java | 4 ++--
 .../ambari/server/checks/ServiceCheckValidityCheckTest.java      | 1 -
 .../server/controller/internal/RequestStageContainerTest.java    | 1 -
 .../controller/logging/LogSearchDataRetrievalServiceTest.java    | 1 -
 .../apache/ambari/server/credentialapi/CredentialUtilTest.java   | 1 -
 .../authorization/AmbariPamAuthenticationProviderTest.java       | 1 -
 .../org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java  | 1 -
 10 files changed, 2 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/cbb1e905/ambari-server/src/main/java/org/apache/ambari/server/checks/RangerSSLConfigCheck.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/RangerSSLConfigCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/RangerSSLConfigCheck.java
index 02f6559..47a0ea0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/RangerSSLConfigCheck.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/RangerSSLConfigCheck.java
@@ -24,7 +24,6 @@ import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.PrereqCheckRequest;
 import org.apache.ambari.server.state.stack.PrereqCheckStatus;
 import org.apache.ambari.server.state.stack.PrerequisiteCheck;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/cbb1e905/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
index 06aa68b..77757c6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
@@ -47,7 +47,6 @@ import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
 import org.apache.ambari.server.controller.utilities.PredicateHelper;
 import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
 import org.apache.ambari.server.orm.dao.HostRoleCommandStatusSummaryDTO;
-import org.apache.ambari.server.orm.dao.RequestDAO;
 import org.apache.ambari.server.orm.dao.StageDAO;
 import org.apache.ambari.server.orm.entities.StageEntity;
 import org.apache.ambari.server.state.Cluster;

http://git-wip-us.apache.org/repos/asf/ambari/blob/cbb1e905/ambari-server/src/main/java/org/apache/ambari/server/metrics/system/impl/AmbariMetricSinkImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/metrics/system/impl/AmbariMetricSinkImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/metrics/system/impl/AmbariMetricSinkImpl.java
index a0765bf..83e422c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/metrics/system/impl/AmbariMetricSinkImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/metrics/system/impl/AmbariMetricSinkImpl.java
@@ -54,7 +54,6 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.metrics2.sink.timeline.AbstractTimelineMetricsSink;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
-
 import org.apache.hadoop.metrics2.sink.timeline.cache.TimelineMetricsCache;
 import org.springframework.security.core.context.SecurityContextHolder;
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/cbb1e905/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
index 5e2eb16..9255daf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
@@ -30,11 +30,11 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.SecurityType;
 import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.inject.Inject;
 import com.google.inject.Injector;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * The {@link UpgradeCatalog251} upgrades Ambari from 2.5.0 to 2.5.1.

http://git-wip-us.apache.org/repos/asf/ambari/blob/cbb1e905/ambari-server/src/test/java/org/apache/ambari/server/checks/ServiceCheckValidityCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServiceCheckValidityCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServiceCheckValidityCheckTest.java
index 996f349..253c835 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServiceCheckValidityCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServiceCheckValidityCheckTest.java
@@ -45,7 +45,6 @@ import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.stack.PrereqCheckStatus;
 import org.apache.ambari.server.state.stack.PrerequisiteCheck;
-
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;

http://git-wip-us.apache.org/repos/asf/ambari/blob/cbb1e905/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestStageContainerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestStageContainerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestStageContainerTest.java
index 4fcc814..c001ab0 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestStageContainerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestStageContainerTest.java
@@ -18,7 +18,6 @@
 
 package org.apache.ambari.server.controller.internal;
 
-import static org.easymock.EasyMock.anyString;
 import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.createStrictMock;
 import static org.easymock.EasyMock.expect;

http://git-wip-us.apache.org/repos/asf/ambari/blob/cbb1e905/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalServiceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalServiceTest.java
index 1bf0204..3370173 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalServiceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalServiceTest.java
@@ -22,7 +22,6 @@ import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.isA;
-
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;

http://git-wip-us.apache.org/repos/asf/ambari/blob/cbb1e905/ambari-server/src/test/java/org/apache/ambari/server/credentialapi/CredentialUtilTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/credentialapi/CredentialUtilTest.java b/ambari-server/src/test/java/org/apache/ambari/server/credentialapi/CredentialUtilTest.java
index a09f037..b82941a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/credentialapi/CredentialUtilTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/credentialapi/CredentialUtilTest.java
@@ -28,7 +28,6 @@ import java.util.Properties;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.ToolRunner;
-
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;

http://git-wip-us.apache.org/repos/asf/ambari/blob/cbb1e905/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariPamAuthenticationProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariPamAuthenticationProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariPamAuthenticationProviderTest.java
index b789470..c623000 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariPamAuthenticationProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariPamAuthenticationProviderTest.java
@@ -33,7 +33,6 @@ import org.apache.ambari.server.orm.dao.UserDAO;
 import org.apache.ambari.server.orm.entities.PrincipalEntity;
 import org.apache.ambari.server.orm.entities.UserEntity;
 import org.apache.ambari.server.security.ClientSecurityType;
-
 import org.easymock.EasyMock;
 import org.junit.After;
 import org.junit.Before;

http://git-wip-us.apache.org/repos/asf/ambari/blob/cbb1e905/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
index 3cb2c47..084a489 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
@@ -95,7 +95,6 @@ import org.junit.runner.RunWith;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Maps;
 import com.google.gson.Gson;
-
 import com.google.gson.JsonObject;
 import com.google.gson.JsonParser;
 import com.google.gson.JsonPrimitive;


[22/50] [abbrv] ambari git commit: AMBARI-21057. Change Storage of Data on Request/Stage/Task To Reduce Redundancy (dgrinenko via dlysnichenko)

Posted by ad...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
index c1056dd..75ad9ab 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
@@ -193,7 +193,7 @@ public class TestActionDBAccessorImpl {
     List<Stage> stages = new ArrayList<>();
     stages.add(createStubStage(hostName, requestId, stageId));
     stages.add(createStubStage(hostName, requestId, stageId + 1));
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "", clusters);
     db.persistActions(request);
     assertEquals(2, stages.size());
   }
@@ -539,7 +539,7 @@ public class TestActionDBAccessorImpl {
   @Test
   public void testAbortRequest() throws AmbariException {
     Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action db accessor test",
-      "clusterHostInfo", "commandParamsStage", "hostParamsStage");
+      "commandParamsStage", "hostParamsStage");
     s.setStageId(stageId);
 
     clusters.addHost("host2");
@@ -576,7 +576,8 @@ public class TestActionDBAccessorImpl {
     String hostName = cmd.getHostName();
     cmd.setStatus(HostRoleStatus.COMPLETED);
 
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "", clusters);
+    request.setClusterHostInfo("clusterHostInfo");
     db.persistActions(request);
     db.abortOperation(requestId);
 
@@ -620,7 +621,7 @@ public class TestActionDBAccessorImpl {
 
     stages.add(stage);
 
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "", clusters);
 
     // persist entities
     db.persistActions(request);
@@ -668,7 +669,7 @@ public class TestActionDBAccessorImpl {
   @Test
   public void testGet1000TasksFromOracleDB() throws Exception {
     Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action db accessor test",
-      "clusterHostInfo", "commandParamsStage", "hostParamsStage");
+      "commandParamsStage", "hostParamsStage");
     s.setStageId(stageId);
     for (int i = 1000; i < 2002; i++) {
       String host = "host" + i;
@@ -681,7 +682,8 @@ public class TestActionDBAccessorImpl {
 
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "", clusters);
+    request.setClusterHostInfo("clusterHostInfo");
     db.persistActions(request);
 
     List<HostRoleCommandEntity> entities =
@@ -709,7 +711,7 @@ public class TestActionDBAccessorImpl {
     Stage s = createStubStage(hostname, requestId, stageId);
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "", clusters);
     db.persistActions(request);
   }
 
@@ -723,7 +725,7 @@ public class TestActionDBAccessorImpl {
       stages.add(stage);
     }
 
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "", clusters);
     db.persistActions(request);
   }
 
@@ -733,7 +735,7 @@ public class TestActionDBAccessorImpl {
     Stage s = createStubStage(hostname, requestId, stageId);
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "", clusters);
 
     s.setHostRoleStatus(hostname, Role.HBASE_REGIONSERVER.name(), HostRoleStatus.COMPLETED);
     s.setHostRoleStatus(hostname, Role.HBASE_MASTER.name(), HostRoleStatus.COMPLETED);
@@ -747,7 +749,7 @@ public class TestActionDBAccessorImpl {
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
 
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "", clusters);
 
     s.setHostRoleStatus(hostname, Role.HBASE_REGIONSERVER.name(), HostRoleStatus.PENDING);
     s.setHostRoleStatus(hostname, Role.HBASE_MASTER.name(), HostRoleStatus.COMPLETED);
@@ -756,7 +758,7 @@ public class TestActionDBAccessorImpl {
 
   private Stage createStubStage(String hostname, long requestId, long stageId) {
     Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action db accessor test",
-      "clusterHostInfo", "commandParamsStage", "hostParamsStage");
+      "commandParamsStage", "hostParamsStage");
     s.setStageId(stageId);
     s.addHostRoleExecutionCommand(hostname, Role.HBASE_MASTER,
         RoleCommand.START,
@@ -774,7 +776,7 @@ public class TestActionDBAccessorImpl {
   private void populateActionDBWithCustomAction(ActionDBAccessor db, String hostname,
                                 long requestId, long stageId) throws AmbariException {
     Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action db accessor test",
-      "", "commandParamsStage", "hostParamsStage");
+      "commandParamsStage", "hostParamsStage");
     s.setStageId(stageId);
     s.addHostRoleExecutionCommand(hostname, Role.valueOf(actionName),
         RoleCommand.ACTIONEXECUTE,
@@ -785,20 +787,22 @@ public class TestActionDBAccessorImpl {
     final RequestResourceFilter resourceFilter = new RequestResourceFilter("HBASE", "HBASE_MASTER", null);
     List<RequestResourceFilter> resourceFilters = new
       ArrayList<RequestResourceFilter>() {{ add(resourceFilter); }};
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "", clusters);
+    request.setClusterHostInfo("");
     db.persistActions(request);
   }
 
   private void populateActionDBWithServerAction(ActionDBAccessor db, String hostname,
                                                 long requestId, long stageId) throws AmbariException {
     Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action db accessor test",
-        "", "commandParamsStage", "hostParamsStage");
+        "commandParamsStage", "hostParamsStage");
     s.setStageId(stageId);
     s.addServerActionCommand(serverActionName, null, Role.AMBARI_SERVER_ACTION,
         RoleCommand.ACTIONEXECUTE, clusterName, null, null, "command details", null, 300, false, false);
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "", clusters);
+    request.setClusterHostInfo("");
     db.persistActions(request);
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java
index fbd7c4e..410de80 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java
@@ -220,7 +220,7 @@ public class TestActionManager {
   }
 
   private void populateActionDB(ActionDBAccessor db, String hostname) throws AmbariException {
-    Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action manager test", "clusterHostInfo", "commandParamsStage", "hostParamsStage");
+    Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action manager test", "commandParamsStage", "hostParamsStage");
     s.setStageId(stageId);
     s.addHostRoleExecutionCommand(hostname, Role.HBASE_MASTER,
         RoleCommand.START,
@@ -228,12 +228,12 @@ public class TestActionManager {
             hostname, System.currentTimeMillis()), "cluster1", "HBASE", false, false);
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "clusterHostInfo", clusters);
     db.persistActions(request);
   }
 
   private void populateActionDBWithTwoCommands(ActionDBAccessor db, String hostname) throws AmbariException {
-    Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action manager test", "clusterHostInfo", "commandParamsStage", "hostParamsStage");
+    Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action manager test", "commandParamsStage", "hostParamsStage");
     s.setStageId(stageId);
     s.addHostRoleExecutionCommand(hostname, Role.HBASE_MASTER,
         RoleCommand.START,
@@ -245,7 +245,7 @@ public class TestActionManager {
           hostname, System.currentTimeMillis()), "cluster1", "HBASE", false, false);
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "clusterHostInfo", clusters);
     db.persistActions(request);
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
index b1a7524..869234b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
@@ -210,7 +210,7 @@ public class TestActionScheduler {
     ActionDBAccessor db = mock(ActionDBAccessorImpl.class);
     HostRoleCommandDAO hostRoleCommandDAOMock = mock(HostRoleCommandDAO.class);
     Mockito.doNothing().when(hostRoleCommandDAOMock).publishTaskCreateEvent(anyListOf(HostRoleCommand.class));
-    Stage s = StageUtils.getATestStage(1, 977, hostname, CLUSTER_HOST_INFO,
+    Stage s = StageUtils.getATestStage(1, 977, hostname,
       "{\"host_param\":\"param_value\"}", "{\"stage_param\":\"param_value\"}");
 
     List<Stage> stages = Collections.singletonList(s);
@@ -219,8 +219,10 @@ public class TestActionScheduler {
 
     RequestEntity request = mock(RequestEntity.class);
     when(request.isExclusive()).thenReturn(false);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
+
     //Keep large number of attempts so that the task is not expired finally
     //Small action timeout to test rescheduling
     ActionScheduler scheduler = new ActionScheduler(100, 5, db, aq, fsm,
@@ -306,7 +308,7 @@ public class TestActionScheduler {
     hostEntity.setHostName(hostname);
     hostDAO.create(hostEntity);
 
-    final Stage s = StageUtils.getATestStage(1, 977, hostname, CLUSTER_HOST_INFO,
+    final Stage s = StageUtils.getATestStage(1, 977, hostname,
       "{\"host_param\":\"param_value\"}", "{\"stage_param\":\"param_value\"}");
     s.addHostRoleExecutionCommand(hostname, Role.SECONDARY_NAMENODE, RoleCommand.INSTALL,
             new ServiceComponentHostInstallEvent("SECONDARY_NAMENODE", hostname, System.currentTimeMillis(), "HDP-1.2.0"),
@@ -322,6 +324,7 @@ public class TestActionScheduler {
 
     RequestEntity request = mock(RequestEntity.class);
     when(request.isExclusive()).thenReturn(false);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
     doAnswer(new Answer<Void>() {
@@ -395,7 +398,7 @@ public class TestActionScheduler {
     when(host.getState()).thenReturn(HostState.HEARTBEAT_LOST);
     when(host.getHostName()).thenReturn(hostname);
 
-    final Stage s = StageUtils.getATestStage(1, 977, hostname, CLUSTER_HOST_INFO,
+    final Stage s = StageUtils.getATestStage(1, 977, hostname,
       "{\"host_param\":\"param_value\"}", "{\"stage_param\":\"param_value\"}");
 
     List<Stage> stages = Collections.singletonList(s);
@@ -483,7 +486,7 @@ public class TestActionScheduler {
     when(serviceObj.getCluster()).thenReturn(oneClusterMock);
 
     final Stage stage = stageFactory.createNew(1, "/tmp", "cluster1", 1L, "stageWith2Tasks",
-      CLUSTER_HOST_INFO, "{\"command_param\":\"param_value\"}", "{\"host_param\":\"param_value\"}");
+      "{\"command_param\":\"param_value\"}", "{\"host_param\":\"param_value\"}");
     addInstallTaskToStage(stage, hostname1, "cluster1", Role.DATANODE,
       RoleCommand.INSTALL, Service.Type.HDFS, 1);
     addInstallTaskToStage(stage, hostname2, "cluster1", Role.NAMENODE,
@@ -613,6 +616,7 @@ public class TestActionScheduler {
 
     RequestEntity request = mock(RequestEntity.class);
     when(request.isExclusive()).thenReturn(false);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
     when(db.getCommandsInProgressCount()).thenReturn(stages.size());
@@ -730,6 +734,7 @@ public class TestActionScheduler {
 
     RequestEntity request = mock(RequestEntity.class);
     when(request.isExclusive()).thenReturn(false);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
     when(db.getCommandsInProgressCount()).thenReturn(stages.size());
@@ -773,6 +778,7 @@ public class TestActionScheduler {
 
     RequestEntity request = mock(RequestEntity.class);
     when(request.isExclusive()).thenReturn(false);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
     when(db.getCommandsInProgressCount()).thenReturn(stages.size());
@@ -987,6 +993,7 @@ public class TestActionScheduler {
 
     RequestEntity request = mock(RequestEntity.class);
     when(request.isExclusive()).thenReturn(false);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
     when(db.getCommandsInProgressCount()).thenReturn(stages.size());
@@ -1053,7 +1060,7 @@ public class TestActionScheduler {
       String requestContext, int timeout, boolean stageSupportsAutoSkip,
       boolean autoSkipFailedTask) {
 
-    Stage stage = stageFactory.createNew(requestId, "/tmp", "cluster1", 1L, requestContext, CLUSTER_HOST_INFO,
+    Stage stage = stageFactory.createNew(requestId, "/tmp", "cluster1", 1L, requestContext,
       "{}", "{}");
 
     stage.setStageId(stageId);
@@ -1141,6 +1148,7 @@ public class TestActionScheduler {
 
     RequestEntity request = mock(RequestEntity.class);
     when(request.isExclusive()).thenReturn(false);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
     when(db.getCommandsInProgressCount()).thenReturn(firstStageInProgressPerRequest.size());
@@ -1232,6 +1240,7 @@ public class TestActionScheduler {
 
     RequestEntity request = mock(RequestEntity.class);
     when(request.isExclusive()).thenReturn(false);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
     when(db.getCommandsInProgressCount()).thenReturn(stages.size());
@@ -1309,6 +1318,7 @@ public class TestActionScheduler {
 
     RequestEntity request = mock(RequestEntity.class);
     when(request.isExclusive()).thenReturn(false);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
     when(db.getCommandsInProgressCount()).thenReturn(stages.size());
@@ -1374,6 +1384,7 @@ public class TestActionScheduler {
 
     RequestEntity request = mock(RequestEntity.class);
     when(request.isExclusive()).thenReturn(false);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
     when(db.getCommandsInProgressCount()).thenReturn(stages.size());
@@ -1520,7 +1531,7 @@ public class TestActionScheduler {
 
     long now = System.currentTimeMillis();
     Stage stage = stageFactory.createNew(1, "/tmp", "cluster1", 1L,
-        "testRequestFailureBasedOnSuccessFactor", CLUSTER_HOST_INFO, "", "");
+        "testRequestFailureBasedOnSuccessFactor", "", "");
     stage.setStageId(1);
 
     addHostRoleExecutionCommand(now, stage, Role.SQOOP, Service.Type.SQOOP,
@@ -1720,7 +1731,7 @@ public class TestActionScheduler {
 
     long now = System.currentTimeMillis();
     Stage stage = stageFactory.createNew(1, "/tmp", "cluster1", 1L, "testRequestFailureBasedOnSuccessFactor",
-      CLUSTER_HOST_INFO, "", "");
+      "", "");
     stage.setStageId(1);
     stage.addHostRoleExecutionCommand("host1", Role.DATANODE, RoleCommand.UPGRADE,
         new ServiceComponentHostUpgradeEvent(Role.DATANODE.toString(), "host1", now, "HDP-0.2"),
@@ -1871,7 +1882,7 @@ public class TestActionScheduler {
 
   private Stage createStage(String clusterName, int stageId, int requestId) {
     Stage stage = stageFactory.createNew(requestId, "/tmp", clusterName, 1L, "getStageWithSingleTask",
-      CLUSTER_HOST_INFO, "{\"host_param\":\"param_value\"}", "{\"stage_param\":\"param_value\"}");
+      "{\"host_param\":\"param_value\"}", "{\"stage_param\":\"param_value\"}");
     stage.setStageId(stageId);
     return stage;
   }
@@ -1971,7 +1982,6 @@ public class TestActionScheduler {
 
     //Data for stages
     Map<String, Set<String>> clusterHostInfo1 = StageUtils.getGson().fromJson(CLUSTER_HOST_INFO, type);
-    Map<String, Set<String>> clusterHostInfo2 = StageUtils.getGson().fromJson(CLUSTER_HOST_INFO_UPDATED, type);
     int stageId = 1;
     int requestId1 = 1;
     int requestId2 = 2;
@@ -2006,11 +2016,12 @@ public class TestActionScheduler {
 
     RequestEntity request = mock(RequestEntity.class);
     when(request.isExclusive()).thenReturn(false);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
-    Stage s1 = StageUtils.getATestStage(requestId1, stageId, hostname, CLUSTER_HOST_INFO,
+    Stage s1 = StageUtils.getATestStage(requestId1, stageId, hostname,
       "{\"host_param\":\"param_value\"}", "{\"stage_param\":\"param_value\"}");
-    Stage s2 = StageUtils.getATestStage(requestId2, stageId, hostname, CLUSTER_HOST_INFO_UPDATED,
+    Stage s2 = StageUtils.getATestStage(requestId2, stageId, hostname,
       "{\"host_param\":\"param_value\"}", "{\"stage_param\":\"param_value\"}");
 
     when(db.getCommandsInProgressCount()).thenReturn(1);
@@ -2037,7 +2048,7 @@ public class TestActionScheduler {
     ac = waitForQueueSize(hostname, aq, 1, scheduler);
     assertTrue(ac.get(0) instanceof ExecutionCommand);
     assertEquals(String.valueOf(requestId2) + "-" + stageId, ((ExecutionCommand) (ac.get(0))).getCommandId());
-    assertEquals(clusterHostInfo2, ((ExecutionCommand) (ac.get(0))).getClusterHostInfo());
+    assertEquals(clusterHostInfo1, ((ExecutionCommand) (ac.get(0))).getClusterHostInfo());
   }
 
 
@@ -2087,7 +2098,7 @@ public class TestActionScheduler {
     when(serviceObj.getCluster()).thenReturn(oneClusterMock);
 
     Stage stage1 = stageFactory.createNew(1, "/tmp", "cluster1", 1L, "stageWith2Tasks",
-            CLUSTER_HOST_INFO, "", "");
+            "", "");
     addInstallTaskToStage(stage1, hostname1, "cluster1", Role.HBASE_MASTER,
             RoleCommand.INSTALL, Service.Type.HBASE, 1);
     addInstallTaskToStage(stage1, hostname1, "cluster1", Role.HBASE_REGIONSERVER,
@@ -2166,6 +2177,7 @@ public class TestActionScheduler {
     Mockito.doNothing().when(hostRoleCommandDAOMock).publishTaskCreateEvent(anyListOf(HostRoleCommand.class));
 
     RequestEntity request = mock(RequestEntity.class);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(request.isExclusive()).thenReturn(false);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
@@ -2319,6 +2331,7 @@ public class TestActionScheduler {
 
     RequestEntity request = mock(RequestEntity.class);
     when(request.isExclusive()).thenReturn(false);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
     when(db.getCommandsInProgressCount()).thenReturn(stagesInProgress.size());
@@ -2569,10 +2582,13 @@ public class TestActionScheduler {
 
     RequestEntity request1 = mock(RequestEntity.class);
     when(request1.isExclusive()).thenReturn(false);
+    when(request1.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     RequestEntity request2 = mock(RequestEntity.class);
     when(request2.isExclusive()).thenReturn(true);
+    when(request2.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     RequestEntity request3 = mock(RequestEntity.class);
     when(request3.isExclusive()).thenReturn(false);
+    when(request3.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
 
     when(db.getRequestEntity(requestId1)).thenReturn(request1);
     when(db.getRequestEntity(requestId2)).thenReturn(request2);
@@ -2764,6 +2780,7 @@ public class TestActionScheduler {
     Mockito.doNothing().when(hostRoleCommandDAOMock).publishTaskCreateEvent(anyListOf(HostRoleCommand.class));
 
     RequestEntity request = mock(RequestEntity.class);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(request.isExclusive()).thenReturn(false);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestStage.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestStage.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestStage.java
index b76e41e..82db6e1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestStage.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestStage.java
@@ -60,7 +60,7 @@ public class TestStage {
 
   @Test
   public void testTaskTimeout() {
-    Stage s = StageUtils.getATestStage(1, 1, "h1", CLUSTER_HOST_INFO, "{\"host_param\":\"param_value\"}", "{\"stage_param\":\"param_value\"}");
+    Stage s = StageUtils.getATestStage(1, 1, "h1",  "{\"host_param\":\"param_value\"}", "{\"stage_param\":\"param_value\"}");
     s.addHostRoleExecutionCommand("h1", Role.DATANODE, RoleCommand.INSTALL,
         null, "c1", "HDFS", false, false);
     s.addHostRoleExecutionCommand("h1", Role.HBASE_MASTER, RoleCommand.INSTALL,
@@ -75,9 +75,8 @@ public class TestStage {
 
   @Test
   public void testGetRequestContext() {
-    Stage stage = stageFactory.createNew(1, "/logDir", "c1", 1L, "My Context", CLUSTER_HOST_INFO, "", "");
+    Stage stage = stageFactory.createNew(1, "/logDir", "c1", 1L, "My Context", "", "");
     assertEquals("My Context", stage.getRequestContext());
-    assertEquals(CLUSTER_HOST_INFO, stage.getClusterHostInfo());
   }
 
   @After

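For reference, the shape of the factory change these test edits track: stageFactory.createNew() loses its sixth argument, clusterHostInfo, leaving the command-params and host-params JSON strings as the trailing parameters. A hedged sketch of the before/after signature follows; the interface below is a stand-in (the real StageFactory is a Guice assisted-inject factory), with a minimal Stage placeholder so it compiles on its own.

    public interface StageFactorySketch {
      // Minimal placeholder so the sketch is self-contained.
      interface Stage {}

      // Before this commit (as the removed lines suggest):
      //   Stage createNew(long requestId, String logDir, String clusterName,
      //                   long clusterId, String requestContext, String clusterHostInfo,
      //                   String commandParamsStage, String hostParamsStage);

      // After this commit:
      Stage createNew(long requestId, String logDir, String clusterName,
                      long clusterId, String requestContext,
                      String commandParamsStage, String hostParamsStage);
    }
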
http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
index 2dd91c0..ceda927 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
@@ -987,7 +987,7 @@ public class HeartbeatProcessorTest {
     serviceComponentHost2.setStackVersion(stack120);
 
     Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action manager test",
-        "clusterHostInfo", "commandParamsStage", "hostParamsStage");
+        "commandParamsStage", "hostParamsStage");
     s.setStageId(stageId);
     s.addHostRoleExecutionCommand(DummyHostname1, Role.DATANODE, RoleCommand.UPGRADE,
         new ServiceComponentHostUpgradeEvent(Role.DATANODE.toString(),
@@ -999,7 +999,7 @@ public class HeartbeatProcessorTest {
         DummyCluster, "HDFS", false, false);
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "clusterHostInfo", clusters);
     actionDBAccessor.persistActions(request);
     CommandReport cr = new CommandReport();
     cr.setActionId(StageUtils.getActionId(requestId, stageId));

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
index 2e65e8d..a13053c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
@@ -223,7 +223,7 @@ public class HeartbeatTestHelper {
 
   public void populateActionDB(ActionDBAccessor db, String DummyHostname1, long requestId, long stageId) throws AmbariException {
     Stage s = stageFactory.createNew(requestId, "/a/b", DummyCluster, 1L, "heartbeat handler test",
-        "clusterHostInfo", "commandParamsStage", "hostParamsStage");
+        "commandParamsStage", "hostParamsStage");
     s.setStageId(stageId);
     String filename = null;
     s.addHostRoleExecutionCommand(DummyHostname1, Role.HBASE_MASTER,
@@ -232,7 +232,7 @@ public class HeartbeatTestHelper {
             DummyHostname1, System.currentTimeMillis()), DummyCluster, HBASE, false, false);
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "clusterHostInfo", clusters);
     db.persistActions(request);
   }
 

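The heartbeat helpers show the companion change on the request side: the Request constructor now takes the cluster host info JSON explicitly, between the stage list and the clusters reference. A small stand-in sketch of the new call shape, with placeholder types rather than the Ambari classes:

    import java.util.ArrayList;
    import java.util.List;

    public class RequestConstructorSketch {
      static class Stage {}
      static class Clusters {}

      static class Request {
        // Before: Request(List<Stage> stages, Clusters clusters)
        // After: the cluster host info travels with the request itself.
        Request(List<Stage> stages, String clusterHostInfo, Clusters clusters) {
        }
      }

      public static void main(String[] args) {
        List<Stage> stages = new ArrayList<>();
        stages.add(new Stage());
        new Request(stages, "clusterHostInfo", new Clusters());
      }
    }
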
http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
index 909bf69..5ced924 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
@@ -864,7 +864,7 @@ public class TestHeartbeatHandler {
     serviceComponentHost1.setState(State.INSTALLING);
 
     Stage s = stageFactory.createNew(1, "/a/b", "cluster1", 1L, "action manager test",
-      "clusterHostInfo", "commandParamsStage", "hostParamsStage");
+      "commandParamsStage", "hostParamsStage");
     s.setStageId(1);
     s.addHostRoleExecutionCommand(DummyHostname1, Role.DATANODE, RoleCommand.INSTALL,
       new ServiceComponentHostInstallEvent(Role.DATANODE.toString(),
@@ -872,7 +872,7 @@ public class TestHeartbeatHandler {
           DummyCluster, "HDFS", false, false);
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "clusterHostInfo", clusters);
     actionDBAccessor.persistActions(request);
     actionDBAccessor.abortHostRole(DummyHostname1, 1, 1, Role.DATANODE.name());
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 628a56f..cbd5de3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -8272,7 +8272,7 @@ public class AmbariManagementControllerTest {
 
     List<Stage> stages = new ArrayList<>();
     stages.add(stageFactory.createNew(requestId1, "/a1", cluster1, clusterId, context,
-        CLUSTER_HOST_INFO, "", ""));
+        "", ""));
     stages.get(0).setStageId(1);
     stages.get(0).addHostRoleExecutionCommand(hostName1, Role.HBASE_MASTER,
             RoleCommand.START,
@@ -8281,7 +8281,7 @@ public class AmbariManagementControllerTest {
             cluster1, "HBASE", false, false);
 
     stages.add(stageFactory.createNew(requestId1, "/a2", cluster1, clusterId, context,
-      CLUSTER_HOST_INFO, "", ""));
+      "", ""));
     stages.get(1).setStageId(2);
     stages.get(1).addHostRoleExecutionCommand(hostName1, Role.HBASE_CLIENT,
             RoleCommand.START,
@@ -8289,19 +8289,19 @@ public class AmbariManagementControllerTest {
                     hostName1, System.currentTimeMillis()), cluster1, "HBASE", false, false);
 
     stages.add(stageFactory.createNew(requestId1, "/a3", cluster1, clusterId, context,
-      CLUSTER_HOST_INFO, "", ""));
+      "", ""));
     stages.get(2).setStageId(3);
     stages.get(2).addHostRoleExecutionCommand(hostName1, Role.HBASE_CLIENT,
             RoleCommand.START,
             new ServiceComponentHostStartEvent(Role.HBASE_CLIENT.toString(),
                     hostName1, System.currentTimeMillis()), cluster1, "HBASE", false, false);
 
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "", clusters);
     actionDB.persistActions(request);
 
     stages.clear();
     stages.add(stageFactory.createNew(requestId2, "/a4", cluster1, clusterId, context,
-      CLUSTER_HOST_INFO, "", ""));
+      "", ""));
     stages.get(0).setStageId(4);
     stages.get(0).addHostRoleExecutionCommand(hostName1, Role.HBASE_CLIENT,
             RoleCommand.START,
@@ -8309,14 +8309,14 @@ public class AmbariManagementControllerTest {
                     hostName1, System.currentTimeMillis()), cluster1, "HBASE", false, false);
 
     stages.add(stageFactory.createNew(requestId2, "/a5", cluster1, clusterId, context,
-      CLUSTER_HOST_INFO, "", ""));
+      "", ""));
     stages.get(1).setStageId(5);
     stages.get(1).addHostRoleExecutionCommand(hostName1, Role.HBASE_CLIENT,
             RoleCommand.START,
             new ServiceComponentHostStartEvent(Role.HBASE_CLIENT.toString(),
                     hostName1, System.currentTimeMillis()), cluster1, "HBASE", false, false);
 
-    request = new Request(stages, clusters);
+    request = new Request(stages, "", clusters);
     actionDB.persistActions(request);
 
     // Add a stage to execute a task as server-side action on the Ambari server
@@ -8324,12 +8324,12 @@ public class AmbariManagementControllerTest {
         new ServiceComponentHostServerActionEvent(Role.AMBARI_SERVER_ACTION.toString(), null, System.currentTimeMillis());
     stages.clear();
     stages.add(stageFactory.createNew(requestId3, "/a6", cluster1, clusterId, context,
-      CLUSTER_HOST_INFO, "", ""));
+      "", ""));
     stages.get(0).setStageId(6);
     stages.get(0).addServerActionCommand("some.action.class.name", null, Role.AMBARI_SERVER_ACTION,
         RoleCommand.EXECUTE, cluster1, serviceComponentHostServerActionEvent, null, null, null, null, false, false);
     assertEquals("_internal_ambari", stages.get(0).getOrderedHostRoleCommands().get(0).getHostName());
-    request = new Request(stages, clusters);
+    request = new Request(stages, "", clusters);
     actionDB.persistActions(request);
 
     org.apache.ambari.server.controller.spi.Request spiRequest = PropertyHelper.getReadRequest(

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
index 5275580..e654c72 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
@@ -1103,6 +1103,8 @@ public class KerberosHelperTest extends EasyMockSupport {
     // Create Preparation Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
 
@@ -1110,28 +1112,38 @@ public class KerberosHelperTest extends EasyMockSupport {
       // Create Principals Stage
       expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
       // Create Keytabs Stage
       expect(requestStageContainer.getLastStageId()).andReturn(0L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
       // Distribute Keytabs Stage
       expect(requestStageContainer.getLastStageId()).andReturn(1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
     }
     // Update Configs Stage
     expect(requestStageContainer.getLastStageId()).andReturn(2L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // TODO: Add more of these when more stages are added.
     // Clean-up/Finalize Stage
     expect(requestStageContainer.getLastStageId()).andReturn(3L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
 
@@ -1294,36 +1306,50 @@ public class KerberosHelperTest extends EasyMockSupport {
     // Create Preparation Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
-    requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
+    requestStageContainer.addStages(anyObject(List.class));
     expectLastCall().once();
 
     if (identitiesManaged) {
       // Create Principals Stage
       expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
-      requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
+      requestStageContainer.addStages(anyObject(List.class));
       expectLastCall().once();
       // Create Keytabs Stage
       expect(requestStageContainer.getLastStageId()).andReturn(0L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
-      requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
+      requestStageContainer.addStages(anyObject(List.class));
       expectLastCall().once();
       // Distribute Keytabs Stage
       expect(requestStageContainer.getLastStageId()).andReturn(1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
-      requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
+      requestStageContainer.addStages(anyObject(List.class));
       expectLastCall().once();
     }
     // Update Configs Stage
     expect(requestStageContainer.getLastStageId()).andReturn(2L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
-    requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
+    requestStageContainer.addStages(anyObject(List.class));
     expectLastCall().once();
     // TODO: Add more of these when more stages are added.
     // Clean-up/Finalize Stage
     expect(requestStageContainer.getLastStageId()).andReturn(3L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
-    requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
+
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
+
+    requestStageContainer.addStages(anyObject(List.class));
     expectLastCall().once();
 
     replayAll();
@@ -1479,42 +1505,58 @@ public class KerberosHelperTest extends EasyMockSupport {
     // Hook Stage
     expect(requestStageContainer.getLastStageId()).andReturn(2L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(anyObject(List.class));
     expectLastCall().once();
     // StopZk Stage
     expect(requestStageContainer.getLastStageId()).andReturn(2L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(anyObject(List.class));
     expectLastCall().once();
     // Preparation Stage
     expect(requestStageContainer.getLastStageId()).andReturn(2L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Update Configs Stage
     expect(requestStageContainer.getLastStageId()).andReturn(2L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Destroy Principals Stage
     expect(requestStageContainer.getLastStageId()).andReturn(2L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Delete Keytabs Stage
     expect(requestStageContainer.getLastStageId()).andReturn(2L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Finalize Stage
     expect(requestStageContainer.getLastStageId()).andReturn(3L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
 
     // Cleanup Stage
     expect(requestStageContainer.getLastStageId()).andReturn(3L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
 
@@ -1687,31 +1729,43 @@ public class KerberosHelperTest extends EasyMockSupport {
       // Create Preparation Stage
       expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
       // Create Principals Stage
       expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
       // Create Keytabs Stage
       expect(requestStageContainer.getLastStageId()).andReturn(0L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
       // Distribute Keytabs Stage
       expect(requestStageContainer.getLastStageId()).andReturn(1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
       // Update Configurations Stage
       expect(requestStageContainer.getLastStageId()).andReturn(1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
       // Clean-up/Finalize Stage
       expect(requestStageContainer.getLastStageId()).andReturn(3L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
     } else {
@@ -2809,7 +2863,7 @@ public class KerberosHelperTest extends EasyMockSupport {
   private void setupStageFactory() {
     final StageFactory stageFactory = injector.getInstance(StageFactory.class);
     expect(stageFactory.createNew(anyLong(), anyObject(String.class), anyObject(String.class),
-        anyLong(), anyObject(String.class), anyObject(String.class), anyObject(String.class),
+        anyLong(), anyObject(String.class), anyObject(String.class),
         anyObject(String.class)))
         .andAnswer(new IAnswer<Stage>() {
           @Override
@@ -3061,26 +3115,36 @@ public class KerberosHelperTest extends EasyMockSupport {
     // Preparation Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Create Principals Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Create Keytabs Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Distribute Keytabs Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Clean-up/Finalize Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
 
@@ -3253,21 +3317,29 @@ public class KerberosHelperTest extends EasyMockSupport {
     // Preparation Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Delete Principals Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Delete Keytabs Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Clean-up/Finalize Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
 
@@ -3420,26 +3492,37 @@ public class KerberosHelperTest extends EasyMockSupport {
       // Preparation Stage
       expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
       // Create Principals Stage
       expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
+
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
       // Create Keytabs Stage
       expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
       // Distribute Keytabs Stage
       expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
       // Clean-up/Finalize Stage
       expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
     }
@@ -3582,21 +3665,29 @@ public class KerberosHelperTest extends EasyMockSupport {
     // Preparation Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Delete Principals Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Delete Keytabs Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Clean-up/Finalize Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
 

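These KerberosHelperTest hunks add the same two-line setClusterHostInfo()/expectLastCall() expectation before every stage's addStages() call, which is why the diff is so long. If the per-stage expectations ever need another pass, the whole repeated block could be factored into a helper along these lines; this is hypothetical, not part of the commit, and the nested types are stand-ins for the Ambari classes.

    import static org.easymock.EasyMock.anyString;
    import static org.easymock.EasyMock.expect;
    import static org.easymock.EasyMock.expectLastCall;

    import java.util.List;

    import org.easymock.EasyMock;

    public class StageExpectationSketch {
      interface Stage {}

      // Stand-in for RequestStageContainer; only the methods the repeated
      // expectation block touches are modeled.
      interface RequestStageContainer {
        long getLastStageId();
        long getId();
        void setClusterHostInfo(String clusterHostInfo);
        void addStages(List<Stage> stages);
      }

      // One call per stage replaces the expectation block the diff repeats.
      static void expectStage(RequestStageContainer container, long lastStageId) {
        expect(container.getLastStageId()).andReturn(lastStageId).anyTimes();
        expect(container.getId()).andReturn(1L).once();
        container.setClusterHostInfo(anyString());
        expectLastCall().once();
        container.addStages(EasyMock.<List<Stage>>anyObject());
        expectLastCall().once();
      }
    }
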
http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/CalculatedStatusTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/CalculatedStatusTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/CalculatedStatusTest.java
index c399a4c..a0ec67f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/CalculatedStatusTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/CalculatedStatusTest.java
@@ -743,7 +743,7 @@ public class CalculatedStatusTest {
     private final List<HostRoleCommand> hostRoleCommands = new LinkedList<>();
 
     private TestStage() {
-      super(1L, "", "", 1L, "", "", "", "", hostRoleCommandFactory, ecwFactory);
+      super(1L, "", "", 1L, "", "", "", hostRoleCommandFactory, ecwFactory);
     }
 
     void setHostRoleCommands(Collection<HostRoleCommandEntity> tasks) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
index 8f7b31d..eaf54c3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
@@ -337,7 +337,7 @@ public class ClusterStackVersionResourceProviderTest {
     // Check that we create proper stage count
     expect(stageFactory.createNew(anyLong(), anyObject(String.class),
             anyObject(String.class), anyLong(),
-            anyObject(String.class), anyObject(String.class), anyObject(String.class),
+            anyObject(String.class), anyObject(String.class),
             anyObject(String.class))).andReturn(stage).
             times((int) Math.ceil(hostCount / MAX_TASKS_PER_STAGE));
 
@@ -555,7 +555,7 @@ public class ClusterStackVersionResourceProviderTest {
     // Check that we create proper stage count
     expect(stageFactory.createNew(anyLong(), anyObject(String.class),
             anyObject(String.class), anyLong(),
-            anyObject(String.class), anyObject(String.class), anyObject(String.class),
+            anyObject(String.class), anyObject(String.class),
             anyObject(String.class))).andReturn(stage).
             times((int) Math.ceil(hostCount / MAX_TASKS_PER_STAGE));
 
@@ -784,7 +784,7 @@ public class ClusterStackVersionResourceProviderTest {
     // Check that we create proper stage count
     expect(stageFactory.createNew(anyLong(), anyObject(String.class),
             anyObject(String.class), anyLong(),
-            anyObject(String.class), anyObject(String.class), anyObject(String.class),
+            anyObject(String.class), anyObject(String.class),
             anyObject(String.class))).andReturn(stage).
             times((int) Math.ceil(hostCount / MAX_TASKS_PER_STAGE));
 
@@ -1027,7 +1027,7 @@ public class ClusterStackVersionResourceProviderTest {
     // Check that we create proper stage count
     expect(stageFactory.createNew(anyLong(), anyObject(String.class),
             anyObject(String.class), anyLong(),
-            anyObject(String.class), anyObject(String.class), anyObject(String.class),
+            anyObject(String.class), anyObject(String.class),
             anyObject(String.class))).andReturn(stage).
             times((int) Math.ceil(hostCount / MAX_TASKS_PER_STAGE));
 
@@ -1557,7 +1557,7 @@ public class ClusterStackVersionResourceProviderTest {
     // Check that we create proper stage count
     expect(stageFactory.createNew(anyLong(), anyObject(String.class),
         anyObject(String.class), anyLong(),
-        anyObject(String.class), anyObject(String.class), anyObject(String.class),
+        anyObject(String.class), anyObject(String.class),
         anyObject(String.class))).andReturn(stage).
         times((int) Math.ceil(hostCount / MAX_TASKS_PER_STAGE));
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestStageContainerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestStageContainerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestStageContainerTest.java
index f60915c..4fcc814 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestStageContainerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestStageContainerTest.java
@@ -18,6 +18,7 @@
 
 package org.apache.ambari.server.controller.internal;
 
+import static org.easymock.EasyMock.anyString;
 import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.createStrictMock;
 import static org.easymock.EasyMock.expect;
@@ -143,7 +144,7 @@ public class RequestStageContainerTest {
     stages.add(stage2);
 
     //expectations
-    expect(requestFactory.createNewFromStages(stages)).andReturn(request);
+    expect(requestFactory.createNewFromStages(stages, "{}")).andReturn(request);
     expect(request.getStages()).andReturn(stages).anyTimes();
     actionManager.sendActions(request, null);
 

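RequestStageContainer now threads its cluster host info into the request factory, which is why createNewFromStages() gains a second argument ("{}" in this test). A stand-in sketch of the new factory shape; the interface is hypothetical, since the real RequestFactory is a Guice assisted-inject factory.

    import java.util.List;

    public interface RequestFactorySketch {
      interface Stage {}
      interface Request {}

      // Before: Request createNewFromStages(List<Stage> stages);
      // After: the container passes its clusterHostInfo JSON along, matching
      // the "{}" the updated expectation above supplies.
      Request createNewFromStages(List<Stage> stages, String clusterHostInfo);
    }
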
http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/test/java/org/apache/ambari/server/hooks/users/UserHookServiceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/hooks/users/UserHookServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/hooks/users/UserHookServiceTest.java
index e8eb3e8..56dd1e2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/hooks/users/UserHookServiceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/hooks/users/UserHookServiceTest.java
@@ -217,8 +217,8 @@ public class UserHookServiceTest extends EasyMockSupport {
     // TBD refine expectations to validate the logic / eg capture arguments
     stageMock.addServerActionCommand(EasyMock.anyString(), EasyMock.anyString(), EasyMock.anyObject(Role.class), EasyMock.anyObject(RoleCommand.class), EasyMock.anyString(), EasyMock.anyObject(ServiceComponentHostServerActionEvent.class),
         EasyMock.<Map<String, String>>anyObject(), EasyMock.anyString(), EasyMock.<Map<String, Map<String, String>>>anyObject(), EasyMock.anyInt(), EasyMock.anyBoolean(), EasyMock.anyBoolean());
-    EasyMock.expect(requestFactoryMock.createNewFromStages(Arrays.asList(stageMock))).andReturn(null);
-    EasyMock.expect(stageFactoryMock.createNew(1, "/var/lib/ambari-server/tmp:1", "test-cluster", 1, "Post user creation hook for [ 1 ] users", "{}", "{}", "{}")).andReturn(stageMock);
+    EasyMock.expect(requestFactoryMock.createNewFromStages(Arrays.asList(stageMock), "{}")).andReturn(null);
+    EasyMock.expect(stageFactoryMock.createNew(1, "/var/lib/ambari-server/tmp:1", "test-cluster", 1, "Post user creation hook for [ 1 ] users", "{}", "{}")).andReturn(stageMock);
 
 
     replayAll();

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/test/java/org/apache/ambari/server/serveraction/ServerActionExecutorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/ServerActionExecutorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/ServerActionExecutorTest.java
index 2feef41..4d1b48b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/ServerActionExecutorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/ServerActionExecutorTest.java
@@ -127,7 +127,7 @@ public class ServerActionExecutorTest {
     final Request request = createMockRequest();
     stageFactory = createNiceMock(StageFactory.class);
 
-    final Stage stage = stageFactory.createNew(1, "/tmp", "cluster1", 978, "context", CLUSTER_HOST_INFO,
+    final Stage stage = stageFactory.createNew(1, "/tmp", "cluster1", 978, "context",
         "{\"host_param\":\"param_value\"}", "{\"stage_param\":\"param_value\"}");
 
     stage.addServerActionCommand(ManualStageAction.class.getName(),
@@ -318,14 +318,13 @@ public class ServerActionExecutorTest {
                                                 final int timeout) {
     stageFactory = createNiceMock(StageFactory.class);
     expect(stageFactory.createNew(anyLong(), anyObject(String.class), anyObject(String.class),
-        anyLong(), anyObject(String.class), anyObject(String.class),
-        anyObject(String.class), anyObject(String.class))).
+        anyLong(), anyObject(String.class), anyObject(String.class), anyObject(String.class))).
         andAnswer(new IAnswer<Stage>() {
 
           @Override
           public Stage answer() throws Throwable {
             Stage stage = stageFactory.createNew(requestId, "/tmp", "cluster1",
-                1L, requestContext, CLUSTER_HOST_INFO, "{}", "{}");
+                1L, requestContext, "{}", "{}");
 
             stage.setStageId(stageId);
             stage.addServerActionCommand(MockServerAction.class.getName(), null,
@@ -338,7 +337,7 @@ public class ServerActionExecutorTest {
           }
         });
 
-    Stage stage = stageFactory.createNew(requestId, "", "", 1L, "", "", "", "");
+    Stage stage = stageFactory.createNew(requestId, "", "", 1L, "", "", "");
     return stage;
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java b/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java
index 7063147..3e592b2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java
@@ -126,7 +126,7 @@ public class TestStagePlanner {
     RoleGraph rg = roleGraphFactory.createNew(rco);
     long now = System.currentTimeMillis();
     Stage stage = stageFactory.createNew(1, "/tmp", "cluster1", 1L, "execution command wrapper test",
-      "clusterHostInfo", "commandParamsStage", "hostParamsStage");
+      "commandParamsStage", "hostParamsStage");
     stage.setStageId(1);
     stage.addServerActionCommand("RESTART", null, Role.HIVE_METASTORE,
       RoleCommand.CUSTOM_COMMAND, "cluster1",

http://git-wip-us.apache.org/repos/asf/ambari/blob/f2bbe478/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java b/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
index e9bd27c..5b39086 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
@@ -194,7 +194,7 @@ public class StageUtilsTest extends EasyMockSupport {
       JsonMappingException, JAXBException, IOException {
     StageUtils stageUtils = new StageUtils(injector.getInstance(StageFactory.class));
     Stage s = StageUtils.getATestStage(1, 2, "host1", "clusterHostInfo", "hostParamsStage");
-    ExecutionCommand cmd = s.getExecutionCommands(getHostName()).get(0).getExecutionCommand();
+    ExecutionCommand cmd = s.getExecutionCommands("host1").get(0).getExecutionCommand();
     HashMap<String, Map<String, String>> configTags = new HashMap<>();
     Map<String, String> globalTag = new HashMap<>();
     globalTag.put("tag", "version1");


[40/50] [abbrv] ambari git commit: AMBARI-21060. HDP 3.0 TP - create service definition for Oozie with configs, kerberos, widgets, etc.(vbrodetskyi)

Posted by ad...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_server.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_server.py
new file mode 100644
index 0000000..9320bc3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_server.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.core import Logger
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions.constants import Direction
+from resource_management.libraries.functions.security_commons import build_expectations
+from resource_management.libraries.functions.security_commons import cached_kinit_executor
+from resource_management.libraries.functions.security_commons import get_params_from_filesystem
+from resource_management.libraries.functions.security_commons import validate_security_config_properties
+from resource_management.libraries.functions.security_commons import FILE_TYPE_XML
+
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons.constants import UPGRADE_TYPE_NON_ROLLING, UPGRADE_TYPE_ROLLING
+
+from oozie import oozie
+from oozie_service import oozie_service
+from oozie_server_upgrade import OozieUpgrade
+
+from check_oozie_server_status import check_oozie_server_status
+from resource_management.core.resources.zkmigrator import ZkMigrator
+
+class OozieServer(Script):
+
+  def get_component_name(self):
+    return "oozie-server"
+
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env, upgrade_type=None):
+    import params
+
+    # The configure command doesn't actually receive the upgrade_type from Script.py, so get it from the config dictionary
+    if upgrade_type is None:
+      upgrade_type = Script.get_upgrade_type(default("/commandParams/upgrade_type", ""))
+
+    if upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE and params.version is not None:
+      Logger.info(format("Configuring Oozie during upgrade type: {upgrade_type}, direction: {params.upgrade_direction}, and version {params.version}"))
+      if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+        # In order for the "<stack-root>/current/oozie-<client/server>" point to the new version of
+        # oozie, we need to create the symlinks both for server and client.
+        # This is required as both need to be pointing to new installed oozie version.
+
+        # Sets the symlink : eg: <stack-root>/current/oozie-client -> <stack-root>/a.b.c.d-<version>/oozie
+        stack_select.select("oozie-client", params.version)
+        # Sets the symlink : eg: <stack-root>/current/oozie-server -> <stack-root>/a.b.c.d-<version>/oozie
+        stack_select.select("oozie-server", params.version)
+
+      if params.version and check_stack_feature(StackFeature.CONFIG_VERSIONING, params.version):
+        conf_select.select(params.stack_name, "oozie", params.version)
+
+    env.set_params(params)
+    oozie(is_server=True)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    self.configure(env)
+
+    # preparing the WAR file must run after configure since configure writes out
+    # oozie-env.sh which is needed to have the right environment directories setup!
+    if upgrade_type is not None:
+      OozieUpgrade.prepare_warfile()
+
+    oozie_service(action='start', upgrade_type=upgrade_type)
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    oozie_service(action='stop', upgrade_type=upgrade_type)
+
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_oozie_server_status()
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class OozieServerDefault(OozieServer):
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    """
+    Performs the tasks that should be done before an upgrade of oozie. This includes:
+      - backing up configurations
+      - running <stack-selector-tool> and <conf-selector-tool>
+      - restoring configurations
+      - preparing the libext directory
+    :param env:
+    :return:
+    """
+    import params
+    env.set_params(params)
+
+    # this function should not execute if the version can't be determined or
+    # the stack does not support rolling upgrade
+    if not (params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version)):
+      return
+
+    Logger.info("Executing Oozie Server Stack Upgrade pre-restart")
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "oozie", params.version)
+      stack_select.select("oozie-server", params.version)
+
+    OozieUpgrade.prepare_libext_directory()
+
+  def disable_security(self, env):
+    import params
+    if not params.stack_supports_zk_security:
+      Logger.info("Stack doesn't support zookeeper security")
+      return
+    if not params.zk_connection_string:
+      Logger.info("No zookeeper connection string. Skipping reverting ACL")
+      return
+    zkmigrator = ZkMigrator(params.zk_connection_string, params.java_exec, params.java64_home, params.jaas_file, params.oozie_user)
+    zkmigrator.set_acls(params.zk_namespace if params.zk_namespace.startswith('/') else '/' + params.zk_namespace, 'world:anyone:crdwa')
+
+  def get_log_folder(self):
+    import params
+    return params.oozie_log_dir
+  
+  def get_user(self):
+    import params
+    return params.oozie_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.pid_file]
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class OozieServerWindows(OozieServer):
+  pass
+
+if __name__ == "__main__":
+  OozieServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_server_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_server_upgrade.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_server_upgrade.py
new file mode 100644
index 0000000..402c7cb
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_server_upgrade.py
@@ -0,0 +1,237 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import glob
+import os
+import shutil
+
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Execute
+from resource_management.core.resources.system import Directory
+from resource_management.core.resources.system import File
+from resource_management.libraries.functions import Direction
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.oozie_prepare_war import prepare_war
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+
+import oozie
+
+BACKUP_TEMP_DIR = "oozie-upgrade-backup"
+BACKUP_CONF_ARCHIVE = "oozie-conf-backup.tar"
+
+class OozieUpgrade(Script):
+
+  @staticmethod
+  def prepare_libext_directory():
+    """
+    Performs the following actions on libext:
+      - creates <stack-root>/current/oozie/libext recursively
+      - sets 777 permissions on it and its parents
+      - downloads JDBC driver JAR if needed
+      - copies Falcon JAR for the Oozie WAR if needed
+    """
+    import params
+
+    # some stack versions don't need the lzo compression libraries
+    target_version_needs_compression_libraries = params.version and check_stack_feature(StackFeature.LZO, params.version)
+
+    # ensure the directory exists
+    Directory(params.oozie_libext_dir, mode = 0777)
+
+    # get all hadooplzo* JAR files
+    # <stack-selector-tool> set hadoop-client has not run yet, therefore we cannot use
+    # <stack-root>/current/hadoop-client; we must use params.version directly.
+    # However, this only works when upgrading beyond 2.2.0.0; don't do this
+    # for a downgrade to 2.2.0.0 since hadoop-lzo will not be present.
+    # This can also be called during a downgrade.
+    # When a version is Installed, it is responsible for downloading the hadoop-lzo packages
+    # if lzo is enabled.
+    if params.lzo_enabled and (params.upgrade_direction == Direction.UPGRADE or target_version_needs_compression_libraries):
+      hadoop_lzo_pattern = 'hadoop-lzo*.jar'
+      hadoop_client_new_lib_dir = format("{stack_root}/{version}/hadoop/lib")
+
+      files = glob.glob(os.path.join(hadoop_client_new_lib_dir, hadoop_lzo_pattern))
+      if not files:
+        raise Fail("There are no files at {0} matching {1}".format(
+          hadoop_client_new_lib_dir, hadoop_lzo_pattern))
+
+      # copy files into libext
+      files_copied = False
+      for file in files:
+        if os.path.isfile(file):
+          Logger.info("Copying {0} to {1}".format(str(file), params.oozie_libext_dir))
+          shutil.copy2(file, params.oozie_libext_dir)
+          files_copied = True
+
+      if not files_copied:
+        raise Fail("There are no files at {0} matching {1}".format(
+          hadoop_client_new_lib_dir, hadoop_lzo_pattern))
+
+    # copy ext ZIP to libext dir
+    oozie_ext_zip_file = params.ext_js_path
+
+    # something like <stack-root>/current/oozie-server/libext/ext-2.2.zip
+    oozie_ext_zip_target_path = os.path.join(params.oozie_libext_dir, params.ext_js_file)
+
+    if not os.path.isfile(oozie_ext_zip_file):
+      raise Fail("Unable to copy {0} because it does not exist".format(oozie_ext_zip_file))
+
+    Logger.info("Copying {0} to {1}".format(oozie_ext_zip_file, params.oozie_libext_dir))
+    Execute(("cp", oozie_ext_zip_file, params.oozie_libext_dir), sudo=True)
+    Execute(("chown", format("{oozie_user}:{user_group}"), oozie_ext_zip_target_path), sudo=True)
+    File(oozie_ext_zip_target_path,
+         mode=0644
+    )
+
+    # Redownload jdbc driver to a new current location
+    oozie.download_database_library_if_needed()
+
+    # get the upgrade version in the event that it's needed
+    upgrade_stack = stack_select._get_upgrade_stack()
+    if upgrade_stack is None or len(upgrade_stack) < 2 or upgrade_stack[1] is None:
+      raise Fail("Unable to determine the stack that is being upgraded to or downgraded to.")
+
+    stack_version = upgrade_stack[1]
+
+    # copy the Falcon JAR if needed; falcon has not upgraded yet, so we must
+    # use the versioned falcon directory
+    if params.has_falcon_host:
+      versioned_falcon_jar_directory = "{0}/{1}/falcon/oozie/ext/falcon-oozie-el-extension-*.jar".format(params.stack_root, stack_version)
+      Logger.info("Copying {0} to {1}".format(versioned_falcon_jar_directory, params.oozie_libext_dir))
+
+      Execute(format('{sudo} cp {versioned_falcon_jar_directory} {oozie_libext_dir}'))
+      Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'))
+
+
+  @staticmethod
+  def prepare_warfile():
+    """
+    Invokes the 'prepare-war' command in Oozie in order to create the WAR.
+    The prepare-war command uses the input WAR from ${OOZIE_HOME}/oozie.war and
+    outputs the prepared WAR to ${CATALINA_BASE}/webapps/oozie.war - because of this,
+    both of these environment variables must point to the upgraded oozie-server path and
+    not oozie-client since it was not yet updated.
+
+    This method will also perform a kinit if necessary.
+    :return:
+    """
+    import params
+
+    # get the kerberos token if necessary to execute commands as oozie
+    if params.security_enabled:
+      oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
+      command = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host}")
+      Execute(command, user=params.oozie_user, logoutput=True)
+
+    prepare_war(params)
+
+
+  def upgrade_oozie_database_and_sharelib(self, env):
+    """
+    Performs the creation and upload of the sharelib and the upgrade of the
+    database. This method will also perform a kinit if necessary.
+    It is run before the upgrade of oozie begins exactly once as part of the
+    upgrade orchestration.
+
+    Since this runs before the upgrade has occurred, it should not use any
+    "current" directories since they will still be pointing to the older
+    version of Oozie. Instead, it should use versioned directories to ensure
+    that the commands executed come from the Oozie version being upgraded to.
+    :return:
+    """
+    import params
+    env.set_params(params)
+
+    Logger.info("Will upgrade the Oozie database")
+
+    # get the kerberos token if necessary to execute commands as oozie
+    if params.security_enabled:
+      oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
+      command = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host}")
+      Execute(command, user=params.oozie_user, logoutput=True)
+
+    upgrade_stack = stack_select._get_upgrade_stack()
+    if upgrade_stack is None or len(upgrade_stack) < 2 or upgrade_stack[1] is None:
+      raise Fail("Unable to determine the stack that is being upgraded to or downgraded to.")
+
+    stack_version = upgrade_stack[1]
+
+    # upgrade oozie DB
+    Logger.info(format('Upgrading the Oozie database, using version {stack_version}'))
+
+    # the database upgrade requires the db driver JAR, but since we have
+    # not yet run <stack-selector-tool> to upgrade the "current" pointers, we have to use
+    # the versioned libext directory as the location
+    versioned_libext_dir = "{0}/{1}/oozie/libext".format(params.stack_root, stack_version)
+    oozie.download_database_library_if_needed(target_directory=versioned_libext_dir)
+
+    database_upgrade_command = "{0}/{1}/oozie/bin/ooziedb.sh upgrade -run".format(params.stack_root, stack_version)
+    Execute(database_upgrade_command, user=params.oozie_user, logoutput=True)
+
+    # install new sharelib to HDFS
+    self.create_sharelib(env)
+
+
+  def create_sharelib(self, env):
+    """
+    Performs the creation and upload of the sharelib.
+    This method will also perform a kinit if necessary.
+    It is run exactly once, before the upgrade of Oozie begins, as part of
+    the upgrade orchestration.
+
+    Since this runs before the upgrade has occurred, it should not use any
+    "current" directories, because they will still be pointing to the older
+    version of Oozie. Instead, it should use versioned directories to ensure
+    that the commands run come from the Oozie version being upgraded to.
+    :param env:
+    :return:
+    """
+    import params
+    env.set_params(params)
+
+    Logger.info('Creating a new sharelib and uploading it to HDFS...')
+
+    # ensure the oozie directory exists for the sharelib
+    params.HdfsResource(format("{oozie_hdfs_user_dir}/share"),
+      action = "create_on_execute",
+      type = "directory",
+      owner = params.oozie_user,
+      group = params.user_group,
+      mode = 0755,
+      recursive_chmod = True)
+
+    params.HdfsResource(None, action = "execute")
+
+    upgrade_stack = stack_select._get_upgrade_stack()
+    if upgrade_stack is None or len(upgrade_stack) < 2 or upgrade_stack[1] is None:
+      raise Fail("Unable to determine the stack that is being upgraded to or downgraded to.")
+
+    stack_version = upgrade_stack[1]
+
+    # install new sharelib to HDFS
+    sharelib_command = "{0}/{1}/oozie/bin/oozie-setup.sh sharelib create -fs {2}".format(
+      params.stack_root, stack_version, params.fs_root)
+
+    Execute(sharelib_command, user=params.oozie_user, logoutput=True)
+
+if __name__ == "__main__":
+  OozieUpgrade().execute()
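
Both upgrade entry points above gate on the (stack_name, stack_version) pair
returned by stack_select._get_upgrade_stack() and then build versioned paths
from its second element. A minimal sketch of that pattern; the stack root and
version string below are illustrative placeholders, not values from this patch:

    def require_upgrade_version(upgrade_stack):
        # refuse to continue unless the target version is known
        if upgrade_stack is None or len(upgrade_stack) < 2 or upgrade_stack[1] is None:
            raise Exception("Unable to determine the stack that is being upgraded to or downgraded to.")
        return upgrade_stack[1]

    stack_version = require_upgrade_version(("HDP", "2.6.0.0-1234"))
    versioned_libext_dir = "{0}/{1}/oozie/libext".format("/usr/hdp", stack_version)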

http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_service.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_service.py
new file mode 100644
index 0000000..5fcbf45
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_service.py
@@ -0,0 +1,188 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+# Python Imports
+import os
+
+# Local Imports
+from oozie import copy_atlas_hive_hook_to_dfs_share_lib
+
+# Resource Management Imports
+from resource_management.core import shell, sudo
+from resource_management.core.shell import as_user
+from resource_management.core.logger import Logger
+from resource_management.core.resources.service import Service
+from resource_management.core.resources.system import Execute, File, Directory
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.show_logs import show_logs
+from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def oozie_service(action='start', upgrade_type=None):
+  import params
+
+  if action == 'start':
+    cmd = format("cmd /C \"cd /d {oozie_tmp_dir} && {oozie_home}\\bin\\ooziedb.cmd create -sqlfile oozie.sql -run\"")
+    Execute(cmd, user=params.oozie_user, ignore_failures=True)
+    Service(params.oozie_server_win_service_name, action="start")
+  elif action == 'stop':
+    Service(params.oozie_server_win_service_name, action="stop")
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def oozie_service(action = 'start', upgrade_type=None):
+  """
+  Starts or stops the Oozie service.
+  :param action: 'start' or 'stop'
+  :param upgrade_type: type of upgrade, either "rolling" or "non_rolling"; when
+  set, some steps are skipped since a variation of them was performed during
+  the upgrade
+  :return:
+  """
+  import params
+
+  environment={'OOZIE_CONFIG': params.conf_dir}
+
+  if params.security_enabled:
+    if params.oozie_principal is None:
+      oozie_principal_with_host = 'missing_principal'
+    else:
+      oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
+    kinit_if_needed = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host};")
+  else:
+    kinit_if_needed = ""
+
+  no_op_test = as_user(format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.oozie_user)
+  
+  if action == 'start':
+    start_cmd = format("cd {oozie_tmp_dir} && {oozie_home}/bin/oozie-start.sh")
+    path_to_jdbc = params.target
+
+    if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
+       params.jdbc_driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver" or \
+       params.jdbc_driver_name == "org.postgresql.Driver" or \
+       params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+
+      if not params.jdbc_driver_jar:
+        if params.jdbc_driver_name in params.default_connectors_map:
+          path_to_jdbc = format("{oozie_libext_dir}/") + params.default_connectors_map[params.jdbc_driver_name]
+        else:
+          path_to_jdbc = None
+        if not path_to_jdbc or not os.path.isfile(path_to_jdbc):
+          path_to_jdbc = format("{oozie_libext_dir}/") + "*"
+          error_message = "Error! Cannot find the jdbc driver with the default name " + params.default_connectors_map[params.jdbc_driver_name] + \
+                " in the oozie lib dir, so the db connection check can fail. Please run 'ambari-server setup --jdbc-db={db_name} --jdbc-driver={path_to_jdbc}' on the server host."
+          Logger.error(error_message)
+
+      db_connection_check_command = format("{java_home}/bin/java -cp {check_db_connection_jar}:{path_to_jdbc} org.apache.ambari.server.DBConnectionVerification '{oozie_jdbc_connection_url}' {oozie_metastore_user_name} {oozie_metastore_user_passwd!p} {jdbc_driver_name}")
+    else:
+      db_connection_check_command = None
+
+    if upgrade_type is None:
+      if not os.path.isfile(path_to_jdbc) and params.jdbc_driver_name == "org.postgresql.Driver":
+        print format("ERROR: jdbc file {target} is unavailable. Please follow these steps:\n" \
+          "1) Download postgresql-9.0-801.jdbc4.jar.\n2) Create the needed directory: mkdir -p {oozie_home}/libserver/\n" \
+          "3) Copy postgresql-9.0-801.jdbc4.jar to the newly created dir: cp /path/to/jdbc/postgresql-9.0-801.jdbc4.jar " \
+          "{oozie_home}/libserver/\n4) Copy postgresql-9.0-801.jdbc4.jar to libext: cp " \
+          "/path/to/jdbc/postgresql-9.0-801.jdbc4.jar {oozie_home}/libext/\n")
+        exit(1)
+
+      if db_connection_check_command:
+        sudo.chmod(params.check_db_connection_jar, 0755)
+        Execute( db_connection_check_command, 
+                 tries=5, 
+                 try_sleep=10,
+                 user=params.oozie_user,
+        )
+
+      Execute( format("cd {oozie_tmp_dir} && {oozie_home}/bin/ooziedb.sh create -sqlfile oozie.sql -run"), 
+               user = params.oozie_user, not_if = no_op_test,
+               ignore_failures = True 
+      )
+      
+      if params.security_enabled:
+        Execute(kinit_if_needed,
+                user = params.oozie_user,
+        )
+
+      if params.sysprep_skip_copy_oozie_share_lib_to_hdfs:
+        Logger.info("Skipping creation of oozie sharelib as host is sys prepped")
+        # Copy current hive-site to hdfs:/user/oozie/share/lib/spark/
+        params.HdfsResource(format("{hdfs_share_dir}/lib/spark/hive-site.xml"),
+                            action="create_on_execute",
+                            type = 'file',
+                            mode=0444,
+                            owner=params.oozie_user,
+                            group=params.user_group,
+                            source=format("{hive_conf_dir}/hive-site.xml"),
+                            )
+        params.HdfsResource(None, action="execute")
+
+        hdfs_share_dir_exists = True # skip time-expensive hadoop fs -ls check
+      elif WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
+        # checking with webhdfs is much faster than executing hadoop fs -ls.
+        util = WebHDFSUtil(params.hdfs_site, params.oozie_user, params.security_enabled)
+        list_status = util.run_command(params.hdfs_share_dir, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
+        hdfs_share_dir_exists = ('FileStatus' in list_status)
+      else:
+        # have to do the time-expensive hadoop fs -ls check; the awk script exits 0 when the share dir is found
+        hdfs_share_dir_exists = shell.call(format("{kinit_if_needed} hadoop --config {hadoop_conf_dir} dfs -ls {hdfs_share_dir} | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'"),
+                                 user=params.oozie_user)[0] == 0
+                                 
+      if not hdfs_share_dir_exists:                      
+        Execute( params.put_shared_lib_to_hdfs_cmd, 
+                 user = params.oozie_user,
+                 path = params.execute_path 
+        )
+        params.HdfsResource(format("{oozie_hdfs_user_dir}/share"),
+                             type="directory",
+                             action="create_on_execute",
+                             mode=0755,
+                             recursive_chmod=True,
+        )
+        params.HdfsResource(None, action="execute")
+        
+
+    try:
+      # start oozie
+      Execute( start_cmd, environment=environment, user = params.oozie_user,
+        not_if = no_op_test )
+
+      copy_atlas_hive_hook_to_dfs_share_lib(upgrade_type, params.upgrade_direction)
+    except:
+      show_logs(params.oozie_log_dir, params.oozie_user)
+      raise
+
+  elif action == 'stop':
+    Directory(params.oozie_tmp_dir,
+              owner=params.oozie_user,
+              create_parents = True,
+    )
+
+    stop_cmd  = format("cd {oozie_tmp_dir} && {oozie_home}/bin/oozied.sh stop 60 -force")
+
+    try:
+      # stop oozie
+      Execute(stop_cmd, environment=environment, only_if  = no_op_test,
+        user = params.oozie_user)
+    except:
+      show_logs(params.oozie_log_dir, params.oozie_user)
+      raise
+
+    File(params.pid_file, action = "delete")
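The start path above prefers a WebHDFS GETFILESTATUS call over "hadoop fs -ls"
to test whether the sharelib directory exists. A minimal sketch of that check
against the WebHDFS REST API, assuming an unsecured cluster and a hypothetical
NameNode HTTP address (the real code delegates this to WebHDFSUtil):

    import json
    import urllib2

    def hdfs_dir_exists(path, namenode="http://namenode.example.com:50070", user="oozie"):
        url = "{0}/webhdfs/v1{1}?op=GETFILESTATUS&user.name={2}".format(namenode, path, user)
        try:
            # a 200 response carries a FileStatus object, mirroring the
            # "'FileStatus' in list_status" test in oozie_service above
            return "FileStatus" in json.load(urllib2.urlopen(url))
        except urllib2.HTTPError as e:
            if e.code == 404:   # the directory does not exist
                return False
            raise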

http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params.py
new file mode 100644
index 0000000..f39d632
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons import OSCheck
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.expect import expect
+from resource_management.libraries.functions.copy_tarball import get_sysprep_skip_copy_tarballs_hdfs
+
+if OSCheck.is_windows_family():
+  from params_windows import *
+else:
+  from params_linux import *
+
+java_home = config['hostLevelParams']['java_home']
+java_version = expect("/hostLevelParams/java_version", int)
+
+
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+# By default, copy the tarballs to HDFS. If the cluster is sysprepped, then set based on the config.
+sysprep_skip_copy_oozie_share_lib_to_hdfs = False
+if host_sys_prepped:
+  sysprep_skip_copy_oozie_share_lib_to_hdfs = default("/configurations/cluster-env/sysprep_skip_copy_oozie_share_lib_to_hdfs", False)
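The sysprep flag above comes from default(), which resolves a '/'-separated
path against the nested command JSON and falls back when any key is missing.
A hedged re-implementation of that contract (the names below are illustrative,
not the library code):

    def default_lookup(config, path, fallback):
        node = config
        for key in path.strip("/").split("/"):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    # e.g. default_lookup(config, "/configurations/cluster-env/"
    #                             "sysprep_skip_copy_oozie_share_lib_to_hdfs", False)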

http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
new file mode 100644
index 0000000..d30a465
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
@@ -0,0 +1,374 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import get_port_from_url
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
+from resource_management.libraries.functions.expect import expect
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions.get_architecture import get_architecture
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
+
+from resource_management.core.utils import PasswordString
+from ambari_commons.credential_store_helper import get_password_from_credential_store
+from urlparse import urlparse
+
+import status_params
+import os
+import re
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY
+
+architecture = get_architecture()
+
+
+# Needed since this writes out the Atlas Hive Hook config file.
+cluster_name = config['clusterName']
+
+hostname = config["hostname"]
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+stack_name = status_params.stack_name
+stack_name_uppercase = stack_name.upper()
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
+agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
+
+stack_root = status_params.stack_root
+stack_version_unformatted =  status_params.stack_version_unformatted
+stack_version_formatted =  status_params.stack_version_formatted
+version_for_stack_feature_checks = get_stack_feature_version(config)
+
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_lib_home = stack_select.get_hadoop_dir("lib")
+
+#spark_conf
+spark_conf_dir = format("{stack_root}/current/spark-client/conf")
+
+#hadoop params
+if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+  stack_version = None
+  upgrade_stack = stack_select._get_upgrade_stack()
+  if upgrade_stack is not None and len(upgrade_stack) == 2 and upgrade_stack[1] is not None:
+    stack_version = upgrade_stack[1]
+
+  # oozie-server or oozie-client, depending on role
+  oozie_root = status_params.component_directory
+
+  # using the correct oozie root dir, format the correct location
+  oozie_lib_dir = format("{stack_root}/current/{oozie_root}")
+  oozie_setup_sh = format("{stack_root}/current/{oozie_root}/bin/oozie-setup.sh")
+  oozie_webapps_dir = format("{stack_root}/current/{oozie_root}/oozie-server/webapps")
+  oozie_webapps_conf_dir = format("{stack_root}/current/{oozie_root}/oozie-server/conf")
+  oozie_libext_dir = format("{stack_root}/current/{oozie_root}/libext")
+  oozie_server_dir = format("{stack_root}/current/{oozie_root}/oozie-server")
+  oozie_shared_lib = format("{stack_root}/current/{oozie_root}/share")
+  oozie_home = format("{stack_root}/current/{oozie_root}")
+  oozie_bin_dir = format("{stack_root}/current/{oozie_root}/bin")
+  oozie_examples_regex = format("{stack_root}/current/{oozie_root}/doc")
+
+  # set the falcon home for copying JARs; if in an upgrade, then use the version of falcon that
+  # matches the version of oozie
+  falcon_home = format("{stack_root}/current/falcon-client")
+  if stack_version is not None:
+    falcon_home = '{0}/{1}/falcon'.format(stack_root, stack_version)
+
+  conf_dir = format("{stack_root}/current/{oozie_root}/conf")
+  hive_conf_dir = format("{conf_dir}/action-conf/hive")
+
+else:
+  oozie_lib_dir = "/var/lib/oozie"
+  oozie_setup_sh = "/usr/lib/oozie/bin/oozie-setup.sh"
+  oozie_webapps_dir = "/var/lib/oozie/oozie-server/webapps/"
+  oozie_webapps_conf_dir = "/var/lib/oozie/oozie-server/conf"
+  oozie_libext_dir = "/usr/lib/oozie/libext"
+  oozie_server_dir = "/var/lib/oozie/oozie-server"
+  oozie_shared_lib = "/usr/lib/oozie/share"
+  oozie_home = "/usr/lib/oozie"
+  oozie_bin_dir = "/usr/bin"
+  falcon_home = '/usr/lib/falcon'
+  conf_dir = "/etc/oozie/conf"
+  hive_conf_dir = "/etc/oozie/conf/action-conf/hive"
+  oozie_examples_regex = "/usr/share/doc/oozie-*"
+
+execute_path = oozie_bin_dir + os.pathsep + hadoop_bin_dir
+
+oozie_user = config['configurations']['oozie-env']['oozie_user']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+smoke_hdfs_user_mode = 0770
+service_check_queue_name = default('/configurations/yarn-env/service_check.queue.name', 'default')
+
+# This config actually contains {oozie_user}
+oozie_admin_users = format(config['configurations']['oozie-env']['oozie_admin_users'])
+
+user_group = config['configurations']['cluster-env']['user_group']
+jdk_location = config['hostLevelParams']['jdk_location']
+check_db_connection_jar_name = "DBConnectionVerification.jar"
+check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
+oozie_tmp_dir = default("/configurations/oozie-env/oozie_tmp_dir", "/var/tmp/oozie")
+oozie_hdfs_user_dir = format("/user/{oozie_user}")
+oozie_pid_dir = status_params.oozie_pid_dir
+pid_file = status_params.pid_file
+hadoop_jar_location = "/usr/lib/hadoop/"
+java_share_dir = "/usr/share/java"
+java64_home = config['hostLevelParams']['java_home']
+java_exec = format("{java64_home}/bin/java")
+ext_js_file = "ext-2.2.zip"
+ext_js_path = format("/usr/share/{stack_name_uppercase}-oozie/{ext_js_file}")
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+oozie_heapsize = config['configurations']['oozie-env']['oozie_heapsize']
+oozie_permsize = config['configurations']['oozie-env']['oozie_permsize']
+
+limits_conf_dir = "/etc/security/limits.d"
+
+oozie_user_nofile_limit = default('/configurations/oozie-env/oozie_user_nofile_limit', 32000)
+oozie_user_nproc_limit = default('/configurations/oozie-env/oozie_user_nproc_limit', 16000)
+
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
+oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
+http_principal = config['configurations']['oozie-site']['oozie.authentication.kerberos.principal']
+oozie_site = config['configurations']['oozie-site']
+# Need this for yarn.nodemanager.recovery.dir in yarn-site
+yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
+yarn_resourcemanager_address = config['configurations']['yarn-site']['yarn.resourcemanager.address']
+zk_namespace = default('/configurations/oozie-site/oozie.zookeeper.namespace', 'oozie')
+zk_connection_string = default('/configurations/oozie-site/oozie.zookeeper.connection.string', None)
+jaas_file = os.path.join(conf_dir, 'zkmigrator_jaas.conf')
+stack_supports_zk_security = check_stack_feature(StackFeature.SECURE_ZOOKEEPER, version_for_stack_feature_checks)
+
+credential_store_enabled = False
+if 'credentialStoreEnabled' in config:
+  credential_store_enabled = config['credentialStoreEnabled']
+
+if security_enabled:
+  oozie_site = dict(config['configurations']['oozie-site'])
+  oozie_principal_with_host = oozie_principal.replace('_HOST', hostname)
+
+  # If a user-supplied oozie.ha.authentication.kerberos.principal property exists in oozie-site,
+  # use it to replace the existing oozie.authentication.kerberos.principal value. This is to ensure
+  # that any special principal name needed for HA is used rather than the Ambari-generated value
+  if "oozie.ha.authentication.kerberos.principal" in oozie_site:
+    oozie_site['oozie.authentication.kerberos.principal'] = oozie_site['oozie.ha.authentication.kerberos.principal']
+    http_principal = oozie_site['oozie.authentication.kerberos.principal']
+
+  # If a user-supplied oozie.ha.authentication.kerberos.keytab property exists in oozie-site,
+  # use it to replace the existing oozie.authentication.kerberos.keytab value. This is to ensure
+  # that any special keytab file needed for HA is used rather than the Ambari-generated value
+  if "oozie.ha.authentication.kerberos.keytab" in oozie_site:
+    oozie_site['oozie.authentication.kerberos.keytab'] = oozie_site['oozie.ha.authentication.kerberos.keytab']
+
+  if stack_version_formatted and check_stack_feature(StackFeature.OOZIE_HOST_KERBEROS, stack_version_formatted):
+    #older versions of oozie have problems when using _HOST in principal
+    oozie_site['oozie.service.HadoopAccessorService.kerberos.principal'] = oozie_principal_with_host
+    oozie_site['oozie.authentication.kerberos.principal'] = http_principal.replace('_HOST', hostname)
+
+smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+oozie_keytab = default("/configurations/oozie-env/oozie_keytab", oozie_service_keytab)
+oozie_env_sh_template = config['configurations']['oozie-env']['content']
+
+oracle_driver_jar_name = "ojdbc6.jar"
+
+oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
+
+if credential_store_enabled:
+  if 'hadoop.security.credential.provider.path' in config['configurations']['oozie-site']:
+    cs_lib_path = config['configurations']['oozie-site']['credentialStoreClassPath']
+    java_home = config['hostLevelParams']['java_home']
+    alias = 'oozie.service.JPAService.jdbc.password'
+    provider_path = config['configurations']['oozie-site']['hadoop.security.credential.provider.path']
+    oozie_metastore_user_passwd = PasswordString(get_password_from_credential_store(alias, provider_path, cs_lib_path, java_home, jdk_location))
+  else:
+    raise Exception("hadoop.security.credential.provider.path property should be set")
+else:
+  oozie_metastore_user_passwd = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password","")
+
+oozie_jdbc_connection_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", "")
+oozie_log_dir = config['configurations']['oozie-env']['oozie_log_dir']
+oozie_data_dir = config['configurations']['oozie-env']['oozie_data_dir']
+oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
+oozie_server_admin_port = config['configurations']['oozie-env']['oozie_admin_port']
+if 'export OOZIE_HTTPS_PORT' in oozie_env_sh_template or 'oozie.https.port' in config['configurations']['oozie-site'] or 'oozie.https.keystore.file' in config['configurations']['oozie-site'] or 'oozie.https.keystore.pass' in config['configurations']['oozie-site']:
+  oozie_secure = '-secure'
+else:
+  oozie_secure = ''
+
+https_port = None
+# try to get https port from oozie-env content
+for line in oozie_env_sh_template.splitlines():
+  result = re.match(r"export\s+OOZIE_HTTPS_PORT=(\d+)", line)
+  if result is not None:
+    https_port = result.group(1)
+# or from oozie-site.xml
+if https_port is None and 'oozie.https.port' in config['configurations']['oozie-site']:
+  https_port = config['configurations']['oozie-site']['oozie.https.port']
+
+oozie_base_url = config['configurations']['oozie-site']['oozie.base.url']
+
+service_check_job_name = default("/configurations/oozie-env/service_check_job_name", "no-op")
+
+# construct proper url for https
+if https_port is not None:
+  parsed_url = urlparse(oozie_base_url)
+  oozie_base_url = oozie_base_url.replace(parsed_url.scheme, "https")
+  if parsed_url.port is None:
+    oozie_base_url = oozie_base_url.replace(parsed_url.hostname, ":".join([parsed_url.hostname, str(https_port)]))
+  else:
+    oozie_base_url = oozie_base_url.replace(str(parsed_url.port), str(https_port))
+
+oozie_setup_sh_current = oozie_setup_sh
+
+hdfs_site = config['configurations']['hdfs-site']
+fs_root = config['configurations']['core-site']['fs.defaultFS']
+
+if stack_version_formatted and check_stack_feature(StackFeature.OOZIE_SETUP_SHARED_LIB, stack_version_formatted):
+  put_shared_lib_to_hdfs_cmd = format("{oozie_setup_sh} sharelib create -fs {fs_root} -locallib {oozie_shared_lib}")
+# for older stacks, fall back to putting the shared lib into HDFS manually
+else:
+  put_shared_lib_to_hdfs_cmd = format("hadoop --config {hadoop_conf_dir} dfs -put {oozie_shared_lib} {oozie_hdfs_user_dir}")
+
+default_connectors_map = { "com.microsoft.sqlserver.jdbc.SQLServerDriver":"sqljdbc4.jar",
+                           "com.mysql.jdbc.Driver":"mysql-connector-java.jar",
+                           "org.postgresql.Driver":"postgresql-jdbc.jar",
+                           "oracle.jdbc.driver.OracleDriver":"ojdbc.jar",
+                           "sap.jdbc4.sqlanywhere.IDriver":"sajdbc4.jar"}
+
+jdbc_driver_name = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "")
+# NOTE: keying on the driver class path is fragile, because class paths can
+# change between driver versions; keying on the db type would be more robust.
+sqla_db_used = False
+previous_jdbc_jar_name = None
+if jdbc_driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver":
+  jdbc_driver_jar = default("/hostLevelParams/custom_mssql_jdbc_name", None)
+  previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
+elif jdbc_driver_name == "com.mysql.jdbc.Driver":
+  jdbc_driver_jar = default("/hostLevelParams/custom_mysql_jdbc_name", None)
+  previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
+elif jdbc_driver_name == "org.postgresql.Driver":
+  jdbc_driver_jar = format("{oozie_home}/libserver/postgresql-9.0-801.jdbc4.jar")  # oozie uses its own postgres jdbc driver
+  previous_jdbc_jar_name = None
+elif jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+  jdbc_driver_jar = default("/hostLevelParams/custom_oracle_jdbc_name", None)
+  previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
+elif jdbc_driver_name == "sap.jdbc4.sqlanywhere.IDriver":
+  jdbc_driver_jar = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
+  previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
+  sqla_db_used = True
+else:
+  jdbc_driver_jar = ""
+  jdbc_symlink_name = ""
+  previous_jdbc_jar_name = None
+
+driver_curl_source = format("{jdk_location}/{jdbc_driver_jar}")
+downloaded_custom_connector = format("{tmp_dir}/{jdbc_driver_jar}")
+if jdbc_driver_name == "org.postgresql.Driver":
+  target = jdbc_driver_jar
+  previous_jdbc_jar = None
+else:
+  target = format("{oozie_libext_dir}/{jdbc_driver_jar}")
+  previous_jdbc_jar = format("{oozie_libext_dir}/{previous_jdbc_jar_name}")
+
+#constants for type2 jdbc
+jdbc_libs_dir = format("{oozie_libext_dir}/native/lib64")
+lib_dir_available = os.path.exists(jdbc_libs_dir)
+
+if sqla_db_used:
+  jars_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/java/*")
+  libs_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/native/lib64/*")
+  downloaded_custom_connector = format("{tmp_dir}/{jdbc_driver_jar}")
+
+hdfs_share_dir = format("{oozie_hdfs_user_dir}/share")
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
+has_falcon_host = len(falcon_host) > 0
+
+oozie_server_hostnames = default("/clusterHostInfo/oozie_server", [])
+oozie_server_hostnames = sorted(oozie_server_hostnames)
+
+oozie_log_maxhistory = default('/configurations/oozie-log4j/oozie_log_maxhistory', 720)
+
+#oozie-log4j.properties
+if (('oozie-log4j' in config['configurations']) and ('content' in config['configurations']['oozie-log4j'])):
+  log4j_props = config['configurations']['oozie-log4j']['content']
+else:
+  log4j_props = None
+
+oozie_hdfs_user_mode = 0775
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+dfs_type = default("/commandParams/dfs_type", "")
+
+
+########################################################
+############# Atlas related params #####################
+########################################################
+#region Atlas Hooks needed by Hive on Oozie
+hive_atlas_application_properties = default('/configurations/hive-atlas-application.properties', {})
+
+if has_atlas_in_cluster():
+  atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
+#endregion
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
+)
+
+is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
+
+# The logic for LZO also exists in HDFS' params.py
+io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
+lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
+
+all_lzo_packages = get_lzo_packages(stack_version_unformatted)
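The HTTPS handling above scans the oozie-env content for an exported
OOZIE_HTTPS_PORT and then rewrites oozie.base.url; note that str.replace()
returns a new string, so its result must be assigned back. A compact sketch of
the same flow, with a made-up URL:

    import re
    from urlparse import urlparse

    def https_base_url(oozie_env_content, oozie_base_url):
        https_port = None
        for line in oozie_env_content.splitlines():
            m = re.match(r"export\s+OOZIE_HTTPS_PORT=(\d+)", line)
            if m:
                https_port = m.group(1)
        if https_port is None:
            return oozie_base_url
        parsed = urlparse(oozie_base_url)
        url = oozie_base_url.replace(parsed.scheme, "https")
        if parsed.port is None:
            return url.replace(parsed.hostname, "%s:%s" % (parsed.hostname, https_port))
        return url.replace(str(parsed.port), str(https_port))

    # https_base_url("export OOZIE_HTTPS_PORT=11443", "http://oozie.example.com:11000/oozie")
    # -> "https://oozie.example.com:11443/oozie"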

http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_windows.py
new file mode 100644
index 0000000..1f939d4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_windows.py
@@ -0,0 +1,34 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.script import Script
+import os
+from status_params import *
+
+config = Script.get_config()
+
+hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
+stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"], ".."))
+oozie_root = os.environ['OOZIE_ROOT']
+oozie_home = os.environ['OOZIE_HOME']
+oozie_conf_dir = os.path.join(oozie_home,'conf')
+oozie_user = hadoop_user
+oozie_tmp_dir = "c:\\hadoop\\temp\\oozie"
+
+oozie_env_cmd_template = config['configurations']['oozie-env']['content']

http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/service_check.py
new file mode 100644
index 0000000..ae7cb21
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/service_check.py
@@ -0,0 +1,140 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import glob
+
+from resource_management.core.resources.system import Execute
+from resource_management.core.resources import File
+from resource_management.core.source import StaticFile
+from resource_management.core.exceptions import Fail
+from resource_management.core.system import System
+from resource_management.libraries.functions import format
+from resource_management.libraries.script import Script
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+
+from resource_management.core.logger import Logger
+
+NO_DOCS_FOLDER_MESSAGE = "Cannot find {oozie_examples_regex}. A possible reason is that /etc/yum.conf contains" \
+" tsflags=nodocs, which prevents this folder from being installed along with the oozie-client package." \
+" If this is the case, please fix /etc/yum.conf and re-install the package."
+
+class OozieServiceCheck(Script):
+  pass
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class OozieServiceCheckDefault(OozieServiceCheck):
+
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    # on HDP1 this file is different
+    prepare_hdfs_file_name = 'prepareOozieHdfsDirectories.sh'
+    smoke_test_file_name = 'oozieSmoke2.sh'
+
+    OozieServiceCheckDefault.oozie_smoke_shell_file(smoke_test_file_name, prepare_hdfs_file_name)
+
+  @staticmethod
+  def oozie_smoke_shell_file(file_name, prepare_hdfs_file_name):
+    import params
+
+    File(format("{tmp_dir}/{file_name}"),
+         content=StaticFile(file_name),
+         mode=0755
+    )
+    File(format("{tmp_dir}/{prepare_hdfs_file_name}"),
+         content=StaticFile(prepare_hdfs_file_name),
+         mode=0755
+    )
+
+    os_family = System.get_instance().os_family
+    oozie_examples_dir_regex_matches = glob.glob(params.oozie_examples_regex)
+    if not oozie_examples_dir_regex_matches:
+      raise Fail(format(NO_DOCS_FOLDER_MESSAGE))
+    oozie_examples_dir = oozie_examples_dir_regex_matches[0]
+
+    Execute((format("{tmp_dir}/{prepare_hdfs_file_name}"), params.conf_dir, oozie_examples_dir, params.hadoop_conf_dir, params.yarn_resourcemanager_address, params.fs_root, params.service_check_queue_name, params.service_check_job_name),
+            tries=3,
+            try_sleep=5,
+            logoutput=True
+    )
+
+    params.HdfsResource(format("/user/{smokeuser}"),
+        type="directory",
+        action="create_on_execute",
+        owner=params.smokeuser,
+        mode=params.smoke_hdfs_user_mode,
+        )
+
+    examples_dir = format('/user/{smokeuser}/examples')
+    params.HdfsResource(examples_dir,
+                        action = "delete_on_execute",
+                        type = "directory"
+    )
+    params.HdfsResource(examples_dir,
+      action = "create_on_execute",
+      type = "directory",
+      source = format("{oozie_examples_dir}/examples"),
+      owner = params.smokeuser,
+      group = params.user_group
+    )
+
+    input_data_dir = format('/user/{smokeuser}/input-data')
+    params.HdfsResource(input_data_dir,
+                        action = "delete_on_execute",
+                        type = "directory"
+    )
+    params.HdfsResource(input_data_dir,
+      action = "create_on_execute",
+      type = "directory",
+      source = format("{oozie_examples_dir}/examples/input-data"),
+      owner = params.smokeuser,
+      group = params.user_group
+    )
+    params.HdfsResource(None, action="execute")
+
+    if params.security_enabled:
+      sh_cmd = format(
+        "{tmp_dir}/{file_name} {os_family} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {oozie_base_url} {oozie_examples_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {service_check_job_name} {security_enabled} {smokeuser_keytab} {kinit_path_local} {smokeuser_principal}")
+    else:
+      sh_cmd = format(
+        "{tmp_dir}/{file_name} {os_family} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {oozie_base_url} {oozie_examples_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {service_check_job_name} {security_enabled}")
+
+    Execute(sh_cmd,
+            path=params.execute_path,
+            tries=3,
+            try_sleep=5,
+            logoutput=True
+    )
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class OozieServiceCheckWindows(OozieServiceCheck):
+
+  def service_check(self, env):
+    import params
+
+    env.set_params(params)
+    smoke_cmd = os.path.join(params.stack_root, "Run-SmokeTests.cmd")
+    service = "OOZIE"
+    Execute(format("cmd /C {smoke_cmd} {service}"), logoutput=True)
+
+if __name__ == "__main__":
+  OozieServiceCheck().execute()
+
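The HdfsResource calls in the service check only queue work; nothing touches
HDFS until the trailing params.HdfsResource(None, action="execute") flushes
the queue in a single batch. A hedged sketch of that accumulate-then-flush
pattern (BatchedHdfsResource and apply_fn are illustrative names, not the
library implementation):

    class BatchedHdfsResource(object):
        def __init__(self, apply_fn):
            self._apply = apply_fn      # e.g. a function issuing WebHDFS calls
            self._pending = []

        def __call__(self, path, action="create_on_execute", **kwargs):
            if path is None and action == "execute":
                # flush everything queued so far in one pass
                for queued_path, queued_action, queued_kwargs in self._pending:
                    self._apply(queued_path, queued_action, **queued_kwargs)
                self._pending = []
            else:
                self._pending.append((path, action, kwargs))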

http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/status_params.py
new file mode 100644
index 0000000..ce990cf
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/status_params.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons.os_check import OSCheck
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.version import format_stack_version
+
+# a map of the Ambari role to the component name
+# for use with <stack-root>/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'OOZIE_SERVER' : 'oozie-server',
+  'OOZIE_CLIENT' : 'oozie-client',
+  'OOZIE_SERVICE_CHECK' : 'oozie-client',
+  'ru_execute_tasks' : 'oozie-server'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "OOZIE_CLIENT")
+
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+if OSCheck.is_windows_family():
+  # windows service mapping
+  oozie_server_win_service_name = "oozieservice"
+else:
+  oozie_pid_dir = config['configurations']['oozie-env']['oozie_pid_dir']
+  pid_file = format("{oozie_pid_dir}/oozie.pid")
+
+  security_enabled = config['configurations']['cluster-env']['security_enabled']
+  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+
+  conf_dir = "/etc/oozie/conf"
+  if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+    conf_dir = format("{stack_root}/current/{component_directory}/conf")
+
+  tmp_dir = Script.get_tmp_dir()
+  oozie_user = config['configurations']['oozie-env']['oozie_user']
+  hostname = config["hostname"]
+
+stack_name = default("/hostLevelParams/stack_name", None)
\ No newline at end of file
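The SERVER_ROLE_DIRECTORY_MAP above drives which <stack-root>/current/<component>
directory a command resolves to. Script.get_component_from_role is, in effect,
a dictionary lookup with a default; a one-line sketch of that contract:

    def component_from_role(role_map, command_role, default_component):
        # unknown roles fall back to the default component ("OOZIE_CLIENT" here)
        return role_map.get(command_role, default_component)

    # component_from_role(SERVER_ROLE_DIRECTORY_MAP, "OOZIE_SERVER", "oozie-client")
    # -> "oozie-server"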

http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/adminusers.txt.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/adminusers.txt.j2 b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/adminusers.txt.j2
new file mode 100644
index 0000000..2a0f7b2
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/adminusers.txt.j2
@@ -0,0 +1,28 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Users should be set using following rules:
+#
+#     One user name per line
+#     Empty lines and lines starting with '#' are ignored
+
+{% if oozie_admin_users %}
+{% for oozie_admin_user in oozie_admin_users.split(',') %}
+{{oozie_admin_user|trim}}
+{% endfor %}
+{% endif %}
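The template splits the comma-separated oozie_admin_users value and emits one
trimmed name per line. A quick render with the jinja2 library, using a
hypothetical admin list:

    from jinja2 import Template

    template = Template(
        "{% if oozie_admin_users %}"
        "{% for oozie_admin_user in oozie_admin_users.split(',') %}"
        "{{oozie_admin_user|trim}}\n"
        "{% endfor %}"
        "{% endif %}")
    print template.render(oozie_admin_users="oozie, ambari-qa")
    # -> "oozie" and "ambari-qa", each on its own line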

http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/input.config-oozie.json.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/input.config-oozie.json.j2 b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/input.config-oozie.json.j2
new file mode 100644
index 0000000..4a54f74
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/input.config-oozie.json.j2
@@ -0,0 +1,48 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"oozie_app",
+      "rowtype":"service",
+      "path":"{{default('/configurations/oozie-env/oozie_log_dir', '/var/log/oozie')}}/oozie.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "oozie_app"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{DATA:logger_name}:%{INT:line_number}%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
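The grok message_pattern above tokenizes each oozie.log line produced by the
log4j ConversionPattern. A plain-regex approximation of the same pattern,
applied to a made-up sample line:

    import re

    LINE = "2017-05-17 13:57:49,123  INFO ActionStartXCommand:520 - SERVER[host1] start action"
    PATTERN = (r"^(?P<logtime>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})\s+"
               r"(?P<level>[A-Z]+)\s+(?P<logger_name>\S+):(?P<line_number>\d+)\s+-\s+"
               r"(?P<log_message>.*)$")
    fields = re.match(PATTERN, LINE).groupdict()
    print fields["level"], fields["line_number"]   # -> INFO 520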

http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/oozie-log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/oozie-log4j.properties.j2 b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/oozie-log4j.properties.j2
new file mode 100644
index 0000000..e39428f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/oozie-log4j.properties.j2
@@ -0,0 +1,93 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
+# XLogService sets its value to '${oozie.home}/logs'
+
+log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
+log4j.appender.oozie.Append=true
+log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n
+log4j.appender.oozie.RollingPolicy.FileNamePattern=${log4j.appender.oozie.File}-%d{yyyy-MM-dd}
+log4j.appender.oozie.DatePattern='.'yyyy-MM-dd
+
+log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
+log4j.appender.oozieops.Append=true
+log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
+log4j.appender.oozieinstrumentation.Append=true
+log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
+log4j.appender.oozieaudit.Append=true
+log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
+log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
+log4j.appender.openjpa.Append=true
+log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
+log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.logger.openjpa=INFO, openjpa
+log4j.logger.oozieops=INFO, oozieops
+log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
+log4j.logger.oozieaudit=ALL, oozieaudit
+log4j.logger.org.apache.oozie=INFO, oozie
+log4j.logger.org.apache.hadoop=WARN, oozie
+log4j.logger.org.mortbay=WARN, oozie
+log4j.logger.org.hsqldb=WARN, oozie
+log4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie

http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/oozie.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/oozie.conf.j2 b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/oozie.conf.j2
new file mode 100644
index 0000000..1f99e49
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/oozie.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{oozie_user}}   - nofile   {{oozie_user_nofile_limit}}
+{{oozie_user}}   - nproc    {{oozie_user_nproc_limit}}
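
Rendered, the two lines above produce a limits-file entry for the Oozie service account, capping its open file descriptors (nofile) and processes/threads (nproc). With hypothetical values oozie_user=oozie, oozie_user_nofile_limit=32000 and oozie_user_nproc_limit=16000 (illustrative numbers, not necessarily the stack defaults), the rendered file would read:

    oozie   - nofile   32000
    oozie   - nproc    16000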

http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/zkmigrator_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/zkmigrator_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/zkmigrator_jaas.conf.j2
new file mode 100644
index 0000000..fbc0ce5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/zkmigrator_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+  com.sun.security.auth.module.Krb5LoginModule required
+  useKeyTab=true
+  storeKey=true
+  useTicketCache=false
+  keyTab="{{oozie_keytab}}"
+  principal="{{oozie_principal_with_host}}";
+};
\ No newline at end of file
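
This Client stanza lets a Kerberized process authenticate to ZooKeeper from the Oozie keytab instead of a ticket cache (useTicketCache=false, useKeyTab=true). A JVM consuming it, such as the ZooKeeper migrator the template is named for, would typically be pointed at the rendered file through the standard JAAS system property, e.g. (the path here is illustrative):

    java -Djava.security.auth.login.config=/etc/oozie/conf/zkmigrator_jaas.conf ...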

http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/quicklinks/quicklinks.json
new file mode 100644
index 0000000..81e7cbe
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/quicklinks/quicklinks.json
@@ -0,0 +1,45 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"https",
+      "checks":[
+        {
+          "property":"oozie.https.port",
+          "desired":"EXISTS",
+          "site":"oozie-site"
+        },
+        {
+          "property":"oozie.https.keystore.file",
+          "desired":"EXISTS",
+          "site":"oozie-site"
+        },
+        {
+          "property":"oozie.https.keystore.pass",
+          "desired":"EXISTS",
+          "site":"oozie-site"
+        }
+      ]
+    },
+
+    "links": [
+      {
+        "name": "oozie_server_ui",
+        "component_name": "OOZIE_SERVER",
+        "label": "Oozie Web UI",
+        "requires_user_name": "true",
+        "url":"%@://%@:%@/oozie?user.name=%@",
+        "port":{
+          "http_property": "oozie.base.url",
+          "http_default_port": "11000",
+          "https_property": "oozie.base.url",
+          "https_default_port": "11443",
+          "regex": "\\w*:(\\d+)",
+          "site": "oozie-site"
+        }
+      }
+    ]
+  }
+}
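
In the url template the %@ placeholders are filled positionally with protocol, host, port, and the logged-in user name (requires_user_name is true); the protocol is https only when all three checks above find the HTTPS properties in oozie-site. The port is extracted from oozie.base.url with the regex \w*:(\d+), with 11000 and 11443 as the declared HTTP/HTTPS defaults. With HTTPS in effect, a hypothetical host would resolve to:

    https://oozie-host.example.com:11443/oozie?user.name=admin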

http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/role_command_order.json b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/role_command_order.json
new file mode 100644
index 0000000..769e917
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/role_command_order.json
@@ -0,0 +1,9 @@
+{
+  "general_deps" : {
+    "_comment" : "dependencies for OOZIE",
+    "OOZIE_SERVER-START": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "OOZIE_SERVER-RESTART": ["NAMENODE-RESTART"],
+    "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START", "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK"]
+
+  }
+}
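
Each key in general_deps names a role command and its value lists the commands that must complete first: per the entries above, OOZIE_SERVER-START is held until NODEMANAGER-START and RESOURCEMANAGER-START finish, an Oozie server restart waits on a NameNode restart, and the Oozie service check runs only after the server has started and the MAPREDUCE2 service check has passed.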

http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/themes/theme.json b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/themes/theme.json
new file mode 100644
index 0000000..5f325f7
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/themes/theme.json
@@ -0,0 +1,116 @@
+{
+  "name": "default",
+  "description": "Default theme for Oozie service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "oozie-database",
+            "display-name": "Database",
+            "layout": {
+              "tab-rows": 1,
+              "tab-columns": 1,
+              "sections": [
+                {
+                  "name": "oozie-database-configurations",
+                  "display-name": "Database Configurations",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "0",
+                  "column-span": "0",
+                  "section-columns": "2",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "oozie-database-configurations-col-1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "oozie-database-configurations-col-2",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "oozie-env/oozie_database",
+          "subsection-name": "oozie-database-configurations-col-1"
+        },
+        {
+          "config": "oozie-site/oozie.db.schema.name",
+          "subsection-name": "oozie-database-configurations-col-1"
+        },
+        {
+          "config": "oozie-site/oozie.service.JPAService.jdbc.username",
+          "subsection-name": "oozie-database-configurations-col-1"
+        },
+        {
+          "config": "oozie-site/oozie.service.JPAService.jdbc.url",
+          "subsection-name": "oozie-database-configurations-col-1"
+        },
+        {
+          "config": "oozie-site/oozie.service.JPAService.jdbc.driver",
+          "subsection-name": "oozie-database-configurations-col-2"
+        },
+        {
+          "config": "oozie-site/oozie.service.JPAService.jdbc.password",
+          "subsection-name": "oozie-database-configurations-col-2"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "oozie-env/oozie_database",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "oozie-site/oozie.service.JPAService.jdbc.username",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "oozie-site/oozie.service.JPAService.jdbc.password",
+        "widget": {
+          "type": "password"
+        }
+      },
+      {
+        "config": "oozie-site/oozie.service.JPAService.jdbc.driver",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "oozie-site/oozie.service.JPAService.jdbc.url",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "oozie-site/oozie.db.schema.name",
+        "widget": {
+          "type": "text-field"
+        }
+      }
+    ]
+  }
+}
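
The three sections of the theme cooperate: layouts defines a single Database tab split into two subsection columns, placement assigns each database-related key (oozie-env/oozie_database, the JPAService JDBC settings, and so on) to one of those columns, and widgets picks the control used to render each key, i.e. a combo box for the database type, a masked password field for the JDBC password, and plain text fields for the rest.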

http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/stacks/HDP/3.0/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/OOZIE/metainfo.xml
new file mode 100644
index 0000000..e1c73f1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/OOZIE/metainfo.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>OOZIE</name>
+            <version>4.2.0.3.0</version>
+            <extends>common-services/OOZIE/4.2.0.3.0</extends>
+        </service>
+    </services>
+</metainfo>
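
This stack-level metainfo.xml is deliberately thin: the extends element inherits the complete OOZIE 4.2.0.3.0 definition (scripts, configurations, themes, quicklinks, role command order) from common-services, so the HDP 3.0 stack entry only declares the service name and version and can override individual pieces in place if it ever needs to diverge.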


[50/50] [abbrv] ambari git commit: Merge remote-tracking branch 'origin/trunk' into ambari-rest-api-explorer

Posted by ad...@apache.org.
Merge remote-tracking branch 'origin/trunk' into ambari-rest-api-explorer


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/651bdcbd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/651bdcbd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/651bdcbd

Branch: refs/heads/ambari-rest-api-explorer
Commit: 651bdcbdf86c8addace33b1f9bac066633296106
Parents: 51fc3cf 7c92953
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Tue May 23 11:52:00 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Tue May 23 11:52:00 2017 +0200

----------------------------------------------------------------------
 .../clusters/ClustersManageAccessCtrl.js        |    2 +-
 .../stackVersions/StackVersionsCreateCtrl.js    |   19 +
 .../stackVersions/StackVersionsEditCtrl.js      |  102 +-
 .../ui/admin-web/app/scripts/i18n.config.js     |    2 +-
 .../ui/admin-web/app/scripts/services/Stack.js  |    3 +-
 .../resources/ui/admin-web/app/views/main.html  |   50 +-
 .../views/stackVersions/stackVersionPage.html   |    3 +-
 .../ui/admin-web/app/views/users/create.html    |    2 +-
 ambari-agent/conf/unix/install-helper.sh        |    3 +-
 ambari-agent/pom.xml                            |    1 +
 .../src/main/python/ambari_agent/ActionQueue.py |    9 +-
 .../main/python/ambari_agent/AmbariConfig.py    |   83 +-
 .../src/main/python/ambari_agent/Controller.py  |   18 +-
 .../ambari_agent/CustomServiceOrchestrator.py   |   38 +-
 .../src/main/python/ambari_agent/Hardware.py    |    2 +
 .../src/main/python/ambari_agent/HostInfo.py    |   15 +-
 .../src/main/python/ambari_agent/NetUtil.py     |    5 +-
 .../ambari_agent/StatusCommandsExecutor.py      |  279 +-
 .../python/ambari_agent/alerts/web_alert.py     |    5 +-
 .../src/main/python/ambari_agent/hostname.py    |    8 +
 ambari-agent/src/packages/tarball/all.xml       |   11 +-
 .../test/python/ambari_agent/TestActionQueue.py |   13 +-
 .../test/python/ambari_agent/TestController.py  |   14 +
 .../TestCustomServiceOrchestrator.py            |   51 -
 .../test/python/ambari_agent/TestHardware.py    |    4 +-
 .../src/test/python/ambari_agent/TestShell.py   |    2 +-
 .../python/resource_management/TestScript.py    |   56 +-
 .../main/python/ambari_commons/inet_utils.py    |   43 +-
 .../src/main/python/ambari_commons/network.py   |   20 +-
 .../libraries/functions/conf_select.py          |   13 +-
 .../libraries/functions/curl_krb_request.py     |   17 +-
 .../libraries/functions/decorator.py            |   23 +-
 .../libraries/functions/jmx.py                  |    7 +-
 .../libraries/functions/namenode_ha_utils.py    |    6 +-
 .../functions/setup_ranger_plugin_xml.py        |   26 +-
 .../libraries/functions/version_select_util.py  |   40 +
 .../libraries/script/script.py                  |  171 +-
 .../HIVE/package/scripts/mysql_service.py       |    5 +
 ambari-infra/.gitignore                         |    6 +
 ambari-infra/ambari-infra-assembly/pom.xml      |   91 +
 .../src/main/package/deb/manager/control        |   22 +
 .../src/main/package/deb/manager/postinst       |   15 +
 .../src/main/package/deb/manager/postrm         |   15 +
 .../src/main/package/deb/manager/preinst        |   15 +
 .../src/main/package/deb/manager/prerm          |   15 +
 ambari-infra/ambari-infra-manager/README.md     |   31 +
 ambari-infra/ambari-infra-manager/build.xml     |   54 +
 .../ambari-infra-manager/docker/Dockerfile      |   52 +
 .../ambari-infra-manager/docker/bin/start.sh    |   21 +
 .../docker/infra-manager-docker.sh              |   85 +
 ambari-infra/ambari-infra-manager/pom.xml       |  431 ++
 .../org/apache/ambari/infra/InfraManager.java   |  186 +
 .../infra/common/InfraManagerConstants.java     |   29 +
 .../infra/conf/InfraManagerApiDocConfig.java    |   54 +
 .../ambari/infra/conf/InfraManagerConfig.java   |   36 +
 .../conf/batch/InfraManagerBatchConfig.java     |  282 +
 .../infra/job/dummy/DummyItemProcessor.java     |   36 +
 .../ambari/infra/job/dummy/DummyItemWriter.java |   36 +
 .../ambari/infra/job/dummy/DummyObject.java     |   40 +
 .../apache/ambari/infra/manager/JobManager.java |  274 +
 .../infra/model/ExecutionContextResponse.java   |   40 +
 .../ambari/infra/model/JobDetailsResponse.java  |   53 +
 .../model/JobExecutionDetailsResponse.java      |   49 +
 .../infra/model/JobExecutionInfoResponse.java   |  141 +
 .../ambari/infra/model/JobExecutionRequest.java |   46 +
 .../infra/model/JobExecutionRestartRequest.java |   52 +
 .../infra/model/JobExecutionStopRequest.java    |   50 +
 .../infra/model/JobInstanceDetailsResponse.java |   54 +
 .../infra/model/JobInstanceStartRequest.java    |   49 +
 .../ambari/infra/model/JobOperationParams.java  |   31 +
 .../apache/ambari/infra/model/JobRequest.java   |   37 +
 .../apache/ambari/infra/model/PageRequest.java  |   49 +
 .../model/StepExecutionContextResponse.java     |   58 +
 .../infra/model/StepExecutionInfoResponse.java  |  115 +
 .../model/StepExecutionProgressResponse.java    |   53 +
 .../infra/model/StepExecutionRequest.java       |   49 +
 .../infra/model/wrapper/JobExecutionData.java   |  118 +
 .../infra/model/wrapper/StepExecutionData.java  |  133 +
 .../ambari/infra/rest/JobExceptionMapper.java   |  110 +
 .../apache/ambari/infra/rest/JobResource.java   |  191 +
 .../src/main/resources/dummy/dummy.txt          |    3 +
 .../src/main/resources/infra-manager-env.sh     |   18 +
 .../src/main/resources/infra-manager.properties |   18 +
 .../src/main/resources/infraManager.sh          |   20 +
 .../src/main/resources/log4j.xml                |   31 +
 .../src/main/resources/swagger/swagger.html     |  115 +
 .../src/main/resources/webapp/index.html        |   24 +
 .../infra/solr/AmbariSolrCloudClient.java       |    4 +-
 .../src/main/resources/solrCloudCli.sh          |    2 +-
 ambari-infra/pom.xml                            |    1 +
 ambari-logsearch/README.md                      |    9 +-
 .../ambari-logsearch-config-api/.gitignore      |    1 +
 .../ambari-logsearch-config-api/pom.xml         |   49 +
 .../config/api/InputConfigMonitor.java          |   50 +
 .../config/api/LogLevelFilterMonitor.java       |   44 +
 .../logsearch/config/api/LogSearchConfig.java   |  132 +
 .../config/api/LogSearchConfigFactory.java      |   68 +
 .../api/model/inputconfig/Conditions.java       |   24 +
 .../config/api/model/inputconfig/Fields.java    |   26 +
 .../api/model/inputconfig/FilterDescriptor.java |   39 +
 .../model/inputconfig/FilterGrokDescriptor.java |   28 +
 .../model/inputconfig/FilterJsonDescriptor.java |   23 +
 .../inputconfig/FilterKeyValueDescriptor.java   |   28 +
 .../api/model/inputconfig/InputConfig.java      |   28 +
 .../api/model/inputconfig/InputDescriptor.java  |   54 +
 .../inputconfig/InputFileBaseDescriptor.java    |   28 +
 .../model/inputconfig/InputFileDescriptor.java  |   23 +
 .../inputconfig/InputS3FileDescriptor.java      |   26 +
 .../model/inputconfig/MapDateDescriptor.java    |   26 +
 .../inputconfig/MapFieldCopyDescriptor.java     |   24 +
 .../model/inputconfig/MapFieldDescriptor.java   |   24 +
 .../inputconfig/MapFieldNameDescriptor.java     |   24 +
 .../inputconfig/MapFieldValueDescriptor.java    |   26 +
 .../api/model/inputconfig/PostMapValues.java    |   26 +
 .../model/loglevelfilter/LogLevelFilter.java    |   79 +
 .../model/loglevelfilter/LogLevelFilterMap.java |   33 +
 .../config/api/LogSearchConfigClass1.java       |   73 +
 .../config/api/LogSearchConfigClass2.java       |   73 +
 .../config/api/LogSearchConfigFactoryTest.java  |   58 +
 .../config/api/NonLogSearchConfigClass.java     |   23 +
 .../src/test/resources/log4j.xml                |   34 +
 .../.gitignore                                  |    1 +
 .../ambari-logsearch-config-zookeeper/pom.xml   |   84 +
 .../config/zookeeper/LogSearchConfigZK.java     |  362 +
 .../model/inputconfig/impl/ConditionsImpl.java  |   37 +
 .../model/inputconfig/impl/FieldsImpl.java      |   39 +
 .../model/inputconfig/impl/FilterAdapter.java   |   42 +
 .../inputconfig/impl/FilterDescriptorImpl.java  |  113 +
 .../impl/FilterGrokDescriptorImpl.java          |   66 +
 .../impl/FilterJsonDescriptorImpl.java          |   25 +
 .../impl/FilterKeyValueDescriptorImpl.java      |   63 +
 .../model/inputconfig/impl/InputAdapter.java    |   58 +
 .../model/inputconfig/impl/InputConfigGson.java |   46 +
 .../model/inputconfig/impl/InputConfigImpl.java |   54 +
 .../inputconfig/impl/InputDescriptorImpl.java   |  204 +
 .../impl/InputFileBaseDescriptorImpl.java       |   66 +
 .../impl/InputFileDescriptorImpl.java           |   25 +
 .../impl/InputS3FileDescriptorImpl.java         |   53 +
 .../inputconfig/impl/MapDateDescriptorImpl.java |   58 +
 .../impl/MapFieldCopyDescriptorImpl.java        |   45 +
 .../impl/MapFieldNameDescriptorImpl.java        |   45 +
 .../impl/MapFieldValueDescriptorImpl.java       |   58 +
 .../inputconfig/impl/PostMapValuesAdapter.java  |   99 +
 .../inputconfig/impl/PostMapValuesImpl.java     |   40 +
 ambari-logsearch/ambari-logsearch-it/pom.xml    |  204 +-
 .../logsearch/domain/StoryDataRegistry.java     |   43 +-
 .../logsearch/steps/AbstractLogSearchSteps.java |  161 +
 .../logsearch/steps/LogSearchDockerSteps.java   |  197 +-
 .../logsearch/steps/LogSearchUISteps.java       |  212 +
 .../ambari/logsearch/steps/SolrSteps.java       |    4 +-
 .../logsearch/story/LogSearchApiQueryStory.java |   22 -
 .../story/LogSearchBackendStories.java          |   75 +
 .../ambari/logsearch/story/LogSearchStory.java  |   60 -
 .../logsearch/story/LogSearchStoryLocator.java  |   97 +
 .../logsearch/story/LogSearchUIStories.java     |   92 +
 .../logsearch/story/LogfeederParsingStory.java  |   22 -
 .../ambari/logsearch/web/AbstractPage.java      |   63 +
 .../org/apache/ambari/logsearch/web/Home.java   |   39 +
 .../story/log_search_api_query_story.story      |   17 -
 .../story/logfeeder_parsing_story.story         |   20 -
 .../stories/backend/log_search_api_tests.story  |   17 +
 .../backend/logfeeder_parsing_tests.story       |   20 +
 .../resources/stories/selenium/login.ui.story   |   20 +
 .../ambari-logsearch-logfeeder/pom.xml          |   66 +-
 .../org/apache/ambari/logfeeder/LogFeeder.java  |  386 +-
 .../ambari/logfeeder/common/ConfigBlock.java    |  107 +-
 .../ambari/logfeeder/common/ConfigHandler.java  |  420 ++
 .../ambari/logfeeder/common/ConfigItem.java     |   97 +
 .../apache/ambari/logfeeder/filter/Filter.java  |   53 +-
 .../ambari/logfeeder/filter/FilterGrok.java     |   11 +-
 .../ambari/logfeeder/filter/FilterJSON.java     |    3 -
 .../ambari/logfeeder/filter/FilterKeyValue.java |   12 +-
 .../logfeeder/input/AbstractInputFile.java      |   16 +-
 .../apache/ambari/logfeeder/input/Input.java    |  112 +-
 .../logfeeder/input/InputConfigUploader.java    |   94 +
 .../ambari/logfeeder/input/InputFile.java       |    6 +-
 .../ambari/logfeeder/input/InputManager.java    |  304 +-
 .../ambari/logfeeder/input/InputS3File.java     |    5 +-
 .../ambari/logfeeder/input/InputSimulate.java   |   41 +-
 .../logfeeder/logconfig/FilterLogData.java      |   87 -
 .../logfeeder/logconfig/LogConfigFetcher.java   |  168 -
 .../logfeeder/logconfig/LogConfigHandler.java   |  213 -
 .../logfeeder/logconfig/LogFeederFilter.java    |   90 -
 .../logconfig/LogFeederFilterWrapper.java       |   55 -
 .../logfeeder/loglevelfilter/FilterLogData.java |   73 +
 .../loglevelfilter/LogLevelFilterHandler.java   |  157 +
 .../apache/ambari/logfeeder/mapper/Mapper.java  |    4 +-
 .../ambari/logfeeder/mapper/MapperDate.java     |   15 +-
 .../logfeeder/mapper/MapperFieldCopy.java       |   13 +-
 .../logfeeder/mapper/MapperFieldName.java       |   14 +-
 .../logfeeder/mapper/MapperFieldValue.java      |   14 +-
 .../logfeeder/metrics/LogFeederAMSClient.java   |   12 +-
 .../apache/ambari/logfeeder/output/Output.java  |    3 -
 .../logfeeder/output/OutputLineFilter.java      |    2 +-
 .../ambari/logfeeder/output/OutputManager.java  |   19 +-
 .../ambari/logfeeder/output/OutputS3File.java   |   96 +-
 .../ambari/logfeeder/util/LogFeederUtil.java    |   73 +-
 .../apache/ambari/logfeeder/util/SSLUtil.java   |    5 +-
 .../src/main/resources/log4j.xml                |    6 +-
 .../ambari/logfeeder/filter/FilterGrokTest.java |   37 +-
 .../ambari/logfeeder/filter/FilterJSONTest.java |   14 +-
 .../logfeeder/filter/FilterKeyValueTest.java    |   41 +-
 .../ambari/logfeeder/input/InputFileTest.java   |   25 +-
 .../logfeeder/input/InputManagerTest.java       |   70 +-
 .../logconfig/LogConfigHandlerTest.java         |  108 +-
 .../ambari/logfeeder/mapper/MapperDateTest.java |   44 +-
 .../logfeeder/mapper/MapperFieldCopyTest.java   |   19 +-
 .../logfeeder/mapper/MapperFieldNameTest.java   |   19 +-
 .../logfeeder/mapper/MapperFieldValueTest.java  |   29 +-
 .../logfeeder/output/OutputLineFilterTest.java  |   22 +-
 .../logfeeder/output/OutputManagerTest.java     |   21 +-
 .../logfeeder/output/OutputS3FileTest.java      |   17 +-
 .../src/test/resources/logfeeder.properties     |    3 +-
 .../ambari-logsearch-server/pom.xml             |   18 +-
 .../org/apache/ambari/logsearch/LogSearch.java  |    1 +
 .../ambari/logsearch/common/MessageEnums.java   |    3 +-
 .../logsearch/common/PropertiesHelper.java      |    3 +
 .../ambari/logsearch/conf/SecurityConfig.java   |   15 +
 .../conf/global/LogSearchConfigState.java       |   35 +
 .../ambari/logsearch/configurer/Configurer.java |   23 +
 .../configurer/LogSearchConfigConfigurer.java   |   69 +
 .../configurer/LogfeederFilterConfigurer.java   |   66 -
 .../configurer/SolrAuditAliasConfigurer.java    |    2 +-
 .../configurer/SolrCollectionConfigurer.java    |   12 +-
 .../logsearch/configurer/SolrConfigurer.java    |   23 -
 .../ambari/logsearch/dao/AuditSolrDao.java      |    2 +-
 .../logsearch/dao/ServiceLogsSolrDao.java       |    2 +-
 .../ambari/logsearch/dao/UserConfigSolrDao.java |   81 +-
 .../ambari/logsearch/doc/DocConstants.java      |   12 +-
 .../handler/AbstractSolrConfigHandler.java      |  122 +
 .../logsearch/handler/UpgradeSchemaHandler.java |  139 +
 .../handler/UploadConfigurationHandler.java     |  164 +-
 .../logsearch/manager/AuditLogsManager.java     |    5 +
 .../ambari/logsearch/manager/ManagerBase.java   |   30 +
 .../logsearch/manager/ServiceLogsManager.java   |    5 +
 .../logsearch/manager/ShipperConfigManager.java |  108 +
 .../logsearch/manager/UserConfigManager.java    |   24 -
 .../model/common/LSServerConditions.java        |   41 +
 .../logsearch/model/common/LSServerFields.java  |   43 +
 .../logsearch/model/common/LSServerFilter.java  |  130 +
 .../model/common/LSServerFilterGrok.java        |   73 +
 .../model/common/LSServerFilterJson.java        |   31 +
 .../model/common/LSServerFilterKeyValue.java    |   71 +
 .../logsearch/model/common/LSServerInput.java   |  149 +
 .../model/common/LSServerInputConfig.java       |   87 +
 .../model/common/LSServerInputFile.java         |   31 +
 .../model/common/LSServerInputFileBase.java     |   72 +
 .../model/common/LSServerInputS3File.java       |   59 +
 .../model/common/LSServerLogLevelFilter.java    |  100 +
 .../model/common/LSServerLogLevelFilterMap.java |   65 +
 .../logsearch/model/common/LSServerMapDate.java |   61 +
 .../model/common/LSServerMapField.java          |   30 +
 .../model/common/LSServerMapFieldCopy.java      |   49 +
 .../model/common/LSServerMapFieldName.java      |   49 +
 .../model/common/LSServerMapFieldValue.java     |   61 +
 .../model/common/LSServerPostMapValues.java     |   63 +
 .../common/LSServerPostMapValuesSerializer.java |   39 +
 .../model/common/LogFeederDataMap.java          |   50 -
 .../model/common/LogfeederFilterData.java       |   87 -
 .../logsearch/rest/AuditLogsResource.java       |   10 +
 .../logsearch/rest/ServiceLogsResource.java     |   11 +
 .../logsearch/rest/ShipperConfigResource.java   |  108 +
 .../logsearch/rest/UserConfigResource.java      |   18 -
 .../web/filters/LogSearchConfigStateFilter.java |  102 +
 ...rchExternalServerAuthenticationProvider.java |    5 +-
 .../ambari-logsearch-web/.gitignore             |    1 +
 .../webapp/templates/common/Header_tmpl.html    |    5 +-
 ambari-logsearch/docker/Dockerfile              |   17 +-
 ambari-logsearch/docker/bin/start.sh            |    8 +
 ambari-logsearch/docker/logsearch-docker.sh     |   18 +-
 .../test-config/logfeeder/logfeeder.properties  |   13 +-
 .../test-config/logsearch/logsearch.properties  |    6 +-
 ambari-logsearch/pom.xml                        |    2 +
 ambari-metrics/ambari-metrics-assembly/pom.xml  |   20 +
 .../src/main/assembly/monitor-windows.xml       |    7 +
 .../src/main/assembly/monitor.xml               |    9 +-
 .../timeline/AbstractTimelineMetricsSink.java   |   26 +-
 .../sink/timeline/AggregationResult.java        |   60 +
 .../metrics2/sink/timeline/MetricAggregate.java |  110 +
 .../sink/timeline/MetricClusterAggregate.java   |   73 +
 .../sink/timeline/MetricHostAggregate.java      |   81 +
 .../metrics2/sink/timeline/TimelineMetric.java  |    6 +-
 .../TimelineMetricWithAggregatedValues.java     |   65 +
 .../AbstractTimelineMetricSinkTest.java         |   10 +
 .../availability/MetricCollectorHATest.java     |   10 +
 .../cache/HandleConnectExceptionTest.java       |   10 +
 .../sink/flume/FlumeTimelineMetricsSink.java    |   26 +-
 .../ambari-metrics/datasource.js                |  162 +-
 .../ambari-metrics/partials/query.editor.html   |   14 +-
 .../ambari-metrics/queryCtrl.js                 |    9 +-
 .../timeline/HadoopTimelineMetricsSink.java     |   27 +-
 .../timeline/HadoopTimelineMetricsSinkTest.java |   14 +-
 .../conf/unix/log4j.properties                  |   31 +
 .../conf/windows/log4j.properties               |   29 +
 .../ambari-metrics-host-aggregator/pom.xml      |  120 +
 .../AbstractMetricPublisherThread.java          |  134 +
 .../aggregator/AggregatedMetricsPublisher.java  |  101 +
 .../host/aggregator/AggregatorApplication.java  |  180 +
 .../host/aggregator/AggregatorWebService.java   |   56 +
 .../host/aggregator/RawMetricsPublisher.java    |   60 +
 .../host/aggregator/TimelineMetricsHolder.java  |   98 +
 .../conf/unix/ambari-metrics-monitor            |    2 +-
 .../src/main/python/core/aggregator.py          |  110 +
 .../main/python/core/application_metric_map.py  |    7 +-
 .../src/main/python/core/config_reader.py       |   41 +-
 .../src/main/python/core/controller.py          |   28 +
 .../src/main/python/core/emitter.py             |   12 +-
 .../src/main/python/core/stop_handler.py        |    3 +-
 .../src/main/python/main.py                     |    6 +-
 .../kafka/KafkaTimelineMetricsReporter.java     |   27 +
 .../kafka/KafkaTimelineMetricsReporterTest.java |    4 +
 .../storm/StormTimelineMetricsReporter.java     |   23 +
 .../sink/storm/StormTimelineMetricsSink.java    |   21 +
 .../ambari-metrics-storm-sink/pom.xml           |    2 +-
 .../storm/StormTimelineMetricsReporter.java     |   22 +
 .../sink/storm/StormTimelineMetricsSink.java    |   22 +
 .../ambari-metrics-timelineservice/pom.xml      |    2 +-
 .../timeline/HBaseTimelineMetricStore.java      |   78 +-
 .../metrics/timeline/PhoenixHBaseAccessor.java  |  112 +-
 .../timeline/TimelineMetricConfiguration.java   |    2 +
 .../metrics/timeline/TimelineMetricStore.java   |   10 +
 .../timeline/TimelineMetricsAggregatorSink.java |    4 +-
 .../timeline/aggregators/MetricAggregate.java   |  110 -
 .../aggregators/MetricClusterAggregate.java     |   73 -
 .../aggregators/MetricHostAggregate.java        |   81 -
 .../TimelineMetricAppAggregator.java            |    1 +
 .../TimelineMetricClusterAggregator.java        |   13 +-
 .../TimelineMetricClusterAggregatorSecond.java  |    2 +
 .../TimelineMetricHostAggregator.java           |    1 +
 .../aggregators/TimelineMetricReadHelper.java   |    2 +
 .../TimelineMetricMetadataManager.java          |   38 +
 .../discovery/TimelineMetricMetadataSync.java   |   57 +
 .../timeline/query/PhoenixTransactSQL.java      |   12 +
 .../webapp/TimelineWebServices.java             |   49 +
 .../timeline/ITPhoenixHBaseAccessor.java        |    4 +-
 .../metrics/timeline/MetricTestHelper.java      |    2 +-
 .../timeline/PhoenixHBaseAccessorTest.java      |    4 +-
 .../timeline/TestMetricHostAggregate.java       |    8 +-
 .../timeline/TestTimelineMetricStore.java       |   11 +
 .../TimelineMetricsAggregatorMemorySink.java    |    4 +-
 .../aggregators/ITClusterAggregator.java        |   12 +-
 .../aggregators/ITMetricAggregator.java         |   13 +-
 ...melineMetricClusterAggregatorSecondTest.java |    1 +
 .../timeline/discovery/TestMetadataManager.java |    8 +
 .../timeline/discovery/TestMetadataSync.java    |   12 +
 ambari-metrics/pom.xml                          |    1 +
 ambari-server/conf/unix/install-helper.sh       |   19 +-
 ambari-server/pom.xml                           |   16 +-
 ambari-server/src/main/assemblies/server.xml    |    7 +
 .../ambari/annotations/TransactionalLock.java   |    6 +-
 .../server/actionmanager/ActionDBAccessor.java  |   50 +-
 .../actionmanager/ActionDBAccessorImpl.java     |    5 +-
 .../server/actionmanager/ActionManager.java     |    4 +-
 .../server/actionmanager/ActionScheduler.java   |   25 +-
 .../server/actionmanager/HostRoleCommand.java   |   10 +
 .../ambari/server/actionmanager/Request.java    |   18 +-
 .../server/actionmanager/RequestFactory.java    |    4 +-
 .../ambari/server/actionmanager/Stage.java      |   16 +-
 .../server/actionmanager/StageFactory.java      |    1 -
 .../server/actionmanager/StageFactoryImpl.java  |    4 +-
 .../ambari/server/agent/ComponentStatus.java    |   28 +-
 .../ambari/server/agent/ExecutionCommand.java   |    2 +-
 .../ambari/server/agent/HeartBeatResponse.java  |    3 -
 .../ambari/server/agent/HeartbeatProcessor.java |   20 -
 .../ambari/server/alerts/AlertRunnable.java     |   27 +
 .../alerts/AmbariPerformanceRunnable.java       |    4 +-
 .../alerts/ComponentVersionAlertRunnable.java   |  195 +
 .../server/api/handlers/RequestHandler.java     |    2 +-
 .../api/predicate/expressions/Expression.java   |   14 +-
 .../predicate/operators/LogicalOperator.java    |    2 +-
 .../api/predicate/operators/Operator.java       |    8 +-
 .../predicate/operators/RelationalOperator.java |    2 +-
 .../ambari/server/api/query/JpaSortBuilder.java |    2 +-
 .../apache/ambari/server/api/query/Query.java   |   18 +-
 .../server/api/query/render/Renderer.java       |   12 +-
 .../api/resources/BaseResourceDefinition.java   |    3 +-
 .../RequestScheduleResourceDefinition.java      |    6 +-
 .../server/api/resources/ResourceInstance.java  |   12 +-
 .../api/resources/ResourceInstanceFactory.java  |    2 +-
 .../server/api/rest/BootStrapResource.java      |    2 +-
 .../api/services/AlertDefinitionService.java    |    3 -
 .../api/services/ResultPostProcessor.java       |    2 +-
 .../server/api/services/ResultStatus.java       |    4 +-
 .../api/services/parsers/RequestBodyParser.java |   12 +-
 .../persistence/PersistenceManager.java         |    6 +-
 .../StackAdvisorBlueprintProcessor.java         |   67 +-
 .../stackadvisor/StackAdvisorRequest.java       |    2 +-
 .../commands/StackAdvisorCommandType.java       |    2 +-
 .../apache/ambari/server/api/util/TreeNode.java |   28 +-
 .../ambari/server/bootstrap/BSRunner.java       |    9 +-
 .../server/checks/AbstractCheckDescriptor.java  |   25 +
 .../checks/DatabaseConsistencyCheckHelper.java  |  196 +-
 .../server/checks/RangerSSLConfigCheck.java     |    1 -
 .../ambari/server/checks/ServicesUpCheck.java   |    2 -
 .../ambari/server/checks/UpgradeCheckGroup.java |    2 +-
 .../server/configuration/Configuration.java     |   81 +-
 .../AmbariCustomCommandExecutionHelper.java     |   27 +-
 .../controller/AmbariManagementController.java  |   12 +-
 .../AmbariManagementControllerImpl.java         |   82 +-
 .../controller/AmbariManagementHelper.java      |  175 +
 .../ambari/server/controller/AmbariServer.java  |    9 +-
 .../server/controller/KerberosHelper.java       |    2 +-
 .../server/controller/KerberosHelperImpl.java   |   51 +-
 .../controller/MaintenanceStateHelper.java      |    4 +-
 .../server/controller/RequestRequest.java       |   11 +
 .../controller/ServiceComponentResponse.java    |   37 +-
 .../gsinstaller/ClusterDefinition.java          |  434 --
 .../gsinstaller/GSInstallerClusterProvider.java |   71 -
 .../GSInstallerComponentProvider.java           |   88 -
 .../GSInstallerHostComponentProvider.java       |   99 -
 .../gsinstaller/GSInstallerHostProvider.java    |   86 -
 .../gsinstaller/GSInstallerNoOpProvider.java    |   60 -
 .../gsinstaller/GSInstallerProviderModule.java  |   93 -
 .../GSInstallerResourceProvider.java            |  234 -
 .../gsinstaller/GSInstallerServiceProvider.java |   82 -
 .../gsinstaller/GSInstallerStateProvider.java   |   35 -
 .../internal/AbstractProviderModule.java        |    2 +-
 .../internal/AbstractResourceProvider.java      |    2 +-
 .../internal/ArtifactResourceProvider.java      |   20 +-
 .../BlueprintConfigurationProcessor.java        |  145 +-
 .../internal/ClientConfigResourceProvider.java  |   41 +-
 .../ClusterStackVersionResourceProvider.java    |  153 +-
 .../internal/ComponentResourceProvider.java     |   11 +-
 .../internal/ExportBlueprintRequest.java        |    6 -
 .../internal/HostResourceProvider.java          |    4 +-
 .../HostStackVersionResourceProvider.java       |    9 +-
 .../internal/JobResourceProvider.java           |    8 +-
 .../internal/ObservableResourceProvider.java    |    4 +-
 .../internal/ProvisionClusterRequest.java       |   21 +-
 .../internal/RequestResourceProvider.java       |   96 +-
 .../internal/RequestStageContainer.java         |   11 +-
 .../internal/ResourceProviderObserver.java      |    2 +-
 .../internal/ScaleClusterRequest.java           |    7 -
 .../internal/StageResourceProvider.java         |   89 +-
 .../internal/TaskAttemptResourceProvider.java   |   10 +-
 .../internal/TaskResourceProvider.java          |    3 +
 .../internal/UpgradeResourceProvider.java       |   72 +-
 .../VersionDefinitionResourceProvider.java      |    5 +-
 .../internal/WidgetLayoutResourceProvider.java  |    2 +-
 .../internal/WidgetResourceProvider.java        |    2 +-
 .../internal/WorkflowResourceProvider.java      |    8 +-
 .../server/controller/ivory/IvoryService.java   |   34 +-
 .../controller/jdbc/ConnectionFactory.java      |    2 +-
 .../server/controller/jmx/JMXHostProvider.java  |   10 +-
 .../logging/LogSearchDataRetrievalService.java  |   75 +-
 .../logging/LoggingRequestHelper.java           |    8 +-
 .../logging/LoggingRequestHelperImpl.java       |    6 +
 .../controller/metrics/MetricHostProvider.java  |   10 +-
 .../metrics/MetricsPaddingMethod.java           |    2 +-
 .../metrics/MetricsServiceProvider.java         |    4 +-
 .../controller/predicate/BasePredicate.java     |    2 +-
 .../controller/predicate/PredicateVisitor.java  |   10 +-
 .../predicate/PredicateVisitorAcceptor.java     |    2 +-
 .../spi/ExtendedResourceProvider.java           |    2 +-
 .../server/controller/spi/PageRequest.java      |   10 +-
 .../server/controller/spi/PageResponse.java     |   10 +-
 .../ambari/server/controller/spi/Predicate.java |    2 +-
 .../server/controller/spi/PropertyProvider.java |    4 +-
 .../server/controller/spi/ProviderModule.java   |    4 +-
 .../server/controller/spi/QueryResponse.java    |    8 +-
 .../server/controller/spi/RequestStatus.java    |    8 +-
 .../spi/ResourcePredicateEvaluator.java         |    2 +-
 .../ambari/server/controller/spi/Schema.java    |    4 +-
 .../server/controller/spi/SchemaFactory.java    |    2 +-
 .../server/controller/spi/SortRequest.java      |    6 +-
 .../controller/utilities/StreamProvider.java    |    4 +-
 .../events/AlertDefinitionDisabledEvent.java    |    2 +-
 .../EntityManagerCacheInvalidationEvent.java    |    5 -
 .../alerts/AlertMaintenanceModeListener.java    |   60 +-
 .../listeners/alerts/AlertReceivedListener.java |    2 +-
 .../listeners/upgrade/StackVersionListener.java |   20 +-
 .../server/hooks/users/UserHookService.java     |    3 +-
 .../system/impl/AmbariMetricSinkImpl.java       |   47 +-
 .../server/notifications/DispatchCallback.java  |    4 +-
 .../apache/ambari/server/orm/DBAccessor.java    |   33 +-
 .../ambari/server/orm/DBAccessorImpl.java       |   49 +
 .../server/orm/JPATableGenerationStrategy.java  |    2 +-
 .../orm/cache/ConfigGroupHostMapping.java       |   16 +-
 .../server/orm/cache/HostConfigMapping.java     |   32 +-
 .../ambari/server/orm/dao/ExtensionDAO.java     |    2 +-
 .../ambari/server/orm/dao/ExtensionLinkDAO.java |   39 +-
 .../server/orm/dao/HostComponentStateDAO.java   |   20 +
 .../server/orm/dao/HostRoleCommandDAO.java      |   18 +-
 .../ambari/server/orm/dao/RequestDAO.java       |  264 +-
 .../dao/ServiceComponentDesiredStateDAO.java    |   24 +
 .../server/orm/dao/TopologyHostTaskDAO.java     |   11 +
 .../orm/dao/TopologyLogicalRequestDAO.java      |   12 +
 .../server/orm/dao/TopologyLogicalTaskDAO.java  |   12 +
 .../ambari/server/orm/dao/UpgradeDAO.java       |   30 +-
 .../orm/entities/AlertDefinitionEntity.java     |    2 +-
 .../orm/entities/BlueprintConfiguration.java    |   16 +-
 .../orm/entities/ClusterConfigEntity.java       |   11 +
 .../orm/entities/ExecutionCommandEntity.java    |    5 +
 .../orm/entities/ExtensionLinkEntity.java       |    1 +
 .../orm/entities/HostComponentStateEntity.java  |    7 +-
 .../orm/entities/HostRoleCommandEntity.java     |   27 +-
 .../server/orm/entities/PrivilegeEntity.java    |   10 +
 .../orm/entities/RepositoryVersionEntity.java   |    6 +-
 .../server/orm/entities/RequestEntity.java      |   25 +
 .../entities/RequestOperationLevelEntity.java   |    4 +-
 .../entities/RequestResourceFilterEntity.java   |    5 +
 .../orm/entities/RoleSuccessCriteriaEntity.java |    5 +
 .../ServiceComponentDesiredStateEntity.java     |   19 +
 .../entities/ServiceComponentVersionEntity.java |   26 +-
 .../ambari/server/orm/entities/StageEntity.java |   24 +-
 .../server/orm/entities/StageEntity_.java       |    4 -
 .../orm/entities/TopologyHostRequestEntity.java |    5 +
 .../orm/entities/TopologyHostTaskEntity.java    |   15 +-
 .../entities/TopologyLogicalRequestEntity.java  |    5 +
 .../orm/entities/TopologyLogicalTaskEntity.java |   30 +-
 .../server/orm/entities/UpgradeEntity.java      |    7 +-
 .../server/orm/entities/UpgradeGroupEntity.java |    2 +-
 .../server/orm/entities/UpgradeItemEntity.java  |    5 +
 .../ambari/server/orm/entities/UserEntity.java  |    5 +-
 .../ambari/server/orm/entities/ViewEntity.java  |    9 +
 .../server/orm/entities/ViewInstanceEntity.java |    9 +
 .../server/orm/entities/ViewURLEntity.java      |    1 -
 .../server/orm/helpers/dbms/DbmsHelper.java     |   22 +-
 .../orm/helpers/dbms/GenericDbmsHelper.java     |    8 +
 .../server/orm/helpers/dbms/MySqlHelper.java    |   12 +
 .../server/orm/helpers/dbms/OracleHelper.java   |   12 +
 .../server/orm/helpers/dbms/PostgresHelper.java |   12 +
 .../ambari/server/scheduler/ExecutionJob.java   |   10 +-
 .../server/scheduler/ExecutionScheduler.java    |   16 +-
 .../ambari/server/security/SecurityHelper.java  |    4 +-
 .../AmbariPamAuthenticationProvider.java        |   25 +-
 .../authorization/LdapServerProperties.java     |    4 +-
 .../security/authorization/ResourceType.java    |    2 +-
 .../server/security/authorization/UserName.java |   76 +
 .../server/security/authorization/Users.java    |    4 +-
 .../security/encryption/MasterKeyService.java   |    4 +-
 .../kerberos/KerberosConfigDataFile.java        |   14 +-
 .../kerberos/KerberosIdentityDataFile.java      |   24 +-
 .../upgrades/AbstractUpgradeServerAction.java   |   74 +
 .../upgrades/ComponentVersionCheckAction.java   |    2 +-
 .../upgrades/FinalizeUpgradeAction.java         |   41 +-
 .../FixCapacitySchedulerOrderingPolicy.java     |    5 +
 .../upgrades/UpdateDesiredStackAction.java      |   14 +-
 .../ambari/server/stack/ConfigurationInfo.java  |    6 +-
 .../ambari/server/stack/ExtensionModule.java    |    2 +
 .../stack/QuickLinksConfigurationModule.java    |    2 +-
 .../server/stack/StackDefinitionModule.java     |   16 +-
 .../ambari/server/stack/StackDirectory.java     |    4 +
 .../ambari/server/stack/StackManager.java       |   79 +-
 .../apache/ambari/server/stack/StackModule.java |    6 +-
 .../apache/ambari/server/stack/ThemeModule.java |    2 +-
 .../apache/ambari/server/stack/Validable.java   |   10 +-
 .../ambari/server/stageplanner/RoleGraph.java   |    2 +-
 .../apache/ambari/server/state/AlertState.java  |    4 +
 .../org/apache/ambari/server/state/Cluster.java |   54 +-
 .../server/state/CommandScriptDefinition.java   |    2 +-
 .../org/apache/ambari/server/state/Config.java  |   22 +-
 .../apache/ambari/server/state/ConfigImpl.java  |    2 +-
 .../server/state/CustomCommandDefinition.java   |    5 +
 .../ambari/server/state/ExtensionInfo.java      |   26 +-
 .../ambari/server/state/HostHealthStatus.java   |    2 +-
 .../ambari/server/state/ServiceComponent.java   |   15 +
 .../server/state/ServiceComponentImpl.java      |  166 +-
 .../apache/ambari/server/state/ServiceImpl.java |    2 +-
 .../apache/ambari/server/state/StackInfo.java   |   27 +-
 .../ambari/server/state/UpgradeContext.java     |    9 +
 .../ambari/server/state/UpgradeHelper.java      |    4 +-
 .../server/state/ValueAttributesInfo.java       |    2 +-
 .../ambari/server/state/action/Action.java      |   14 +-
 .../server/state/alert/AlertDefinitionHash.java |    2 -
 .../server/state/cluster/ClusterImpl.java       |  225 +-
 .../server/state/cluster/ClustersImpl.java      |    4 -
 .../server/state/configgroup/ConfigGroup.java   |   32 +-
 .../state/configgroup/ConfigGroupFactory.java   |   15 +-
 .../state/configgroup/ConfigGroupImpl.java      |    8 -
 .../server/state/fsm/MultipleArcTransition.java |    2 +-
 .../server/state/fsm/SingleArcTransition.java   |    2 +-
 .../ambari/server/state/fsm/StateMachine.java   |    6 +-
 .../server/state/fsm/StateMachineFactory.java   |    4 +-
 .../kerberos/AbstractKerberosDescriptor.java    |    2 +-
 .../state/repository/VersionDefinitionXml.java  |   24 +-
 .../state/scheduler/RequestExecution.java       |   56 +-
 .../state/services/MetricsRetrievalService.java |    6 +-
 .../state/stack/ExtensionMetainfoXml.java       |   11 +
 .../state/stack/upgrade/ClusterGrouping.java    |   31 +-
 .../upgrade/ConfigUpgradeChangeDefinition.java  |    3 +-
 .../server/state/stack/upgrade/Grouping.java    |    2 +-
 .../state/stack/upgrade/HostOrderGrouping.java  |    1 -
 .../state/stack/upgrade/StageWrapper.java       |   65 +
 .../ambari/server/state/stack/upgrade/Task.java |    6 +
 .../server/state/stack/upgrade/TaskWrapper.java |   33 +-
 .../state/stack/upgrade/TaskWrapperBuilder.java |    5 +-
 .../state/stack/upgrade/UpgradeFunction.java    |    2 +-
 .../ambari/server/topology/AmbariContext.java   |    3 +
 .../ambari/server/topology/Blueprint.java       |   42 +-
 .../server/topology/BlueprintFactory.java       |    2 +-
 .../ambari/server/topology/BlueprintImpl.java   |   15 +
 .../server/topology/BlueprintValidator.java     |    4 +-
 .../server/topology/BlueprintValidatorImpl.java |   88 +-
 .../topology/ClusterConfigurationRequest.java   |   67 +-
 .../ambari/server/topology/ClusterTopology.java |    2 +
 .../server/topology/ClusterTopologyImpl.java    |   37 +-
 .../ambari/server/topology/HostGroup.java       |   28 +-
 .../ambari/server/topology/LogicalRequest.java  |   38 +-
 .../ambari/server/topology/PersistedState.java  |    7 +
 .../server/topology/PersistedStateImpl.java     |   15 +-
 .../ambari/server/topology/TopologyManager.java |   99 +-
 .../ambari/server/topology/TopologyRequest.java |   22 +-
 .../server/topology/TopologyRequestFactory.java |    2 +-
 .../server/topology/TopologyValidator.java      |    2 +-
 .../server/topology/tasks/TopologyTask.java     |    4 +-
 .../validators/ChainedTopologyValidator.java    |   58 +
 .../validators/HiveServiceValidator.java        |    2 +-
 .../RequiredConfigPropertiesValidator.java      |  188 +
 .../validators/RequiredPasswordValidator.java   |    6 +-
 .../validators/StackConfigTypeValidator.java    |   64 +
 .../validators/TopologyValidatorFactory.java    |   35 +
 .../validators/TopologyValidatorService.java    |   52 +
 .../server/upgrade/AbstractUpgradeCatalog.java  |    5 +-
 .../server/upgrade/SchemaUpgradeHelper.java     |    2 +
 .../server/upgrade/UpgradeCatalog212.java       |    3 -
 .../server/upgrade/UpgradeCatalog250.java       |  100 +-
 .../server/upgrade/UpgradeCatalog251.java       |  169 +
 .../server/upgrade/UpgradeCatalog252.java       |   94 +
 .../server/upgrade/UpgradeCatalog300.java       |   80 +-
 .../apache/ambari/server/utils/StageUtils.java  |    8 +-
 .../server/view/ViewInstanceHandlerList.java    |    4 +-
 .../view/ViewInstanceOperationHandler.java      |   96 +
 .../apache/ambari/server/view/ViewRegistry.java |   57 +-
 .../view/persistence/SchemaManagerFactory.java  |    2 +-
 .../src/main/package/rpm/postinstall.sh         |   18 -
 ambari-server/src/main/python/ambari-server.py  |  307 +-
 .../src/main/python/ambari_server/dbCleanup.py  |   37 +-
 .../python/ambari_server/resourceFilesKeeper.py |   11 +-
 .../python/ambari_server/serverConfiguration.py |    1 +
 .../main/python/ambari_server/serverUpgrade.py  |   31 +-
 .../main/python/ambari_server/setupMpacks.py    |   47 +-
 .../src/main/python/azuredb_create_generator.py |   85 +
 ambari-server/src/main/resources/.gitignore     |    1 +
 .../main/resources/Ambari-DDL-Derby-CREATE.sql  |    7 +-
 .../main/resources/Ambari-DDL-MySQL-CREATE.sql  |    7 +-
 .../main/resources/Ambari-DDL-Oracle-CREATE.sql |    7 +-
 .../resources/Ambari-DDL-Postgres-CREATE.sql    |    7 +-
 .../resources/Ambari-DDL-SQLAnywhere-CREATE.sql |    7 +-
 .../resources/Ambari-DDL-SQLServer-CREATE.sql   |    7 +-
 ambari-server/src/main/resources/alerts.json    |   12 +
 .../configuration/accumulo-logsearch-conf.xml   |  124 -
 .../package/scripts/accumulo_script.py          |   50 -
 .../1.6.1.2.2.0/package/scripts/params.py       |   17 +-
 .../hadoop-metrics2-accumulo.properties.j2      |    3 +
 .../templates/input.config-accumulo.json.j2     |   92 +
 .../configuration/infra-logsearch-conf.xml      |   80 -
 .../0.1.0/package/scripts/infra_solr.py         |    6 +-
 .../0.1.0/package/scripts/params.py             |    1 +
 .../0.1.0/package/scripts/status_params.py      |    6 +
 .../templates/input.config-ambari-infra.json.j2 |   48 +
 .../0.1.0/configuration/ams-env.xml             |    8 +
 .../0.1.0/configuration/ams-logsearch-conf.xml  |  201 -
 .../0.1.0/configuration/ams-site.xml            |   11 +
 .../AMBARI_METRICS/0.1.0/metainfo.xml           |    3 +
 .../HDP/grafana-hbase-misc.json                 |   18 +-
 .../HDP/grafana-hbase-regionservers.json        |   17 +-
 .../HDP/grafana-hdfs-datanodes.json             |   18 +-
 .../HDP/grafana-hdfs-home.json                  |   18 +-
 .../HDP/grafana-hdfs-namenodes.json             |   18 +-
 .../HDP/grafana-hdfs-topn.json                  |   18 +-
 .../HDP/grafana-hive-hivemetastore.json         |   18 +-
 .../HDP/grafana-hive-hiverserver2.json          |   18 +-
 .../HDP/grafana-kafka-hosts.json                |   18 +-
 .../HDP/grafana-llapdaemon-daemons.json         |   17 +-
 .../HDP/grafana-llapdaemon-heatmaps.json        |   17 +-
 .../HDP/grafana-solr-cores-dashboard.json       |   18 +-
 .../HDP/grafana-solr-hosts-dashboard.json       |   18 +-
 .../HDP/grafana-yarn-nodemanagers.json          |   18 +-
 .../HDP/grafana-yarn-resourcemanagers.json      |   18 +-
 .../default/grafana-ambari-server-database.json |   19 +-
 .../default/grafana-ambari-server-topn.json     |   18 +-
 .../default/grafana-ambari-server.json          |   18 +-
 .../default/grafana-ams-hbase-misc.json         |   18 +-
 .../grafana-ams-hbase-regionservers.json        |   17 +-
 .../default/grafana-system-servers.json         |   18 +-
 .../AMBARI_METRICS/0.1.0/package/scripts/ams.py |   30 +
 .../0.1.0/package/scripts/metrics_collector.py  |   66 +-
 .../package/scripts/metrics_grafana_util.py     |   37 +-
 .../0.1.0/package/scripts/params.py             |   18 +-
 .../0.1.0/package/scripts/service_check.py      |   22 +-
 .../hadoop-metrics2-hbase.properties.j2         |    3 +
 .../input.config-ambari-metrics.json.j2         |  169 +
 .../package/templates/metric_monitor.ini.j2     |    9 +
 .../configuration/atlas-logsearch-conf.xml      |   80 -
 .../ATLAS/0.1.0.2.3/package/scripts/metadata.py |   14 +-
 .../package/scripts/metadata_server.py          |   78 -
 .../ATLAS/0.1.0.2.3/package/scripts/params.py   |    3 +
 .../templates/input.config-atlas.json.j2        |   48 +
 .../configuration/application-properties.xml    |   10 +-
 .../ATLAS/0.7.0.2.5/role_command_order.json     |    2 +-
 .../common-services/ATLAS/0.7.0.3.0/alerts.json |   39 +
 .../configuration/application-properties.xml    |  546 ++
 .../ATLAS/0.7.0.3.0/configuration/atlas-env.xml |  182 +
 .../0.7.0.3.0/configuration/atlas-log4j.xml     |  170 +
 .../configuration/atlas-solrconfig.xml          |  641 ++
 .../configuration/ranger-atlas-audit.xml        |  141 +
 .../ranger-atlas-plugin-properties.xml          |  132 +
 .../ranger-atlas-policymgr-ssl.xml              |   73 +
 .../configuration/ranger-atlas-security.xml     |   77 +
 .../ATLAS/0.7.0.3.0/kerberos.json               |  100 +
 .../ATLAS/0.7.0.3.0/metainfo.xml                |  190 +
 .../0.7.0.3.0/package/scripts/atlas_client.py   |   57 +
 .../ATLAS/0.7.0.3.0/package/scripts/metadata.py |  243 +
 .../package/scripts/metadata_server.py          |  187 +
 .../ATLAS/0.7.0.3.0/package/scripts/params.py   |  417 ++
 .../0.7.0.3.0/package/scripts/service_check.py  |   55 +
 .../package/scripts/setup_ranger_atlas.py       |   70 +
 .../0.7.0.3.0/package/scripts/status_params.py  |   60 +
 .../package/templates/atlas_hbase_setup.rb.j2   |   42 +
 .../package/templates/atlas_jaas.conf.j2        |   26 +
 .../package/templates/atlas_kafka_acl.sh.j2     |   41 +
 .../templates/input.config-atlas.json.j2        |   48 +
 .../package/templates/kafka_jaas.conf.j2        |   41 +
 .../ATLAS/0.7.0.3.0/quicklinks/quicklinks.json  |   36 +
 .../ATLAS/0.7.0.3.0/role_command_order.json     |    7 +
 .../ATLAS/0.7.0.3.0/service_advisor.py          |  441 ++
 .../ATLAS/0.7.0.3.0/themes/theme.json           |  619 ++
 .../ATLAS/0.7.0.3.0/themes/theme_version_2.json |  845 +++
 .../configuration/falcon-logsearch-conf.xml     |   80 -
 .../0.5.0.2.1/package/scripts/falcon_client.py  |   10 -
 .../0.5.0.2.1/package/scripts/falcon_server.py  |   59 -
 .../templates/input.config-falcon.json.j2       |   48 +
 .../configuration/flume-logsearch-conf.xml      |   80 -
 .../FLUME/1.4.0.2.0/package/scripts/params.py   |   24 +-
 .../templates/flume-metrics2.properties.j2      |    2 +
 .../templates/input.config-flume.json.j2        |   53 +
 .../configuration/hbase-logsearch-conf.xml      |  111 -
 .../0.96.0.2.0/package/scripts/hbase_master.py  |   49 -
 .../package/scripts/hbase_regionserver.py       |   49 -
 .../0.96.0.2.0/package/scripts/params_linux.py  |   17 +-
 .../package/scripts/phoenix_queryserver.py      |    6 +-
 ...-metrics2-hbase.properties-GANGLIA-MASTER.j2 |    2 +
 ...doop-metrics2-hbase.properties-GANGLIA-RS.j2 |    3 +-
 .../templates/input.config-hbase.json.j2        |   79 +
 .../common-services/HBASE/2.0.0.3.0/alerts.json |  127 +
 .../HBASE/2.0.0.3.0/configuration/hbase-env.xml |  279 +
 .../2.0.0.3.0/configuration/hbase-log4j.xml     |  188 +
 .../2.0.0.3.0/configuration/hbase-policy.xml    |   53 +
 .../2.0.0.3.0/configuration/hbase-site.xml      |  774 ++
 .../configuration/ranger-hbase-audit.xml        |  132 +
 .../ranger-hbase-plugin-properties.xml          |  135 +
 .../ranger-hbase-policymgr-ssl.xml              |   66 +
 .../configuration/ranger-hbase-security.xml     |   74 +
 .../HBASE/2.0.0.3.0/kerberos.json               |  160 +
 .../HBASE/2.0.0.3.0/metainfo.xml                |  232 +
 .../HBASE/2.0.0.3.0/metrics.json                | 4733 +++++++++++++
 .../2.0.0.3.0/package/files/draining_servers.rb |  164 +
 .../package/files/hbase-smoke-cleanup.sh        |   23 +
 .../2.0.0.3.0/package/files/hbaseSmokeVerify.sh |   34 +
 .../HBASE/2.0.0.3.0/package/scripts/__init__.py |   19 +
 .../2.0.0.3.0/package/scripts/functions.py      |   54 +
 .../HBASE/2.0.0.3.0/package/scripts/hbase.py    |  230 +
 .../2.0.0.3.0/package/scripts/hbase_client.py   |   81 +
 .../package/scripts/hbase_decommission.py       |   94 +
 .../2.0.0.3.0/package/scripts/hbase_master.py   |  163 +
 .../package/scripts/hbase_regionserver.py       |  174 +
 .../2.0.0.3.0/package/scripts/hbase_service.py  |   66 +
 .../2.0.0.3.0/package/scripts/hbase_upgrade.py  |   42 +
 .../HBASE/2.0.0.3.0/package/scripts/params.py   |   28 +
 .../2.0.0.3.0/package/scripts/params_linux.py   |  426 ++
 .../2.0.0.3.0/package/scripts/params_windows.py |   43 +
 .../package/scripts/phoenix_queryserver.py      |   92 +
 .../package/scripts/phoenix_service.py          |   56 +
 .../2.0.0.3.0/package/scripts/service_check.py  |   99 +
 .../package/scripts/setup_ranger_hbase.py       |  106 +
 .../2.0.0.3.0/package/scripts/status_params.py  |   68 +
 .../HBASE/2.0.0.3.0/package/scripts/upgrade.py  |  106 +
 .../package/templates/hbase-smoke.sh.j2         |   44 +
 .../2.0.0.3.0/package/templates/hbase.conf.j2   |   35 +
 .../package/templates/hbase_client_jaas.conf.j2 |   23 +
 .../templates/hbase_grant_permissions.j2        |   39 +
 .../package/templates/hbase_master_jaas.conf.j2 |   26 +
 .../templates/hbase_queryserver_jaas.conf.j2    |   26 +
 .../templates/hbase_regionserver_jaas.conf.j2   |   26 +
 .../templates/input.config-hbase.json.j2        |   79 +
 .../package/templates/regionservers.j2          |   20 +
 .../HBASE/2.0.0.3.0/quicklinks/quicklinks.json  |   97 +
 .../HBASE/2.0.0.3.0/role_command_order.json     |   10 +
 .../HBASE/2.0.0.3.0/service_advisor.py          |  675 ++
 .../HBASE/2.0.0.3.0/themes/theme.json           |  407 ++
 .../HBASE/2.0.0.3.0/widgets.json                |  510 ++
 .../configuration/hdfs-logsearch-conf.xml       |  248 -
 .../common-services/HDFS/2.1.0.2.0/metainfo.xml |    6 +-
 .../package/alerts/alert_metrics_deviation.py   |   10 +-
 .../HDFS/2.1.0.2.0/package/scripts/datanode.py  |  103 +-
 .../package/scripts/datanode_upgrade.py         |   38 +-
 .../2.1.0.2.0/package/scripts/hdfs_client.py    |   45 -
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  |   11 +-
 .../2.1.0.2.0/package/scripts/journalnode.py    |   57 -
 .../HDFS/2.1.0.2.0/package/scripts/namenode.py  |   57 -
 .../2.1.0.2.0/package/scripts/nfsgateway.py     |   58 -
 .../HDFS/2.1.0.2.0/package/scripts/params.py    |    2 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |    2 +
 .../HDFS/2.1.0.2.0/package/scripts/snamenode.py |   60 -
 .../HDFS/2.1.0.2.0/package/scripts/utils.py     |    5 +-
 .../2.1.0.2.0/package/scripts/zkfc_slave.py     |   43 -
 .../package/templates/input.config-hdfs.json.j2 |  216 +
 .../HDFS/3.0.0.3.0/configuration/hadoop-env.xml |  200 +-
 .../hadoop-metrics2.properties.xml              |    2 +
 .../HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml |  382 +-
 .../common-services/HDFS/3.0.0.3.0/metainfo.xml |   10 +-
 .../HDFS/3.0.0.3.0/package/scripts/datanode.py  |   58 -
 .../3.0.0.3.0/package/scripts/hdfs_client.py    |   45 -
 .../3.0.0.3.0/package/scripts/journalnode.py    |   57 -
 .../HDFS/3.0.0.3.0/package/scripts/namenode.py  |   57 -
 .../3.0.0.3.0/package/scripts/nfsgateway.py     |   58 -
 .../HDFS/3.0.0.3.0/package/scripts/snamenode.py |   60 -
 .../3.0.0.3.0/package/scripts/zkfc_slave.py     |   43 -
 .../HIVE/0.12.0.2.0/configuration/hive-env.xml  |    3 +-
 .../configuration/hive-logsearch-conf.xml       |  117 -
 .../package/alerts/alert_llap_app_status.py     |    4 +-
 .../HIVE/0.12.0.2.0/package/scripts/hive.py     |   25 +-
 .../package/scripts/hive_metastore.py           |   55 +-
 .../0.12.0.2.0/package/scripts/hive_server.py   |   61 -
 .../package/scripts/hive_server_interactive.py  |   64 -
 .../0.12.0.2.0/package/scripts/params_linux.py  |   28 +-
 .../package/scripts/setup_ranger_hive.py        |   58 +
 .../package/scripts/webhcat_server.py           |   67 -
 .../hadoop-metrics2-hivemetastore.properties.j2 |    2 +
 .../hadoop-metrics2-hiveserver2.properties.j2   |    2 +
 .../templates/hadoop-metrics2-llapdaemon.j2     |    4 +-
 .../hadoop-metrics2-llaptaskscheduler.j2        |    4 +-
 .../package/templates/input.config-hive.json.j2 |   85 +
 .../HIVE/2.1.0.3.0/configuration/hcat-env.xml   |   48 +-
 .../HIVE/2.1.0.3.0/configuration/hive-env.xml   |   80 +-
 .../configuration/hive-interactive-env.xml      |   63 +-
 .../common-services/HIVE/2.1.0.3.0/metainfo.xml |   11 +-
 .../package/alerts/alert_llap_app_status.py     |    4 +-
 .../2.1.0.3.0/package/scripts/hive_metastore.py |   55 +-
 .../2.1.0.3.0/package/scripts/hive_server.py    |   61 -
 .../package/scripts/hive_server_interactive.py  |   64 -
 .../2.1.0.3.0/package/scripts/params_linux.py   |   28 +-
 .../package/scripts/setup_ranger_hive.py        |   58 +
 .../2.1.0.3.0/package/scripts/webhcat_server.py |   67 -
 .../hadoop-metrics2-hivemetastore.properties.j2 |    2 +
 .../hadoop-metrics2-hiveserver2.properties.j2   |    2 +
 .../templates/hadoop-metrics2-llapdaemon.j2     |    4 +-
 .../hadoop-metrics2-llaptaskscheduler.j2        |    4 +-
 .../HIVE/2.1.0.3.0/service_advisor.py           |    7 +
 .../KAFKA/0.10.0.3.0/alerts.json                |   32 +
 .../0.10.0.3.0/configuration/kafka-broker.xml   |  559 ++
 .../0.10.0.3.0/configuration/kafka-env.xml      |  111 +
 .../0.10.0.3.0/configuration/kafka-log4j.xml    |  170 +
 .../configuration/kafka_client_jaas_conf.xml    |   41 +
 .../configuration/kafka_jaas_conf.xml           |   59 +
 .../configuration/ranger-kafka-audit.xml        |  130 +
 .../ranger-kafka-plugin-properties.xml          |  148 +
 .../ranger-kafka-policymgr-ssl.xml              |   66 +
 .../configuration/ranger-kafka-security.xml     |   64 +
 .../KAFKA/0.10.0.3.0/kerberos.json              |   76 +
 .../KAFKA/0.10.0.3.0/metainfo.xml               |  109 +
 .../KAFKA/0.10.0.3.0/metrics.json               |  239 +
 .../KAFKA/0.10.0.3.0/package/scripts/kafka.py   |  276 +
 .../0.10.0.3.0/package/scripts/kafka_broker.py  |  151 +
 .../KAFKA/0.10.0.3.0/package/scripts/params.py  |  341 +
 .../0.10.0.3.0/package/scripts/service_check.py |   70 +
 .../package/scripts/setup_ranger_kafka.py       |   90 +
 .../0.10.0.3.0/package/scripts/status_params.py |   26 +
 .../KAFKA/0.10.0.3.0/package/scripts/upgrade.py |   78 +
 .../KAFKA/0.10.0.3.0/package/scripts/utils.py   |   38 +
 .../templates/input.config-kafka.json.j2        |   92 +
 .../0.10.0.3.0/package/templates/kafka.conf.j2  |   35 +
 .../package/templates/kafka_client_jaas.conf.j2 |   29 +
 .../package/templates/kafka_jaas.conf.j2        |   41 +
 .../package/templates/tools-log4j.properties.j2 |   21 +
 .../KAFKA/0.10.0.3.0/role_command_order.json    |    7 +
 .../KAFKA/0.10.0.3.0/service_advisor.py         |  363 +
 .../KAFKA/0.10.0.3.0/widgets.json               |  182 +
 .../KAFKA/0.8.1/configuration/kafka-broker.xml  |   23 +-
 .../configuration/kafka-logsearch-conf.xml      |  124 -
 .../KAFKA/0.8.1/package/scripts/kafka_broker.py |    6 +
 .../KAFKA/0.8.1/package/scripts/params.py       |   18 +-
 .../0.8.1/package/scripts/service_check.py      |   15 +-
 .../templates/input.config-kafka.json.j2        |   92 +
 .../KAFKA/0.9.0/configuration/kafka-broker.xml  |    2 +-
 .../package/scripts/kerberos_client.py          |   21 -
 .../0.5.0.2.2/configuration/gateway-site.xml    |    2 +-
 .../configuration/knox-logsearch-conf.xml       |   93 -
 .../0.5.0.2.2/package/scripts/knox_gateway.py   |   61 -
 .../package/templates/input.config-knox.json.j2 |   60 +
 .../common-services/KNOX/0.5.0.3.0/alerts.json  |   32 +
 .../0.5.0.3.0/configuration/admin-topology.xml  |   97 +
 .../0.5.0.3.0/configuration/gateway-log4j.xml   |  110 +
 .../0.5.0.3.0/configuration/gateway-site.xml    |   71 +
 .../KNOX/0.5.0.3.0/configuration/knox-env.xml   |   83 +
 .../configuration/knoxsso-topology.xml          |  126 +
 .../KNOX/0.5.0.3.0/configuration/ldap-log4j.xml |   93 +
 .../configuration/ranger-knox-audit.xml         |  132 +
 .../ranger-knox-plugin-properties.xml           |  132 +
 .../configuration/ranger-knox-policymgr-ssl.xml |   66 +
 .../configuration/ranger-knox-security.xml      |   64 +
 .../KNOX/0.5.0.3.0/configuration/topology.xml   |  174 +
 .../KNOX/0.5.0.3.0/configuration/users-ldif.xml |  140 +
 .../KNOX/0.5.0.3.0/kerberos.json                |   81 +
 .../common-services/KNOX/0.5.0.3.0/metainfo.xml |  109 +
 .../package/files/validateKnoxStatus.py         |   43 +
 .../KNOX/0.5.0.3.0/package/scripts/knox.py      |  192 +
 .../0.5.0.3.0/package/scripts/knox_gateway.py   |  220 +
 .../KNOX/0.5.0.3.0/package/scripts/knox_ldap.py |   59 +
 .../KNOX/0.5.0.3.0/package/scripts/params.py    |   29 +
 .../0.5.0.3.0/package/scripts/params_linux.py   |  457 ++
 .../0.5.0.3.0/package/scripts/params_windows.py |   71 +
 .../0.5.0.3.0/package/scripts/service_check.py  |   96 +
 .../package/scripts/setup_ranger_knox.py        |  121 +
 .../0.5.0.3.0/package/scripts/status_params.py  |   59 +
 .../KNOX/0.5.0.3.0/package/scripts/upgrade.py   |  118 +
 .../package/templates/input.config-knox.json.j2 |   60 +
 .../package/templates/krb5JAASLogin.conf.j2     |   30 +
 .../KNOX/0.5.0.3.0/role_command_order.json      |    7 +
 .../KNOX/0.5.0.3.0/service_advisor.py           |  253 +
 .../configuration/logfeeder-ambari-config.xml   |    1 +
 .../logfeeder-custom-logsearch-conf.xml         |   46 -
 .../configuration/logfeeder-properties.xml      |   10 +
 .../logsearch-common-properties.xml             |   23 +
 .../configuration/logsearch-properties.xml      |   10 -
 .../scripts/logsearch_config_aggregator.py      |   77 -
 .../LOGSEARCH/0.5.0/package/scripts/params.py   |   57 +-
 .../0.5.0/package/scripts/service_check.py      |    9 +-
 .../0.5.0/package/scripts/setup_logfeeder.py    |   15 +-
 .../templates/HadoopServiceConfig.json.j2       |  545 +-
 .../templates/input.config-logsearch.json.j2    |    6 +-
 .../LOGSEARCH/0.5.0/themes/theme.json           |    4 +-
 .../configuration/oozie-logsearch-conf.xml      |   80 -
 .../4.0.0.2.0/package/scripts/oozie_server.py   |   63 -
 .../templates/input.config-oozie.json.j2        |   48 +
 .../common-services/OOZIE/4.2.0.3.0/alerts.json |   45 +
 .../OOZIE/4.2.0.3.0/configuration/oozie-env.xml |  255 +
 .../4.2.0.3.0/configuration/oozie-log4j.xml     |  149 +
 .../4.2.0.3.0/configuration/oozie-site.xml      |  254 +
 .../OOZIE/4.2.0.3.0/kerberos.json               |   70 +
 .../OOZIE/4.2.0.3.0/metainfo.xml                |  203 +
 .../package/alerts/alert_check_oozie_server.py  |  244 +
 .../4.2.0.3.0/package/files/oozieSmoke2.sh      |   84 +
 .../files/prepareOozieHdfsDirectories.sh        |   42 +
 .../4.2.0.3.0/package/files/wrap_ooziedb.sh     |   31 +
 .../scripts/check_oozie_server_status.py        |   38 +
 .../OOZIE/4.2.0.3.0/package/scripts/oozie.py    |  516 ++
 .../4.2.0.3.0/package/scripts/oozie_client.py   |   78 +
 .../4.2.0.3.0/package/scripts/oozie_server.py   |  163 +
 .../package/scripts/oozie_server_upgrade.py     |  237 +
 .../4.2.0.3.0/package/scripts/oozie_service.py  |  188 +
 .../OOZIE/4.2.0.3.0/package/scripts/params.py   |   39 +
 .../4.2.0.3.0/package/scripts/params_linux.py   |  374 +
 .../4.2.0.3.0/package/scripts/params_windows.py |   34 +
 .../4.2.0.3.0/package/scripts/service_check.py  |  140 +
 .../4.2.0.3.0/package/scripts/status_params.py  |   65 +
 .../package/templates/adminusers.txt.j2         |   28 +
 .../templates/input.config-oozie.json.j2        |   48 +
 .../package/templates/oozie-log4j.properties.j2 |   93 +
 .../4.2.0.3.0/package/templates/oozie.conf.j2   |   35 +
 .../package/templates/zkmigrator_jaas.conf.j2   |   26 +
 .../OOZIE/4.2.0.3.0/quicklinks/quicklinks.json  |   45 +
 .../OOZIE/4.2.0.3.0/role_command_order.json     |    9 +
 .../OOZIE/4.2.0.3.0/themes/theme.json           |  116 +
 .../configuration/ranger-logsearch-conf.xml     |  111 -
 .../0.4.0/package/scripts/setup_ranger_xml.py   |  119 +-
 .../templates/input.config-ranger.json.j2       |   79 +
 .../RANGER/0.5.0/role_command_order.json        |    2 +-
 .../RANGER/0.7.0.3.0/alerts.json                |   76 +
 .../0.7.0.3.0/configuration/admin-log4j.xml     |  132 +
 .../configuration/admin-properties.xml          |  163 +
 .../configuration/atlas-tagsync-ssl.xml         |   72 +
 .../configuration/ranger-admin-site.xml         |  785 +++
 .../0.7.0.3.0/configuration/ranger-env.xml      |  513 ++
 .../0.7.0.3.0/configuration/ranger-site.xml     |   30 +
 .../configuration/ranger-solr-configuration.xml |   59 +
 .../ranger-tagsync-policymgr-ssl.xml            |   72 +
 .../configuration/ranger-tagsync-site.xml       |  206 +
 .../configuration/ranger-ugsync-site.xml        |  574 ++
 .../tagsync-application-properties.xml          |   62 +
 .../0.7.0.3.0/configuration/tagsync-log4j.xml   |   90 +
 .../0.7.0.3.0/configuration/usersync-log4j.xml  |   89 +
 .../configuration/usersync-properties.xml       |   32 +
 .../RANGER/0.7.0.3.0/kerberos.json              |  153 +
 .../RANGER/0.7.0.3.0/metainfo.xml               |  189 +
 .../alerts/alert_ranger_admin_passwd_check.py   |  195 +
 .../RANGER/0.7.0.3.0/package/scripts/params.py  |  448 ++
 .../0.7.0.3.0/package/scripts/ranger_admin.py   |  217 +
 .../0.7.0.3.0/package/scripts/ranger_service.py |   69 +
 .../0.7.0.3.0/package/scripts/ranger_tagsync.py |  139 +
 .../package/scripts/ranger_usersync.py          |  124 +
 .../0.7.0.3.0/package/scripts/service_check.py  |   49 +
 .../0.7.0.3.0/package/scripts/setup_ranger.py   |  153 +
 .../package/scripts/setup_ranger_xml.py         |  853 +++
 .../0.7.0.3.0/package/scripts/status_params.py  |   39 +
 .../RANGER/0.7.0.3.0/package/scripts/upgrade.py |   31 +
 .../templates/input.config-ranger.json.j2       |   79 +
 .../package/templates/ranger_admin_pam.j2       |   22 +
 .../package/templates/ranger_remote_pam.j2      |   22 +
 .../package/templates/ranger_solr_jaas_conf.j2  |   26 +
 .../properties/ranger-solrconfig.xml.j2         | 1874 +++++
 .../RANGER/0.7.0.3.0/quicklinks/quicklinks.json |   41 +
 .../RANGER/0.7.0.3.0/role_command_order.json    |    9 +
 .../RANGER/0.7.0.3.0/service_advisor.py         |  793 +++
 .../0.7.0.3.0/themes/theme_version_1.json       |  722 ++
 .../0.7.0.3.0/themes/theme_version_2.json       | 1470 ++++
 .../0.7.0.3.0/themes/theme_version_3.json       |  692 ++
 .../0.7.0.3.0/themes/theme_version_5.json       |   48 +
 .../RANGER/0.7.0/configuration/ranger-env.xml   |   22 +
 .../0.7.0/properties/ranger-solrconfig.xml.j2   |    9 +-
 .../RANGER/0.7.0/themes/theme_version_5.json    |   26 +
 .../configuration/ranger-kms-logsearch-conf.xml |   80 -
 .../RANGER_KMS/0.5.0.2.3/metainfo.xml           |    9 +
 .../RANGER_KMS/0.5.0.2.3/package/scripts/kms.py |   20 +
 .../0.5.0.2.3/package/scripts/params.py         |   34 +-
 .../templates/input.config-ranger-kms.json.j2   |   48 +
 .../0.5.0.2.3/role_command_order.json           |    2 +-
 .../RANGER_KMS/0.5.0.3.0/alerts.json            |   32 +
 .../0.5.0.3.0/configuration/dbks-site.xml       |  206 +
 .../0.5.0.3.0/configuration/kms-env.xml         |  116 +
 .../0.5.0.3.0/configuration/kms-log4j.xml       |  120 +
 .../0.5.0.3.0/configuration/kms-properties.xml  |  166 +
 .../0.5.0.3.0/configuration/kms-site.xml        |  133 +
 .../configuration/ranger-kms-audit.xml          |  124 +
 .../configuration/ranger-kms-policymgr-ssl.xml  |   68 +
 .../configuration/ranger-kms-security.xml       |   64 +
 .../0.5.0.3.0/configuration/ranger-kms-site.xml |  104 +
 .../RANGER_KMS/0.5.0.3.0/kerberos.json          |   84 +
 .../RANGER_KMS/0.5.0.3.0/metainfo.xml           |  115 +
 .../RANGER_KMS/0.5.0.3.0/package/scripts/kms.py |  677 ++
 .../0.5.0.3.0/package/scripts/kms_server.py     |  117 +
 .../0.5.0.3.0/package/scripts/kms_service.py    |   58 +
 .../0.5.0.3.0/package/scripts/params.py         |  331 +
 .../0.5.0.3.0/package/scripts/service_check.py  |   41 +
 .../0.5.0.3.0/package/scripts/status_params.py  |   36 +
 .../0.5.0.3.0/package/scripts/upgrade.py        |   30 +
 .../templates/input.config-ranger-kms.json.j2   |   48 +
 .../0.5.0.3.0/role_command_order.json           |    7 +
 .../RANGER_KMS/0.5.0.3.0/service_advisor.py     |  281 +
 .../0.5.0.3.0/themes/theme_version_1.json       |  303 +
 .../0.5.0.3.0/themes/theme_version_2.json       |  124 +
 .../common-services/SPARK/1.2.1/alerts.json     |   48 +
 .../configuration/spark-logsearch-conf.xml      |   98 -
 .../scripts/alerts/alert_spark_livy_port.py     |  148 +
 .../scripts/alerts/alert_spark_thrift_port.py   |  151 +
 .../SPARK/1.2.1/package/scripts/params.py       |    3 +-
 .../1.2.1/package/scripts/service_check.py      |    2 +-
 .../templates/input.config-spark.json.j2        |   66 +
 .../SPARK/1.2.1/quicklinks/quicklinks.json      |    1 +
 .../common-services/SPARK/2.2.0/metainfo.xml    |   13 +-
 .../2.2.0/package/scripts/job_history_server.py |  108 +
 .../SPARK/2.2.0/package/scripts/livy_server.py  |  151 +
 .../SPARK/2.2.0/package/scripts/livy_service.py |   48 +
 .../SPARK/2.2.0/package/scripts/params.py       |  268 +
 .../2.2.0/package/scripts/service_check.py      |   62 +
 .../SPARK/2.2.0/package/scripts/setup_livy.py   |   88 +
 .../SPARK/2.2.0/package/scripts/setup_spark.py  |  116 +
 .../SPARK/2.2.0/package/scripts/spark_client.py |   62 +
 .../2.2.0/package/scripts/spark_service.py      |  146 +
 .../package/scripts/spark_thrift_server.py      |   91 +
 .../2.2.0/package/scripts/status_params.py      |   45 +
 .../SPARK/2.2.0/scripts/job_history_server.py   |  106 -
 .../SPARK/2.2.0/scripts/livy_server.py          |  149 -
 .../SPARK/2.2.0/scripts/livy_service.py         |   48 -
 .../SPARK/2.2.0/scripts/params.py               |  266 -
 .../SPARK/2.2.0/scripts/service_check.py        |   62 -
 .../SPARK/2.2.0/scripts/setup_livy.py           |   88 -
 .../SPARK/2.2.0/scripts/setup_spark.py          |  116 -
 .../SPARK/2.2.0/scripts/spark_client.py         |   60 -
 .../SPARK/2.2.0/scripts/spark_service.py        |  145 -
 .../SPARK/2.2.0/scripts/spark_thrift_server.py  |   89 -
 .../SPARK/2.2.0/scripts/status_params.py        |   45 -
 .../common-services/SPARK2/2.0.0/alerts.json    |   48 +
 .../configuration/spark2-logsearch-conf.xml     |   98 -
 .../scripts/alerts/alert_spark2_livy_port.py    |  148 +
 .../scripts/alerts/alert_spark2_thrift_port.py  |  152 +
 .../SPARK2/2.0.0/package/scripts/params.py      |    3 +-
 .../2.0.0/package/scripts/service_check.py      |    2 +-
 .../templates/input.config-spark2.json.j2       |   66 +
 .../SPARK2/2.0.0/quicklinks/quicklinks.json     |    1 +
 .../sqoop-atlas-application.properties.xml      |   47 +
 .../SQOOP/1.4.4.3.0/configuration/sqoop-env.xml |   87 +
 .../1.4.4.3.0/configuration/sqoop-site.xml      |   38 +
 .../SQOOP/1.4.4.3.0/kerberos.json               |   20 +
 .../SQOOP/1.4.4.3.0/metainfo.xml                |  115 +
 .../SQOOP/1.4.4.3.0/package/scripts/__init__.py |   19 +
 .../SQOOP/1.4.4.3.0/package/scripts/params.py   |   27 +
 .../1.4.4.3.0/package/scripts/params_linux.py   |  135 +
 .../1.4.4.3.0/package/scripts/params_windows.py |   30 +
 .../1.4.4.3.0/package/scripts/service_check.py  |   62 +
 .../SQOOP/1.4.4.3.0/package/scripts/sqoop.py    |  124 +
 .../1.4.4.3.0/package/scripts/sqoop_client.py   |   66 +
 .../SQOOP/1.4.4.3.0/role_command_order.json     |    6 +
 .../SQOOP/1.4.4.3.0/service_advisor.py          |  197 +
 .../STORM/0.9.1/configuration/storm-env.xml     |   11 +
 .../configuration/storm-logsearch-conf.xml      |  110 -
 .../STORM/0.9.1/package/scripts/drpc_server.py  |   52 -
 .../STORM/0.9.1/package/scripts/nimbus.py       |   45 -
 .../STORM/0.9.1/package/scripts/pacemaker.py    |   52 -
 .../STORM/0.9.1/package/scripts/params_linux.py |   14 +-
 .../STORM/0.9.1/package/scripts/ui_server.py    |   53 -
 .../0.9.1/package/templates/config.yaml.j2      |    2 +
 .../templates/input.config-storm.json.j2        |   78 +
 .../templates/storm-metrics2.properties.j2      |    2 +
 .../0.9.1/package/templates/storm_jaas.conf.j2  |    8 +
 .../common-services/STORM/1.0.1.3.0/alerts.json |  145 +
 .../configuration/ranger-storm-audit.xml        |  133 +
 .../ranger-storm-plugin-properties.xml          |  121 +
 .../ranger-storm-policymgr-ssl.xml              |   70 +
 .../configuration/ranger-storm-security.xml     |   67 +
 .../storm-atlas-application.properties.xml      |   31 +
 .../configuration/storm-cluster-log4j.xml       |  133 +
 .../STORM/1.0.1.3.0/configuration/storm-env.xml |  165 +
 .../1.0.1.3.0/configuration/storm-site.xml      | 1002 +++
 .../configuration/storm-worker-log4j.xml        |  189 +
 .../STORM/1.0.1.3.0/kerberos.json               |  134 +
 .../STORM/1.0.1.3.0/metainfo.xml                |  179 +
 .../STORM/1.0.1.3.0/metrics.json                | 1202 ++++
 .../alerts/check_supervisor_process_win.py      |   50 +
 .../STORM/1.0.1.3.0/package/files/wordCount.jar |  Bin 0 -> 690588 bytes
 .../1.0.1.3.0/package/scripts/drpc_server.py    |   91 +
 .../STORM/1.0.1.3.0/package/scripts/nimbus.py   |  116 +
 .../1.0.1.3.0/package/scripts/nimbus_prod.py    |   81 +
 .../1.0.1.3.0/package/scripts/pacemaker.py      |   90 +
 .../STORM/1.0.1.3.0/package/scripts/params.py   |   28 +
 .../1.0.1.3.0/package/scripts/params_linux.py   |  424 ++
 .../1.0.1.3.0/package/scripts/params_windows.py |   60 +
 .../STORM/1.0.1.3.0/package/scripts/rest_api.py |   85 +
 .../STORM/1.0.1.3.0/package/scripts/service.py  |   95 +
 .../1.0.1.3.0/package/scripts/service_check.py  |   79 +
 .../package/scripts/setup_ranger_storm.py       |  133 +
 .../1.0.1.3.0/package/scripts/status_params.py  |   83 +
 .../STORM/1.0.1.3.0/package/scripts/storm.py    |  182 +
 .../1.0.1.3.0/package/scripts/storm_upgrade.py  |  177 +
 .../package/scripts/storm_yaml_utils.py         |   53 +
 .../1.0.1.3.0/package/scripts/supervisor.py     |  117 +
 .../package/scripts/supervisor_prod.py          |   84 +
 .../package/scripts/supervisord_service.py      |   33 +
 .../1.0.1.3.0/package/scripts/ui_server.py      |  137 +
 .../package/templates/client_jaas.conf.j2       |   33 +
 .../1.0.1.3.0/package/templates/config.yaml.j2  |   72 +
 .../templates/input.config-storm.json.j2        |   78 +
 .../templates/storm-metrics2.properties.j2      |   32 +
 .../1.0.1.3.0/package/templates/storm.conf.j2   |   35 +
 .../package/templates/storm_jaas.conf.j2        |   65 +
 .../package/templates/worker-launcher.cfg.j2    |   19 +
 .../STORM/1.0.1.3.0/quicklinks/quicklinks.json  |   45 +
 .../STORM/1.0.1.3.0/role_command_order.json     |   13 +
 .../STORM/1.0.1.3.0/service_advisor.py          |  387 +
 .../STORM/1.0.1.3.0/widgets.json                |  127 +
 .../STORM/1.1.0/configuration/storm-site.xml    |   44 +
 .../common-services/STORM/1.1.0/kerberos.json   |  138 +
 .../common-services/STORM/1.1.0/metainfo.xml    |   44 +
 .../mapred-logsearch-conf.xml                   |   80 -
 .../configuration/yarn-logsearch-conf.xml       |  104 -
 .../scripts/application_timeline_server.py      |   61 -
 .../2.1.0.2.0/package/scripts/historyserver.py  |   56 -
 .../2.1.0.2.0/package/scripts/nodemanager.py    |   60 -
 .../package/scripts/resourcemanager.py          |   60 -
 .../YARN/2.1.0.2.0/package/scripts/yarn.py      |   36 +-
 .../templates/input.config-mapreduce2.json.j2   |   48 +
 .../package/templates/input.config-yarn.json.j2 |   72 +
 .../YARN/3.0.0.3.0/configuration/yarn-env.xml   |  206 +-
 .../YARN/3.0.0.3.0/configuration/yarn-log4j.xml |  126 +-
 .../YARN/3.0.0.3.0/configuration/yarn-site.xml  |   19 +-
 .../common-services/YARN/3.0.0.3.0/metainfo.xml |    4 +-
 .../scripts/application_timeline_server.py      |   61 -
 .../3.0.0.3.0/package/scripts/historyserver.py  |   56 -
 .../3.0.0.3.0/package/scripts/nodemanager.py    |   60 -
 .../package/scripts/resourcemanager.py          |   60 -
 .../YARN/3.0.0.3.0/package/scripts/yarn.py      |   36 +-
 .../YARN/3.0.0.3.0/service_advisor.py           |  108 +
 .../configuration/zeppelin-logsearch-conf.xml   |   80 -
 .../0.6.0.2.5/package/scripts/master.py         |    6 +
 .../templates/input.config-zeppelin.json.j2     |   48 +
 .../3.4.5/configuration/zookeeper-log4j.xml     |    2 +-
 .../configuration/zookeeper-logsearch-conf.xml  |   76 -
 .../3.4.5/package/scripts/zookeeper_server.py   |   51 -
 .../templates/input.config-zookeeper.json.j2    |   46 +
 .../custom_actions/scripts/install_packages.py  |    2 +-
 .../custom_actions/scripts/ru_execute_tasks.py  |    2 +
 .../src/main/resources/properties.json          |   10 +-
 .../scripts/check_ambari_permissions.py         |   25 +-
 .../scripts/relocate_host_components.py         |  489 --
 .../0.8/services/HDFS/package/scripts/params.py |    2 +-
 .../HIVE/package/scripts/mysql_service.py       |    5 +-
 .../HIVE/package/scripts/postgresql_service.py  |    5 +-
 .../2.0.6/hooks/after-INSTALL/scripts/params.py |   14 +
 .../scripts/shared_initialization.py            |   17 +
 .../before-ANY/scripts/shared_initialization.py |    4 +-
 .../2.0.6/hooks/before-START/scripts/params.py  |   16 +-
 .../templates/hadoop-metrics2.properties.j2     |    2 +
 .../HDP/2.0.6/properties/stack_features.json    |   21 +
 .../stacks/HDP/2.1/services/stack_advisor.py    |   11 +-
 .../services/YARN/configuration/yarn-site.xml   |    8 +-
 .../stacks/HDP/2.2/services/stack_advisor.py    |   61 +-
 .../stacks/HDP/2.3/role_command_order.json      |    4 +-
 .../stacks/HDP/2.3/services/stack_advisor.py    |   35 +-
 .../stacks/HDP/2.3/upgrades/config-upgrade.xml  |   31 +-
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml |   75 +-
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml |   73 +-
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml |   77 +-
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml |   77 +-
 .../stacks/HDP/2.3/upgrades/upgrade-2.3.xml     |    8 +-
 .../stacks/HDP/2.3/upgrades/upgrade-2.4.xml     |    5 +-
 .../stacks/HDP/2.3/upgrades/upgrade-2.5.xml     |    6 +-
 .../stacks/HDP/2.3/upgrades/upgrade-2.6.xml     |    6 +-
 .../stacks/HDP/2.4/upgrades/config-upgrade.xml  |   31 +-
 .../HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml |   73 +-
 .../HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml |   77 +-
 .../HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml |   79 +-
 .../stacks/HDP/2.4/upgrades/upgrade-2.4.xml     |    8 +-
 .../stacks/HDP/2.4/upgrades/upgrade-2.5.xml     |    4 +-
 .../stacks/HDP/2.4/upgrades/upgrade-2.6.xml     |    4 +-
 .../services/ATLAS/configuration/atlas-env.xml  |    2 +-
 .../stacks/HDP/2.5/services/stack_advisor.py    |   28 +
 .../stacks/HDP/2.5/upgrades/config-upgrade.xml  |  302 +-
 .../HDP/2.5/upgrades/host-upgrade-2.5.xml       |   50 +-
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml |   87 +-
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml |  125 +-
 .../stacks/HDP/2.5/upgrades/upgrade-2.5.xml     |   10 +-
 .../stacks/HDP/2.5/upgrades/upgrade-2.6.xml     |    8 +-
 .../services/ATLAS/configuration/atlas-env.xml  |   41 +
 .../hadoop-metrics2.properties.xml              |    2 +
 .../configuration/hive-interactive-site.xml     |   11 +-
 .../HIVE/configuration/tez-interactive-site.xml |   37 +
 .../stacks/HDP/2.6/services/STORM/metainfo.xml  |    4 +
 .../services/YARN/configuration/yarn-env.xml    |   18 +
 .../services/YARN/configuration/yarn-site.xml   |   22 +
 .../stacks/HDP/2.6/services/stack_advisor.py    |  122 +-
 .../stacks/HDP/2.6/upgrades/config-upgrade.xml  |   30 +
 .../HDP/2.6/upgrades/host-upgrade-2.6.xml       |   48 +-
 .../HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml |   87 +-
 .../stacks/HDP/2.6/upgrades/upgrade-2.6.xml     |    9 +-
 .../before-ANY/scripts/shared_initialization.py |    4 +-
 .../3.0/hooks/before-START/scripts/params.py    |   16 +-
 .../templates/hadoop-metrics2.properties.j2     |    2 +
 .../stacks/HDP/3.0/services/ATLAS/metainfo.xml  |   27 +
 .../stacks/HDP/3.0/services/HBASE/metainfo.xml  |   26 +
 .../services/HDFS/configuration/hadoop-env.xml  |    2 +-
 .../stacks/HDP/3.0/services/KAFKA/metainfo.xml  |   27 +
 .../stacks/HDP/3.0/services/KNOX/metainfo.xml   |   27 +
 .../stacks/HDP/3.0/services/OOZIE/metainfo.xml  |   27 +
 .../stacks/HDP/3.0/services/RANGER/metainfo.xml |   27 +
 .../HDP/3.0/services/RANGER_KMS/metainfo.xml    |   27 +
 .../stacks/HDP/3.0/services/SQOOP/metainfo.xml  |   27 +
 .../stacks/HDP/3.0/services/STORM/metainfo.xml  |   27 +
 .../YARN/configuration-mapred/mapred-env.xml    |    2 +-
 .../YARN/configuration-mapred/mapred-site.xml   |   12 +-
 .../configuration/hbase-logsearch-conf.xml      |  111 -
 .../templates/input.config-hbase.json.j2        |   79 +
 .../configuration/hdfs-logsearch-conf.xml       |  248 -
 .../package/templates/input.config-hdfs.json.j2 |  216 +
 .../KERBEROS/package/scripts/kerberos_client.py |   21 -
 .../src/main/resources/upgrade-pack.xsd         |    1 +
 .../src/main/sh/azuredb_create_generator.sh     |   26 +
 .../ExecutionCommandWrapperTest.java            |    4 +-
 .../ambari/server/actionmanager/StageTest.java  |    2 +-
 .../actionmanager/TestActionDBAccessorImpl.java |   34 +-
 .../server/actionmanager/TestActionManager.java |   44 +-
 .../actionmanager/TestActionScheduler.java      |   43 +-
 .../ambari/server/actionmanager/TestStage.java  |    5 +-
 .../server/agent/HeartbeatProcessorTest.java    |   11 +-
 .../server/agent/HeartbeatTestHelper.java       |    4 +-
 .../server/agent/TestHeartbeatHandler.java      |   17 +-
 .../ComponentVersionAlertRunnableTest.java      |  362 +
 .../server/api/query/JpaSortBuilderTest.java    |    4 -
 .../render/ClusterBlueprintRendererTest.java    |   38 +-
 .../resources/BaseResourceDefinitionTest.java   |    2 +-
 .../server/api/services/AmbariMetaInfoTest.java |   14 +-
 .../StackAdvisorBlueprintProcessorTest.java     |   10 +-
 .../checks/AbstractCheckDescriptorTest.java     |   87 +-
 .../DatabaseConsistencyCheckHelperTest.java     |   73 +-
 ...duce2JobHistoryStatePreservingCheckTest.java |   15 +
 .../checks/ServiceCheckValidityCheckTest.java   |    1 -
 .../ServicesNamenodeTruncateCheckTest.java      |   17 +
 .../AmbariManagementControllerImplTest.java     |    6 +-
 .../AmbariManagementControllerTest.java         |   27 +-
 .../server/controller/KerberosHelperTest.java   |  105 +-
 .../GSInstallerClusterProviderTest.java         |  104 -
 .../GSInstallerComponentProviderTest.java       |  102 -
 .../GSInstallerHostComponentProviderTest.java   |  149 -
 .../GSInstallerHostProviderTest.java            |  153 -
 .../GSInstallerNoOpProviderTest.java            |   46 -
 .../GSInstallerServiceProviderTest.java         |  166 -
 .../TestGSInstallerStateProvider.java           |   36 -
 .../AbstractJDBCResourceProviderTest.java       |    2 +-
 .../BlueprintConfigurationProcessorTest.java    |  720 +-
 .../internal/CalculatedStatusTest.java          |    2 +-
 ...ClusterStackVersionResourceProviderTest.java |  284 +-
 .../internal/ComponentResourceProviderTest.java |   28 +-
 .../internal/HostResourceProviderTest.java      |    5 +-
 .../internal/ProvisionClusterRequestTest.java   |   32 -
 .../controller/internal/RequestImplTest.java    |    4 +-
 .../internal/RequestResourceProviderTest.java   |   68 +-
 .../internal/RequestStageContainerTest.java     |    2 +-
 .../internal/ScaleClusterRequestTest.java       |    6 -
 .../internal/TaskResourceProviderTest.java      |    8 +
 .../internal/UpgradeResourceProviderTest.java   |   66 +-
 .../LogSearchDataRetrievalServiceTest.java      |  248 +-
 .../logging/LoggingRequestHelperImplTest.java   |    9 +
 .../credentialapi/CredentialUtilTest.java       |    1 -
 .../apache/ambari/server/events/EventsTest.java |    4 -
 .../AlertMaintenanceModeListenerTest.java       |    4 +
 .../HostVersionOutOfSyncListenerTest.java       |   42 +-
 .../server/hooks/users/UserHookServiceTest.java |    4 +-
 .../metadata/AgentAlertDefinitionsTest.java     |    2 +-
 .../system/impl/TestAmbariMetricsSinkImpl.java  |   10 +
 .../apache/ambari/server/orm/OrmTestHelper.java |   13 +-
 .../server/orm/dao/HostRoleCommandDAOTest.java  |    2 -
 .../ambari/server/orm/dao/UserDAOTest.java      |    3 +-
 .../orm/entities/HostRoleCommandEntityTest.java |   49 +
 .../server/security/SecurityHelperImplTest.java |    3 +-
 ...ariAuthorizationProviderDisableUserTest.java |    2 +-
 .../AmbariLocalUserProviderTest.java            |    2 +-
 .../AmbariPamAuthenticationProviderTest.java    |   47 +-
 .../AmbariUserAuthenticationFilterTest.java     |    2 +-
 .../security/authorization/UserNameTest.java    |   70 +
 .../security/authorization/UsersTest.java       |    2 +-
 .../ldap/AmbariLdapDataPopulatorTest.java       |    3 +-
 .../serveraction/ServerActionExecutorTest.java  |    9 +-
 .../ComponentVersionCheckActionTest.java        |   69 +-
 .../QuickLinksConfigurationModuleTest.java      |   10 +
 .../stack/StackManagerCommonServicesTest.java   |    4 +-
 .../server/stack/StackManagerExtensionTest.java |   73 +-
 .../server/stack/StackManagerMiscTest.java      |   13 +-
 .../ambari/server/stack/StackManagerMock.java   |    5 +-
 .../ambari/server/stack/StackManagerTest.java   |   33 +-
 .../ambari/server/stack/ThemeModuleTest.java    |   12 +-
 .../server/stageplanner/TestStagePlanner.java   |    2 +-
 .../server/state/ServiceComponentTest.java      |   72 +
 .../ambari/server/state/UpgradeHelperTest.java  |   36 +-
 .../state/alerts/AlertDefinitionHashTest.java   |    2 +-
 .../state/alerts/AlertEventPublisherTest.java   |    3 -
 .../state/alerts/InitialAlertEventTest.java     |    3 -
 .../state/cluster/AlertDataManagerTest.java     |    4 +-
 .../cluster/ClusterEffectiveVersionTest.java    |    2 +
 .../server/state/cluster/ClusterTest.java       |   62 +-
 ...omponentHostConcurrentWriteDeadlockTest.java |    4 -
 .../server/topology/BlueprintImplTest.java      |   13 -
 .../ClusterConfigurationRequestTest.java        |   18 +-
 .../ClusterDeployWithStartOnlyTest.java         |   38 +-
 ...InstallWithoutStartOnComponentLevelTest.java |   34 +-
 .../ClusterInstallWithoutStartTest.java         |   38 +-
 .../topology/ClusterTopologyImplTest.java       |   57 +-
 .../server/topology/LogicalRequestTest.java     |   64 +
 .../topology/RequiredPasswordValidatorTest.java |  113 +-
 .../server/topology/TopologyManagerTest.java    |   14 +-
 .../validators/HiveServiceValidatorTest.java    |    3 +
 .../RequiredConfigPropertiesValidatorTest.java  |  302 +
 .../StackConfigTypeValidatorTest.java           |  126 +
 .../ambari/server/upgrade/SectionDDL.java       |    4 +-
 .../server/upgrade/UpgradeCatalog212Test.java   |   34 +-
 .../server/upgrade/UpgradeCatalog240Test.java   |    3 +-
 .../server/upgrade/UpgradeCatalog250Test.java   |   28 +-
 .../server/upgrade/UpgradeCatalog251Test.java   |  263 +
 .../server/upgrade/UpgradeCatalog252Test.java   |  166 +
 .../server/upgrade/UpgradeCatalog300Test.java   |  139 +
 .../ambari/server/utils/StageUtilsTest.java     |    2 +-
 .../view/ViewInstanceOperationHandlerTest.java  |  105 +
 .../ambari/server/view/ViewRegistryTest.java    |   38 +-
 .../src/test/python/TestAmbariServer.py         |  409 +-
 ambari-server/src/test/python/TestMpacks.py     |   17 +-
 .../src/test/python/TestResourceFilesKeeper.py  |   29 +-
 .../stacks/2.0.6/HBASE/test_hbase_master.py     |  102 -
 .../2.0.6/HBASE/test_hbase_regionserver.py      |  104 -
 .../python/stacks/2.0.6/HDFS/test_datanode.py   |  177 +-
 .../stacks/2.0.6/HDFS/test_hdfs_client.py       |  110 +-
 .../stacks/2.0.6/HDFS/test_journalnode.py       |  114 -
 .../python/stacks/2.0.6/HDFS/test_namenode.py   |  114 -
 .../python/stacks/2.0.6/HDFS/test_nfsgateway.py |  116 -
 .../python/stacks/2.0.6/HDFS/test_snamenode.py  |  117 +-
 .../test/python/stacks/2.0.6/HDFS/test_zkfc.py  |  102 +-
 .../stacks/2.0.6/HIVE/test_hive_server.py       |  112 -
 .../stacks/2.0.6/HIVE/test_webhcat_server.py    |  116 -
 .../stacks/2.0.6/OOZIE/test_oozie_server.py     |  113 -
 .../stacks/2.0.6/YARN/test_historyserver.py     |  304 +-
 .../stacks/2.0.6/YARN/test_nodemanager.py       |  305 +-
 .../stacks/2.0.6/YARN/test_resourcemanager.py   |  160 +-
 .../2.0.6/ZOOKEEPER/test_zookeeper_server.py    |  103 -
 .../python/stacks/2.0.6/configs/default.json    |    4 +-
 .../hooks/after-INSTALL/test_after_install.py   |   25 +-
 .../stacks/2.1/FALCON/test_falcon_client.py     |   24 -
 .../stacks/2.1/FALCON/test_falcon_server.py     |  109 -
 .../stacks/2.1/HIVE/test_hive_metastore.py      |  120 +-
 .../stacks/2.1/STORM/test_storm_drpc_server.py  |  104 -
 .../stacks/2.1/STORM/test_storm_nimbus.py       |  103 -
 .../stacks/2.1/STORM/test_storm_ui_server.py    |   82 -
 .../stacks/2.1/YARN/test_apptimelineserver.py   |  150 +-
 .../stacks/2.1/common/test_stack_advisor.py     |    7 +-
 .../python/stacks/2.2/KNOX/test_knox_gateway.py |  102 -
 .../test/python/stacks/2.3/YARN/test_ats_1_5.py |   98 +-
 .../stacks/2.4/AMBARI_INFRA/test_infra_solr.py  |    3 +
 .../stacks/2.4/LOGSEARCH/test_logfeeder.py      |   29 +-
 .../stacks/2.4/LOGSEARCH/test_logsearch.py      |    4 +-
 .../test/python/stacks/2.4/configs/default.json |    3 +
 .../stacks/2.5/ATLAS/test_atlas_server.py       |   42 -
 .../stacks/2.5/RANGER_KMS/test_kms_server.py    |  111 +-
 .../stacks/2.5/common/test_stack_advisor.py     |  415 ++
 .../stacks/2.5/configs/ranger-kms-secured.json  |    2 +-
 .../stacks/2.6/common/test_stack_advisor.py     |  461 +-
 ambari-server/src/test/python/unitTests.py      |    2 +-
 .../resources/extensions/EXT/0.1/metainfo.xml   |    2 +-
 .../resources/extensions/EXT/0.2/metainfo.xml   |    3 +-
 .../resources/extensions/EXT/0.3/metainfo.xml   |   32 +
 .../EXT/0.3/services/OOZIE2/metainfo.xml        |  118 +
 .../services/OOZIE2/themes/broken_theme.json    |    3 +
 .../stacks/HDP/2.1.1/upgrades/upgrade_test.xml  |    2 +-
 .../HDP/2.2.0/upgrades/upgrade_test_checks.xml  |   21 +-
 .../stacks_with_extensions/HDP/0.3/metainfo.xml |   22 +
 .../HDP/0.3/repos/repoinfo.xml                  |   63 +
 .../HDP/0.3/services/HBASE/metainfo.xml         |   26 +
 .../0.3/services/HDFS/configuration/global.xml  |  145 +
 .../services/HDFS/configuration/hadoop-env.xml  |  223 +
 .../services/HDFS/configuration/hbase-site.xml  |  137 +
 .../services/HDFS/configuration/hdfs-log4j.xml  |  199 +
 .../services/HDFS/configuration/hdfs-site.xml   |  396 ++
 .../HDP/0.3/services/HDFS/metainfo.xml          |   30 +
 .../0.3/services/HDFS/package/dummy-script.py   |   20 +
 .../HDP/0.3/services/HIVE/metainfo.xml          |   26 +
 .../HDP/0.3/services/MAPREDUCE/metainfo.xml     |   23 +
 .../HDP/0.3/services/ZOOKEEPER/metainfo.xml     |   26 +
 ambari-web/app/app.js                           |   13 +
 .../global/background_operations_controller.js  |   12 +-
 ambari-web/app/controllers/installer.js         |   29 +-
 .../journalNode/step1_controller.js             |    2 +-
 .../main/admin/stack_and_upgrade_controller.js  |    5 +
 .../manage_alert_notifications_controller.js    |   28 +-
 .../controllers/main/service/add_controller.js  |    3 +
 .../controllers/main/service/info/summary.js    |    2 +-
 .../service/manage_config_groups_controller.js  |   89 +-
 ambari-web/app/controllers/wizard.js            |   44 +-
 .../app/controllers/wizard/step1_controller.js  |    7 +
 .../wizard/step7/assign_master_controller.js    |  291 +-
 .../app/controllers/wizard/step7_controller.js  |    1 +
 .../app/controllers/wizard/step8_controller.js  |  167 +-
 .../app/controllers/wizard/step9_controller.js  |    2 +-
 ambari-web/app/mappers/stack_mapper.js          |    1 +
 ambari-web/app/messages.js                      |   11 +-
 .../app/mixins/common/configs/configs_saver.js  |   32 +-
 .../mixins/wizard/assign_master_components.js   |   77 +-
 .../app/mixins/wizard/wizardHostsLoading.js     |    6 +-
 ambari-web/app/models/stack.js                  |    1 +
 ambari-web/app/routes/add_service_routes.js     |    1 +
 ambari-web/app/routes/installer.js              |    1 +
 ambari-web/app/routes/main.js                   |    2 +-
 ambari-web/app/styles/alerts.less               |   19 +-
 ambari-web/app/styles/application.less          |   17 +-
 ambari-web/app/styles/bootstrap_overrides.less  |    5 +
 .../app/styles/theme/bootstrap-ambari.css       |   75 +-
 ambari-web/app/styles/top-nav.less              |    2 +-
 ambari-web/app/styles/wizard.less               |    3 +
 .../stack_upgrade/stack_upgrade_wizard.hbs      |   24 +-
 .../admin/stack_upgrade/upgrade_version_box.hbs |    3 +
 ambari-web/app/templates/main/alerts.hbs        |   16 +-
 .../main/alerts/create_alert_notification.hbs   |   12 +
 ambari-web/app/templates/wizard/step1.hbs       |    6 +-
 ambari-web/app/templates/wizard/step10.hbs      |    2 +-
 ambari-web/app/templates/wizard/step3.hbs       |    2 +-
 .../wizard/step3/step3_host_warnings_popup.hbs  |    4 +-
 ambari-web/app/templates/wizard/step4.hbs       |   20 +-
 ambari-web/app/templates/wizard/step6.hbs       |    4 +-
 ambari-web/app/templates/wizard/step7.hbs       |    4 +-
 ambari-web/app/templates/wizard/step8.hbs       |    9 +-
 ambari-web/app/templates/wizard/step9.hbs       |    2 +-
 ambari-web/app/utils/ajax/ajax.js               |   19 +-
 ambari-web/app/utils/helper.js                  |    7 +-
 ambari-web/app/utils/host_progress_popup.js     |    2 +-
 .../common/assign_master_components_view.js     |    4 +
 .../widgets/slider_config_widget_view.js        |   17 +-
 .../common/host_progress_popup_body_view.js     |   20 +-
 .../app/views/common/quick_view_link_view.js    |   24 +-
 ambari-web/app/views/main/admin.js              |    2 +-
 .../stack_upgrade/upgrade_version_box_view.js   |    2 +
 ambari-web/app/views/main/menu.js               |    2 +-
 ambari-web/app/views/main/service/item.js       |    6 +-
 .../app/views/main/service/reassign_view.js     |    4 -
 ambari-web/app/views/wizard/step4_view.js       |    7 +-
 .../views/wizard/step9/hostLogPopupBody_view.js |    2 +-
 ambari-web/pom.xml                              |   20 +-
 .../global/background_operations_test.js        |  179 +-
 ambari-web/test/controllers/installer_test.js   |   12 -
 .../journalNode/step1_controller_test.js        |    4 +-
 ...anage_alert_notifications_controller_test.js |  165 +-
 .../main/service/add_controller_test.js         |   54 +-
 .../test/controllers/wizard/step5_test.js       |   86 +-
 .../step7/assign_master_controller_test.js      |  794 ++-
 .../test/controllers/wizard/step8_test.js       |  132 +-
 ambari-web/test/controllers/wizard_test.js      |   30 +-
 .../mixins/common/configs/configs_saver_test.js |   13 +
 ambari-web/test/utils/helper_test.js            |    5 +
 .../widgets/slider_config_widget_view_test.js   |   23 +-
 .../test/views/common/quick_link_view_test.js   |    2 +-
 .../resourceManager/wizard_view_test.js         |   18 +-
 .../views/main/service/reassign_view_test.js    |   12 -
 ambari-web/yarn.lock                            | 4153 +++++++++++
 .../2.0/hooks/before-START/scripts/params.py    |    2 +
 contrib/views/capacity-scheduler/pom.xml        |   32 +-
 .../ui/app/components/capacityInput.js          |    2 +
 .../src/main/resources/ui/yarn.lock             | 1374 ++++
 contrib/views/files/pom.xml                     |   18 +-
 .../view/filebrowser/DownloadService.java       |    4 +-
 .../files/src/main/resources/ui/package.json    |    2 +-
 .../views/files/src/main/resources/ui/yarn.lock | 6041 ++++++++++++++++
 contrib/views/hawq/pom.xml                      |   20 +-
 .../views/hawq/src/main/resources/ui/yarn.lock  | 6665 ++++++++++++++++++
 contrib/views/hive-next/pom.xml                 |   18 +-
 .../ambari/view/hive2/actor/HiveActor.java      |   32 +-
 .../ambari/view/hive2/actor/JdbcConnector.java  |    8 +-
 .../view/hive2/actor/OperationController.java   |    2 +-
 .../view/hive2/resources/files/FileService.java |    8 +-
 .../view/hive2/resources/jobs/JobService.java   |    9 +-
 .../src/main/resources/ui/hive-web/Brocfile.js  |    1 +
 .../ui/hive-web/app/controllers/index.js        |    2 +-
 .../resources/ui/hive-web/app/routes/splash.js  |    2 +-
 .../src/main/resources/ui/hive-web/bower.json   |    3 +-
 .../src/main/resources/ui/hive-web/package.json |    9 +-
 .../ui/hive-web/vendor/browser-pollyfills.js    |  213 +
 .../src/main/resources/ui/hive-web/yarn.lock    | 5066 +++++++++++++
 contrib/views/hive20/pom.xml                    |   18 +-
 .../ambari/view/hive20/actor/HiveActor.java     |   32 +-
 .../ambari/view/hive20/actor/JdbcConnector.java |    6 -
 .../view/hive20/actor/OperationController.java  |    2 +-
 .../view/hive20/internal/dto/TableStats.java    |   24 +-
 .../internal/parsers/TableMetaParserImpl.java   |    8 +-
 .../generators/InsertFromQueryGenerator.java    |   41 +-
 .../view/hive20/resources/jobs/JobService.java  |   16 +-
 .../resources/system/ranger/RangerService.java  |   26 +-
 .../uploads/query/InsertFromQueryInput.java     |   24 +-
 .../resources/ui/app/adapters/application.js    |    2 +-
 .../src/main/resources/ui/app/adapters/job.js   |    2 +-
 .../resources/ui/app/components/create-table.js |   32 +-
 .../resources/ui/app/components/jobs-browser.js |   18 +-
 .../resources/ui/app/components/setting-list.js |    9 +
 .../resources/ui/app/components/udf-item.js     |    1 +
 .../ui/app/controllers/savedqueries.js          |   24 +
 .../main/resources/ui/app/controllers/udfs.js   |    2 +
 .../routes/databases/database/tables/table.js   |   22 +-
 .../databases/database/tables/upload-table.js   |   16 +-
 .../resources/ui/app/routes/savedqueries.js     |   11 +-
 .../src/main/resources/ui/app/routes/udfs.js    |   10 +-
 .../resources/ui/app/services/auto-refresh.js   |    1 +
 .../src/main/resources/ui/app/styles/app.scss   |   38 +-
 .../app/templates/components/create-table.hbs   |    4 +-
 .../ui/app/templates/components/edit-table.hbs  |    5 -
 .../app/templates/components/jobs-browser.hbs   |   42 +-
 .../components/notification-message.hbs         |    2 +-
 .../app/templates/components/setting-list.hbs   |    8 +-
 .../templates/components/table-rename-form.hbs  |    2 +-
 .../templates/components/table-statistics.hbs   |   12 +-
 .../ui/app/templates/components/udf-edit.hbs    |   66 +-
 .../ui/app/templates/components/udf-item.hbs    |   10 +-
 .../ui/app/templates/components/udf-new.hbs     |   97 +-
 .../app/templates/components/visual-explain.hbs |    7 +-
 .../resources/ui/app/templates/databases.hbs    |    2 +-
 .../databases/database/tables/table.hbs         |   19 +-
 .../ui/app/templates/queries/query.hbs          |   16 +-
 .../resources/ui/app/templates/savedqueries.hbs |   82 +-
 .../main/resources/ui/app/templates/udfs.hbs    |   49 +-
 .../ui/app/utils/hive-explainer/processor.js    |   13 +-
 .../ui/app/utils/hive-explainer/renderer.js     |  230 +-
 .../ui/app/utils/hive-explainer/transformer.js  |   11 +-
 .../hive20/src/main/resources/ui/package.json   |    2 +-
 .../hive20/src/main/resources/ui/yarn.lock      | 6032 ++++++++++++++++
 contrib/views/hueambarimigration/pom.xml        |   18 +-
 .../ui/hueambarimigration-view/package.json     |    2 +-
 .../ui/hueambarimigration-view/yarn.lock        | 5553 +++++++++++++++
 contrib/views/jobs/pom.xml                      |   18 +-
 .../views/jobs/src/main/resources/ui/yarn.lock  | 2537 +++++++
 contrib/views/pig/pom.xml                       |   22 +-
 .../pig/src/main/resources/ui/pig-web/yarn.lock | 2376 +++++++
 contrib/views/pom.xml                           |    1 +
 .../org/apache/ambari/storm/ProxyServlet.java   |   12 +-
 .../ambari/storm/StormDetailsServlet.java       |   81 +
 .../storm/src/main/resources/WEB-INF/web.xml    |    8 +
 .../resources/scripts/components/SearchLogs.jsx |   38 +-
 .../src/main/resources/scripts/router/Router.js |    6 +-
 .../src/main/resources/scripts/utils/Utils.js   |   25 -
 contrib/views/storm/src/main/resources/view.xml |    8 +-
 .../view/utils/hdfs/ConfigurationBuilder.java   |    3 +-
 contrib/views/wfmanager/pom.xml                 |   18 +-
 .../ui/app/components/bundle-config.js          |    3 +-
 .../resources/ui/app/components/coord-config.js |    3 +-
 .../ui/app/components/distcp-action.js          |   15 +-
 .../resources/ui/app/components/drafts-wf.js    |    3 +-
 .../ui/app/components/flow-designer.js          |    1 -
 .../resources/ui/app/components/job-details.js  |   30 +-
 .../ui/app/components/recent-projects.js        |   11 +-
 .../ui/app/components/search-create-new-bar.js  |   21 +-
 .../ui/app/domain/jsplumb-flow-renderer.js      |  194 -
 .../resources/ui/app/domain/layout-manager1.js  |   88 -
 .../resources/ui/app/domain/layout-manager2.js  |   87 -
 .../resources/ui/app/routes/design/jobtab.js    |    2 +-
 .../src/main/resources/ui/app/styles/app.less   |   11 +
 .../app/templates/components/bundle-config.hbs  |    2 +-
 .../app/templates/components/coord-config.hbs   |    2 +-
 .../templates/components/coord-job-details.hbs  |   66 +-
 .../app/templates/components/distcp-action.hbs  |    7 +-
 .../ui/app/templates/components/drafts-wf.hbs   |    6 +-
 .../app/templates/components/flow-designer.hbs  |    2 +-
 .../app/templates/components/hdfs-browser.hbs   |   44 +-
 .../ui/app/templates/components/job-row.hbs     |    4 +-
 .../components/search-create-new-bar.hbs        |    4 +
 .../app/templates/components/search-table.hbs   |    2 +-
 .../components/workflow-job-details.hbs         |    6 +
 .../wfmanager/src/main/resources/ui/bower.json  |    1 -
 .../src/main/resources/ui/ember-cli-build.js    |    2 +-
 .../wfmanager/src/main/resources/ui/yarn.lock   | 5629 +++++++++++++++
 .../config-utils/diff_stack_properties.py       |  154 +
 dev-support/test-patch.sh                       |    9 +-
 docs/pom.xml                                    |   12 +
 pom.xml                                         |    1 +
 1603 files changed, 132226 insertions(+), 22139 deletions(-)
----------------------------------------------------------------------



[13/50] [abbrv] ambari git commit: Revert "AMBARI-21046. UI: Upgrades should be started using repo_version_ids instead of version strings (alexantonenko)"

Posted by ad...@apache.org.
Revert "AMBARI-21046. UI: Upgrades should be started using repo_version_ids instead of version strings (alexantonenko)"

This reverts commit 1568f800764a6b20f2f09330f112070ebc0f7f86.
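In practical terms, the revert keys the start-upgrade and downgrade request bodies on the repository version string again instead of its id. A minimal sketch of the resulting payload, with illustrative values taken from the test hunks below:

    // ajax.js builds the request body with JSON.stringify; after the
    // revert the "Upgrade" object carries the version string again:
    JSON.stringify({
      "Upgrade": {
        "repository_version": "2.2",           // was "repository_version_id": data.id
        "upgrade_type": "ROLLING",
        "skip_failures": "false",
        "skip_service_check_failures": "false"
      }
    })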


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0e5f2470
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0e5f2470
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0e5f2470

Branch: refs/heads/ambari-rest-api-explorer
Commit: 0e5f24700afad30b5f0ac0512442019dedf392cb
Parents: 9cb8701
Author: Alex Antonenko <hi...@gmail.com>
Authored: Thu May 18 19:40:31 2017 +0300
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Thu May 18 19:41:47 2017 +0300

----------------------------------------------------------------------
 .../controllers/main/admin/stack_and_upgrade_controller.js    | 7 ++-----
 ambari-web/app/utils/ajax/ajax.js                             | 4 ++--
 .../main/admin/stack_and_upgrade_controller_test.js           | 6 ------
 3 files changed, 4 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0e5f2470/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
index d444b2d..0f2efb0 100644
--- a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
+++ b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
@@ -414,8 +414,7 @@ App.MainAdminStackAndUpgradeController = Em.Controller.extend(App.LocalStorage,
     if (currentVersion) {
       this.set('currentVersion', {
         repository_version: currentVersion.get('repositoryVersion.repositoryVersion'),
-        repository_name: currentVersion.get('repositoryVersion.displayName'),
-        id: currentVersion.get('repositoryVersion.id')
+        repository_name: currentVersion.get('repositoryVersion.displayName')
       });
     }
   },
@@ -737,7 +736,6 @@ App.MainAdminStackAndUpgradeController = Em.Controller.extend(App.LocalStorage,
         from: App.RepositoryVersion.find().findProperty('displayName', this.get('upgradeVersion')).get('repositoryVersion'),
         value: currentVersion.repository_version,
         label: currentVersion.repository_name,
-        id: currentVersion.id,
         isDowngrade: true,
         upgradeType: this.get('upgradeType')
       },
@@ -1381,8 +1379,7 @@ App.MainAdminStackAndUpgradeController = Em.Controller.extend(App.LocalStorage,
       label: version.get('displayName'),
       type: version.get('upgradeType'),
       skipComponentFailures: version.get('skipComponentFailures') ? 'true' : 'false',
-      skipSCFailures: version.get('skipSCFailures') ? 'true' : 'false',
-      id: version.get('id')
+      skipSCFailures: version.get('skipSCFailures') ? 'true' : 'false'
     };
     if (App.get('supports.preUpgradeCheck')) {
       this.set('requestInProgress', true);

http://git-wip-us.apache.org/repos/asf/ambari/blob/0e5f2470/ambari-web/app/utils/ajax/ajax.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/ajax/ajax.js b/ambari-web/app/utils/ajax/ajax.js
index 4dc04f4..f7d0914 100644
--- a/ambari-web/app/utils/ajax/ajax.js
+++ b/ambari-web/app/utils/ajax/ajax.js
@@ -1712,7 +1712,7 @@ var urls = {
         timeout : 600000,
         data: JSON.stringify({
           "Upgrade": {
-            "repository_version_id": data.id,
+            "repository_version": data.value,
             "upgrade_type": data.type,
             "skip_failures": data.skipComponentFailures,
             "skip_service_check_failures": data.skipSCFailures,
@@ -1731,7 +1731,7 @@ var urls = {
         data: JSON.stringify({
           "Upgrade": {
             "from_version": data.from,
-            "repository_version_id": data.id,
+            "repository_version": data.value,
             "upgrade_type": data.upgradeType,
             "direction": "DOWNGRADE"
           }

http://git-wip-us.apache.org/repos/asf/ambari/blob/0e5f2470/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js b/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
index fa0a0b9..e696bb1 100644
--- a/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
+++ b/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
@@ -128,7 +128,6 @@ describe('App.MainAdminStackAndUpgradeController', function() {
       sinon.stub(App.StackVersion, 'find').returns([Em.Object.create({
         state: 'CURRENT',
         repositoryVersion: {
-          id: '1',
           repositoryVersion: '2.2',
           displayName: 'HDP-2.2'
         }
@@ -156,7 +155,6 @@ describe('App.MainAdminStackAndUpgradeController', function() {
     });
     it('currentVersion is correct', function () {
       expect(controller.get('currentVersion')).to.eql({
-        "id": "1",
         "repository_version": "2.2",
         "repository_name": "HDP-2.2"
       });
@@ -391,7 +389,6 @@ describe('App.MainAdminStackAndUpgradeController', function() {
   describe("#runPreUpgradeCheck()", function() {
     it("make ajax call", function() {
       controller.runPreUpgradeCheck(Em.Object.create({
-        id: '1',
         repositoryVersion: '2.2',
         displayName: 'HDP-2.2',
         upgradeType: 'ROLLING',
@@ -402,7 +399,6 @@ describe('App.MainAdminStackAndUpgradeController', function() {
       expect(args[0]).to.exists;
       expect(args[0].sender).to.be.eql(controller);
       expect(args[0].data).to.be.eql({
-        id: '1',
         value: '2.2',
         label: 'HDP-2.2',
         type: 'ROLLING',
@@ -1130,7 +1126,6 @@ describe('App.MainAdminStackAndUpgradeController', function() {
       controller.set('upgradeVersion', 'HDP-2.3');
       controller.set('upgradeType', 'NON_ROLLING');
       controller.startDowngrade(Em.Object.create({
-        id: '1',
         repository_version: '2.2',
         repository_name: 'HDP-2.2'
       }));
@@ -1144,7 +1139,6 @@ describe('App.MainAdminStackAndUpgradeController', function() {
     it('request-data is valid', function () {
       expect(this.callArgs.data).to.eql({
         from: '2.3',
-        id: '1',
         value: '2.2',
         label: 'HDP-2.2',
         isDowngrade: true,


[21/50] [abbrv] ambari git commit: AMBARI-21065. Update some YARN settings for HDP 2.6 stack (Siddharth Seth via smohanty)

Posted by ad...@apache.org.
AMBARI-21065. Update some YARN settings for HDP 2.6 stack (Siddharth Seth via smohanty)
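The two new yarn-site.xml properties below define a NodeManager "kill escape": containers whose launch command line matches the listed patterns (here, LLAP daemons started through the slider agent) and whose user matches (here, hive) are exempted from the NodeManager kill, keeping long-running LLAP daemons alive. That reading is inferred from the property names and the definition id (hdp_2_6_0_0_yarn_nodemanager_llap_mem), not stated in the commit, so treat it as an interpretation. A minimal sketch of the resulting entries, mirroring the yarn-site.xml hunk below:

    <!-- Launch commands that may escape the NodeManager kill
         (sketch; values copied from the diff below) -->
    <property>
      <name>yarn.nodemanager.kill-escape.launch-command-line</name>
      <value>slider-agent,LLAP</value>
    </property>
    <!-- User whose containers may escape the kill -->
    <property>
      <name>yarn.nodemanager.kill-escape.user</name>
      <value>hive</value>
    </property>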


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f7a1d4e7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f7a1d4e7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f7a1d4e7

Branch: refs/heads/ambari-rest-api-explorer
Commit: f7a1d4e7e7445eb49cfefabaf92ec518fc44d8ba
Parents: 71ed281
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Thu May 18 21:46:15 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Thu May 18 22:32:31 2017 -0700

----------------------------------------------------------------------
 .../HDP/2.6/services/YARN/configuration/yarn-site.xml     | 10 ++++++++++
 .../resources/stacks/HDP/2.6/upgrades/config-upgrade.xml  |  5 +++++
 .../stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml    |  7 +++++++
 .../resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml     |  1 +
 4 files changed, 23 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f7a1d4e7/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-site.xml
index 6aa0bae..754a2c2 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-site.xml
@@ -110,4 +110,14 @@
     </description>
     <on-ambari-upgrade add="false"/>
   </property>
+  <property>
+    <name>yarn.nodemanager.kill-escape.launch-command-line</name>
+    <value>slider-agent,LLAP</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.kill-escape.user</name>
+    <value>hive</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7a1d4e7/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
index 628c119..a8ac1bc 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
@@ -123,6 +123,11 @@
             <type>yarn-site</type>
             <insert key="yarn.nodemanager.admin-env" value=",PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:$PATH" insert-type="append" newline-before="false" newline-after="false" />
           </definition>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_llap_mem">
+            <type>yarn-site</type>
+            <set key="yarn.nodemanager.kill-escape.launch-command-line" value="slider-agent,LLAP"/>
+            <set key="yarn.nodemanager.kill-escape.user" value="hive"/>
+          </definition>
         </changes>
       </component>
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7a1d4e7/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
index f844f98..ae7ffc5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
@@ -334,6 +334,13 @@
         </task>
       </execute-stage>
 
+      <!-- YARN -->
+      <execute-stage service="YARN" component="NODEMANAGER" title="Apply config changes for YARN NM">
+        <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_llap_mem">
+          <summary>Updating YARN NodeManager config for LLAP</summary>
+        </task>
+      </execute-stage>
+
       <!-- KAFKA -->
       <execute-stage service="KAFKA" component="KAFKA_BROKER" title="Apply config changes for Ranger Kafka plugin">
         <task xsi:type="configure" id="hdp_2_6_maint_ranger_kafka_plugin_cluster_name"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7a1d4e7/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
index ceb5b84..c2ae825 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
@@ -697,6 +697,7 @@
       <component name="NODEMANAGER">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env"/>
+          <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_llap_mem"/>
         </pre-upgrade>
         <pre-downgrade/> <!--  no-op to prevent config changes on downgrade -->
         <upgrade>


[19/50] [abbrv] ambari git commit: AMBARI-21045. Enable Storm's AutoTGT configs in secure mode.

Posted by ad...@apache.org.
AMBARI-21045. Enable Storm's AutoTGT configs in secure mode.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3499004c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3499004c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3499004c

Branch: refs/heads/ambari-rest-api-explorer
Commit: 3499004cc6c692c142ba4f6ad857ba67ea81d254
Parents: 7dc2ddc
Author: Sriharsha Chintalapani <ha...@hortonworks.com>
Authored: Thu May 18 13:34:29 2017 -0700
Committer: Sriharsha Chintalapani <ha...@hortonworks.com>
Committed: Thu May 18 13:34:29 2017 -0700

----------------------------------------------------------------------
 .../STORM/0.9.1/configuration/storm-env.xml     |  11 ++
 .../STORM/1.1.0/configuration/storm-site.xml    |  44 ++++++
 .../common-services/STORM/1.1.0/kerberos.json   | 138 +++++++++++++++++++
 .../common-services/STORM/1.1.0/metainfo.xml    |  44 ++++++
 .../stacks/HDP/2.6/services/STORM/metainfo.xml  |   4 +
 5 files changed, 241 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/3499004c/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-env.xml b/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-env.xml
index 4cfe3d5..cfa33e2 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-env.xml
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-env.xml
@@ -125,6 +125,17 @@ export STORM_LOG_DIR={{log_dir}}
 
 export STORM_CONF_DIR={{conf_dir}}
 export STORM_HOME={{storm_component_home_dir}}
+
+# Set up Storm auto-creds:
+# enable storm-autocreds only when storm_jaas.conf is present in the config dir (i.e. in secure mode).
+STORM_HOME="$(dirname $(cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ))"
+STORM_JAAS_CONF=$STORM_HOME/config/storm_jaas.conf
+STORM_AUTOCREDS_LIB_DIR=/usr/hdp/current/storm-client/external/storm-autocreds
+
+if [ -f $STORM_JAAS_CONF ] && [ -d $STORM_AUTOCREDS_LIB_DIR ]; then
+    export STORM_EXT_CLASSPATH=$STORM_AUTOCREDS_LIB_DIR
+fi
+
     </value>
     <value-attributes>
       <type>content</type>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3499004c/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-site.xml b/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-site.xml
new file mode 100644
index 0000000..1a5dde9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-site.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>nimbus.autocredential.plugins.classes</name>
+    <description>
+      Allows users to add token-based authentication for services such as HDFS, HBase and Hive.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>nimbus.credential.renewers.freq.secs</name>
+    <description>
+      Frequency at which tokens will be renewed.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>nimbus.credential.renewers.classes</name>
+    <description>
+      List of classes for token renewal
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3499004c/ambari-server/src/main/resources/common-services/STORM/1.1.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.1.0/kerberos.json b/ambari-server/src/main/resources/common-services/STORM/1.1.0/kerberos.json
new file mode 100644
index 0000000..643cfd3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.1.0/kerberos.json
@@ -0,0 +1,138 @@
+{
+  "services": [
+    {
+      "name": "STORM",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        },
+        {
+          "name": "storm_components",
+          "principal": {
+            "value": "${storm-env/storm_user}${principal_suffix}@${realm}",
+            "type": "user",
+            "configuration": "storm-env/storm_principal_name"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/storm.headless.keytab",
+            "owner": {
+              "name": "${storm-env/storm_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": ""
+            },
+            "configuration": "storm-env/storm_keytab"
+          }
+        },
+        {
+          "name": "/STORM/storm_components",
+          "principal": {
+            "configuration": "storm-atlas-application.properties/atlas.jaas.KafkaClient.option.principal"
+          },
+          "keytab": {
+            "configuration": "storm-atlas-application.properties/atlas.jaas.KafkaClient.option.keyTab"
+          }
+        }
+      ],
+      "configurations": [
+        {
+          "storm-site": {
+            "nimbus.authorizer": "org.apache.storm.security.auth.authorizer.SimpleACLAuthorizer",
+            "drpc.authorizer": "org.apache.storm.security.auth.authorizer.DRPCSimpleACLAuthorizer",
+            "ui.filter": "org.apache.hadoop.security.authentication.server.AuthenticationFilter",
+            "storm.principal.tolocal": "org.apache.storm.security.auth.KerberosPrincipalToLocal",
+            "supervisor.enable": "true",
+            "storm.zookeeper.superACL": "sasl:{{storm_bare_jaas_principal}}",
+            "java.security.auth.login.config": "{{conf_dir}}/storm_jaas.conf",
+            "nimbus.impersonation.authorizer": "org.apache.storm.security.auth.authorizer.ImpersonationAuthorizer",
+            "nimbus.impersonation.acl": "{ {{storm_bare_jaas_principal}} : {hosts: ['*'], groups: ['*']}}",
+            "nimbus.admins": "['{{storm_bare_jaas_principal}}', '{{ambari_bare_jaas_principal}}']",
+            "nimbus.supervisor.users": "['{{storm_bare_jaas_principal}}']",
+            "ui.filter.params": "{'type': 'kerberos', 'kerberos.principal': '{{storm_ui_jaas_principal}}', 'kerberos.keytab': '{{storm_ui_keytab_path}}', 'kerberos.name.rules': 'DEFAULT'}",
+            "nimbus.autocredential.plugins.classes": "['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']",
+            "nimbus.credential.renewers.classes": "['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']",
+            "nimbus.credential.renewers.freq.secs": 82800
+            
+          }
+        },
+        {
+          "ranger-storm-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "STORM_UI_SERVER",
+          "identities": [
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "storm-env/storm_ui_principal_name"
+              },
+              "keytab": {
+                "configuration": "storm-env/storm_ui_keytab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "NIMBUS",
+          "identities": [
+            {
+              "name": "nimbus_server",
+              "principal": {
+                "value": "nimbus/_HOST@${realm}",
+                "type": "service",
+                "configuration": "storm-env/nimbus_principal_name"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nimbus.service.keytab",
+                "owner": {
+                  "name": "${storm-env/storm_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "storm-env/nimbus_keytab"
+              }
+            },
+            {
+              "name": "/STORM/storm_components",
+              "principal": {
+                "configuration": "ranger-storm-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-storm-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "DRPC_SERVER",
+          "identities": [
+            {
+              "name": "drpc_server",
+              "reference": "/STORM/NIMBUS/nimbus_server"
+            }
+          ]
+        },
+        {
+          "name" : "SUPERVISOR"
+        }
+      ]
+    }
+  ]
+}
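
A note on how the autocreds settings above get used: Nimbus only obtains and
renews credentials for topologies that opt in. A hedged sketch of that opt-in,
not part of this patch (the class name and plugin list are illustrative;
Storm's Config.TOPOLOGY_AUTO_CREDENTIALS key is the standard one):

    import java.util.Arrays;

    import org.apache.storm.Config;

    public class AutoCredsTopologyConf {
      public static Config withAutoHdfs() {
        Config conf = new Config();
        // Ask Nimbus to fetch and renew HDFS delegation tokens for this
        // topology via the AutoHDFS plugin enabled in storm-site above.
        conf.put(Config.TOPOLOGY_AUTO_CREDENTIALS,
            Arrays.asList("org.apache.storm.hdfs.security.AutoHDFS"));
        return conf;
      }
    }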

http://git-wip-us.apache.org/repos/asf/ambari/blob/3499004c/ambari-server/src/main/resources/common-services/STORM/1.1.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.1.0/metainfo.xml b/ambari-server/src/main/resources/common-services/STORM/1.1.0/metainfo.xml
new file mode 100644
index 0000000..94f5ca3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.1.0/metainfo.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>STORM</name>
+      <version>1.1.0</version>
+      <extends>common-services/STORM/1.0.1</extends>
+
+      <configuration-dependencies>
+        <config-type>storm-site</config-type>
+        <config-type>storm-env</config-type>
+        <config-type>ranger-storm-plugin-properties</config-type>
+        <config-type>ranger-storm-audit</config-type>
+        <config-type>ranger-storm-policymgr-ssl</config-type>
+        <config-type>ranger-storm-security</config-type>
+        <config-type>admin-properties</config-type>
+        <config-type>ranger-ugsync-site</config-type>
+        <config-type>ranger-admin-site</config-type>
+        <config-type>zookeeper-env</config-type>
+        <config-type>zoo.cfg</config-type>
+        <config-type>application.properties</config-type>
+        <config-type>storm-atlas-application.properties</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3499004c/ambari-server/src/main/resources/stacks/HDP/2.6/services/STORM/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/STORM/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/STORM/metainfo.xml
index db5c8b8..49e00f7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/STORM/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/STORM/metainfo.xml
@@ -22,6 +22,10 @@
     <service>
       <name>STORM</name>
       <version>1.1.0</version>
+      <extends>common-services/STORM/1.1.0</extends>
+      <configuration-dependencies>
+        <config-type>application-properties</config-type>
+      </configuration-dependencies>
     </service>
   </services>
 </metainfo>


[37/50] [abbrv] ambari git commit: AMBARI-21033 Log Search use POJOs for input configuration (mgergely)

Posted by ad...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileBaseDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileBaseDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileBaseDescriptorImpl.java
new file mode 100644
index 0000000..51c7ec8
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileBaseDescriptorImpl.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputFileBaseDescriptor;
+
+import com.google.gson.annotations.Expose;
+import com.google.gson.annotations.SerializedName;
+
+public class InputFileBaseDescriptorImpl extends InputDescriptorImpl implements InputFileBaseDescriptor {
+  @Expose
+  @SerializedName("checkpoint_interval_ms")
+  private Integer checkpointIntervalMs;
+
+  @Expose
+  @SerializedName("process_file")
+  private Boolean processFile;
+
+  @Expose
+  @SerializedName("copy_file")
+  private Boolean copyFile;
+
+  @Override
+  public Boolean getProcessFile() {
+    return processFile;
+  }
+
+  public void setProcessFile(Boolean processFile) {
+    this.processFile = processFile;
+  }
+
+  @Override
+  public Boolean getCopyFile() {
+    return copyFile;
+  }
+
+  public void setCopyFile(Boolean copyFile) {
+    this.copyFile = copyFile;
+  }
+
+  @Override
+  public Integer getCheckpointIntervalMs() {
+    return checkpointIntervalMs;
+  }
+
+  public void setCheckpointIntervalMs(Integer checkpointIntervalMs) {
+    this.checkpointIntervalMs = checkpointIntervalMs;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileDescriptorImpl.java
new file mode 100644
index 0000000..3bfd161
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileDescriptorImpl.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputFileDescriptor;
+
+public class InputFileDescriptorImpl extends InputFileBaseDescriptorImpl implements InputFileDescriptor {
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputS3FileDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputS3FileDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputS3FileDescriptorImpl.java
new file mode 100644
index 0000000..277a57c
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputS3FileDescriptorImpl.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputS3FileDescriptor;
+
+import com.google.gson.annotations.Expose;
+import com.google.gson.annotations.SerializedName;
+
+public class InputS3FileDescriptorImpl extends InputFileBaseDescriptorImpl implements InputS3FileDescriptor {
+  @Expose
+  @SerializedName("s3_access_key")
+  private String s3AccessKey;
+
+  @Expose
+  @SerializedName("s3_secret_key")
+  private String s3SecretKey;
+
+  @Override
+  public String getS3AccessKey() {
+    return s3AccessKey;
+  }
+
+  public void setS3AccessKey(String s3AccessKey) {
+    this.s3AccessKey = s3AccessKey;
+  }
+
+  @Override
+  public String getS3SecretKey() {
+    return s3SecretKey;
+  }
+
+  public void setS3SecretKey(String s3SecretKey) {
+    this.s3SecretKey = s3SecretKey;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapDateDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapDateDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapDateDescriptorImpl.java
new file mode 100644
index 0000000..9daad2b
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapDateDescriptorImpl.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapDateDescriptor;
+
+import com.google.gson.annotations.Expose;
+import com.google.gson.annotations.SerializedName;
+
+public class MapDateDescriptorImpl implements MapDateDescriptor {
+  @Override
+  public String getJsonName() {
+    return "map_date";
+  }
+
+  @Expose
+  @SerializedName("source_date_pattern")
+  private String sourceDatePattern;
+
+  @Expose
+  @SerializedName("target_date_pattern")
+  private String targetDatePattern;
+
+  @Override
+  public String getSourceDatePattern() {
+    return sourceDatePattern;
+  }
+
+  public void setSourceDatePattern(String sourceDatePattern) {
+    this.sourceDatePattern = sourceDatePattern;
+  }
+
+  @Override
+  public String getTargetDatePattern() {
+    return targetDatePattern;
+  }
+
+  public void setTargetDatePattern(String targetDatePattern) {
+    this.targetDatePattern = targetDatePattern;
+  }
+}
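
To make the wire format concrete: the JSON keys come from @SerializedName,
while the "map_date" wrapper key returned by getJsonName() is only added by
PostMapValuesAdapter further below. A small sketch (the demo class and sample
patterns are made up):

    import com.google.gson.Gson;
    import com.google.gson.GsonBuilder;

    public class MapDateWireFormatDemo {
      public static void main(String[] args) {
        MapDateDescriptorImpl d = new MapDateDescriptorImpl();
        d.setSourceDatePattern("yyyy-MM-dd HH:mm:ss");
        d.setTargetDatePattern("yyyy-MM-dd'T'HH:mm:ss.SSS");
        // Serialize only @Expose'd fields, as the annotations above suggest.
        Gson gson = new GsonBuilder().excludeFieldsWithoutExposeAnnotation().create();
        // Prints:
        // {"source_date_pattern":"yyyy-MM-dd HH:mm:ss","target_date_pattern":"yyyy-MM-dd'T'HH:mm:ss.SSS"}
        System.out.println(gson.toJson(d));
      }
    }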

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldCopyDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldCopyDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldCopyDescriptorImpl.java
new file mode 100644
index 0000000..4a8d746
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldCopyDescriptorImpl.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldCopyDescriptor;
+
+import com.google.gson.annotations.Expose;
+import com.google.gson.annotations.SerializedName;
+
+public class MapFieldCopyDescriptorImpl implements MapFieldCopyDescriptor {
+  @Override
+  public String getJsonName() {
+    return "map_fieldcopy";
+  }
+
+  @Expose
+  @SerializedName("copy_name")
+  private String copyName;
+
+  @Override
+  public String getCopyName() {
+    return copyName;
+  }
+
+  public void setCopyName(String copyName) {
+    this.copyName = copyName;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldNameDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldNameDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldNameDescriptorImpl.java
new file mode 100644
index 0000000..333cb67
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldNameDescriptorImpl.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldNameDescriptor;
+
+import com.google.gson.annotations.Expose;
+import com.google.gson.annotations.SerializedName;
+
+public class MapFieldNameDescriptorImpl implements MapFieldNameDescriptor {
+  @Override
+  public String getJsonName() {
+    return "map_fieldname";
+  }
+
+  @Expose
+  @SerializedName("new_fieldname")
+  private String newFieldName;
+
+  @Override
+  public String getNewFieldName() {
+    return newFieldName;
+  }
+
+  public void setNewFieldName(String newFieldName) {
+    this.newFieldName = newFieldName;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldValueDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldValueDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldValueDescriptorImpl.java
new file mode 100644
index 0000000..599e152
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldValueDescriptorImpl.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldValueDescriptor;
+
+import com.google.gson.annotations.Expose;
+import com.google.gson.annotations.SerializedName;
+
+public class MapFieldValueDescriptorImpl implements MapFieldValueDescriptor {
+  @Override
+  public String getJsonName() {
+    return "map_fieldvalue";
+  }
+
+  @Expose
+  @SerializedName("pre_value")
+  private String preValue;
+
+  @Expose
+  @SerializedName("post_value")
+  private String postValue;
+
+  @Override
+  public String getPreValue() {
+    return preValue;
+  }
+
+  public void setPreValue(String preValue) {
+    this.preValue = preValue;
+  }
+
+  @Override
+  public String getPostValue() {
+    return postValue;
+  }
+
+  public void setPostValue(String postValue) {
+    this.postValue = postValue;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesAdapter.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesAdapter.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesAdapter.java
new file mode 100644
index 0000000..32aded8
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesAdapter.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import java.lang.reflect.Type;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldDescriptor;
+
+import com.google.gson.JsonArray;
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonSerializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonSerializationContext;
+
+public class PostMapValuesAdapter implements JsonDeserializer<List<PostMapValuesImpl>>, JsonSerializer<List<PostMapValuesImpl>> {
+  @Override
+  public List<PostMapValuesImpl> deserialize(JsonElement json, Type typeOfT, JsonDeserializationContext context) {
+    List<PostMapValuesImpl> vals = new ArrayList<>();
+    if (json.isJsonArray()) {
+      for (JsonElement e : json.getAsJsonArray()) {
+        vals.add(createPostMapValues(e, context));
+      }
+    } else if (json.isJsonObject()) {
+      vals.add(createPostMapValues(json, context));
+    } else {
+      throw new RuntimeException("Unexpected JSON type: " + json.getClass());
+    }
+    return vals;
+  }
+
+  private PostMapValuesImpl createPostMapValues(JsonElement e, JsonDeserializationContext context) {
+    List<MapFieldDescriptor> mappers = new ArrayList<>();
+    for (Map.Entry<String, JsonElement> m : e.getAsJsonObject().entrySet()) {
+      switch (m.getKey()) {
+        case "map_date":
+          mappers.add((MapDateDescriptorImpl)context.deserialize(m.getValue(), MapDateDescriptorImpl.class));
+          break;
+        case "map_fieldcopy":
+          mappers.add((MapFieldCopyDescriptorImpl)context.deserialize(m.getValue(), MapFieldCopyDescriptorImpl.class));
+          break;
+        case "map_fieldname":
+          mappers.add((MapFieldNameDescriptorImpl)context.deserialize(m.getValue(), MapFieldNameDescriptorImpl.class));
+          break;
+        case "map_fieldvalue":
+          mappers.add((MapFieldValueDescriptorImpl)context.deserialize(m.getValue(), MapFieldValueDescriptorImpl.class));
+          break;
+        default:
+          System.out.println("Unknown key: " + m.getKey());
+      }
+    }
+    
+    PostMapValuesImpl postMapValues = new PostMapValuesImpl();
+    postMapValues.setMappers(mappers);
+    return postMapValues;
+  }
+
+  @Override
+  public JsonElement serialize(List<PostMapValuesImpl> src, Type typeOfSrc, JsonSerializationContext context) {
+    if (src.size() == 1) {
+      return createMapperObject(src.get(0), context);
+    } else {
+      JsonArray jsonArray = new JsonArray();
+      for (PostMapValuesImpl postMapValues : src) {
+        jsonArray.add(createMapperObject(postMapValues, context));
+      }
+      return jsonArray;
+    }
+  }
+
+  private JsonElement createMapperObject(PostMapValuesImpl postMapValues, JsonSerializationContext context) {
+    JsonObject jsonObject = new JsonObject();
+    for (MapFieldDescriptor m : postMapValues.getMappers()) {
+      jsonObject.add(((MapFieldDescriptor)m).getJsonName(), context.serialize(m));
+    }
+    return jsonObject;
+  }
+}
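
A minimal sketch of wiring this adapter into Gson and round-tripping one
post-map-values entry (the demo class, Type token and sample JSON are
illustrative, not from this patch):

    import java.lang.reflect.Type;
    import java.util.List;

    import com.google.gson.Gson;
    import com.google.gson.GsonBuilder;
    import com.google.gson.reflect.TypeToken;

    public class PostMapValuesAdapterDemo {
      public static void main(String[] args) {
        Type listType = new TypeToken<List<PostMapValuesImpl>>() {}.getType();
        Gson gson = new GsonBuilder()
            .registerTypeAdapter(listType, new PostMapValuesAdapter())
            .excludeFieldsWithoutExposeAnnotation()
            .create();
        // deserialize() accepts a bare object as well as an array of them.
        String json = "{\"map_fieldname\":{\"new_fieldname\":\"log_message\"}}";
        List<PostMapValuesImpl> values = gson.fromJson(json, listType);
        System.out.println(values.get(0).getMappers().size()); // 1
        // serialize() collapses a single-element list back to a bare object.
        System.out.println(gson.toJson(values, listType));
      }
    }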

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesImpl.java
new file mode 100644
index 0000000..4d2254a
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesImpl.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import java.util.List;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.PostMapValues;
+
+import com.google.gson.annotations.Expose;
+
+public class PostMapValuesImpl implements PostMapValues {
+  @Expose
+  private List<MapFieldDescriptor> mappers;
+
+  public List<MapFieldDescriptor> getMappers() {
+    return mappers;
+  }
+
+  public void setMappers(List<MapFieldDescriptor> mappers) {
+    this.mappers = mappers;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/LogFeeder.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/LogFeeder.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/LogFeeder.java
index c853f42..8d7c69f 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/LogFeeder.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/LogFeeder.java
@@ -34,7 +34,7 @@ import org.apache.ambari.logfeeder.metrics.MetricData;
 import org.apache.ambari.logfeeder.metrics.MetricsManager;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
 import org.apache.ambari.logfeeder.util.SSLUtil;
-import org.apache.curator.shaded.com.google.common.collect.Maps;
+import com.google.common.collect.Maps;
 import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.log4j.Logger;
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/ConfigBlock.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/ConfigBlock.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/ConfigBlock.java
index 68897e8..cfcc199 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/ConfigBlock.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/ConfigBlock.java
@@ -20,54 +20,19 @@
 package org.apache.ambari.logfeeder.common;
 
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 
-import org.apache.ambari.logfeeder.metrics.MetricData;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
-import org.apache.commons.collections.MapUtils;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.log4j.Logger;
 import org.apache.log4j.Priority;
 
 
-public abstract class ConfigBlock {
-  private static final Logger LOG = Logger.getLogger(ConfigBlock.class);
-
-  private boolean drain = false;
-
+public abstract class ConfigBlock extends ConfigItem {
   protected Map<String, Object> configs;
   protected Map<String, String> contextFields = new HashMap<String, String>();
-  public MetricData statMetric = new MetricData(getStatMetricName(), false);
-  protected String getStatMetricName() {
-    return null;
-  }
-  
   public ConfigBlock() {
   }
 
-  /**
-   * Used while logging. Keep it short and meaningful
-   */
-  public abstract String getShortDescription();
-
-  /**
-   * Every implementor need to give name to the thread they create
-   */
-  public String getNameForThread() {
-    return this.getClass().getSimpleName();
-  }
-
-  public void addMetricsContainers(List<MetricData> metricsList) {
-    metricsList.add(statMetric);
-  }
-
-  /**
-   * This method needs to be overwritten by deriving classes.
-   */
-  public void init() throws Exception {
-  }
-
   public void loadConfig(Map<String, Object> map) {
     configs = LogFeederUtil.cloneObject(map);
 
@@ -81,46 +46,6 @@ public abstract class ConfigBlock {
     return configs;
   }
 
-  @SuppressWarnings("unchecked")
-  public boolean isEnabled() {
-    boolean isEnabled = getBooleanValue("is_enabled", true);
-    if (isEnabled) {
-      // Let's check for static conditions
-      Map<String, Object> conditions = (Map<String, Object>) configs.get("conditions");
-      boolean allow = true;
-      if (MapUtils.isNotEmpty(conditions)) {
-        allow = false;
-        for (String conditionType : conditions.keySet()) {
-          if (conditionType.equalsIgnoreCase("fields")) {
-            Map<String, Object> fields = (Map<String, Object>) conditions.get("fields");
-            for (String fieldName : fields.keySet()) {
-              Object values = fields.get(fieldName);
-              if (values instanceof String) {
-                allow = isFieldConditionMatch(fieldName, (String) values);
-              } else {
-                List<String> listValues = (List<String>) values;
-                for (String stringValue : listValues) {
-                  allow = isFieldConditionMatch(fieldName, stringValue);
-                  if (allow) {
-                    break;
-                  }
-                }
-              }
-              if (allow) {
-                break;
-              }
-            }
-          }
-          if (allow) {
-            break;
-          }
-        }
-        isEnabled = allow;
-      }
-    }
-    return isEnabled;
-  }
-
   public boolean isFieldConditionMatch(String fieldName, String stringValue) {
     boolean allow = false;
     String fieldValue = (String) configs.get(fieldName);
@@ -207,27 +132,17 @@ public abstract class ConfigBlock {
     return retValue;
   }
 
-  public Map<String, String> getContextFields() {
-    return contextFields;
-  }
-
-  public void incrementStat(int count) {
-    statMetric.value += count;
-  }
-
-  public void logStatForMetric(MetricData metric, String prefixStr) {
-    LogFeederUtil.logStatForMetric(metric, prefixStr, ", key=" + getShortDescription());
+  @Override
+  public boolean isEnabled() {
+    return getBooleanValue("is_enabled", true);
   }
 
-  public synchronized void logStat() {
-    logStatForMetric(statMetric, "Stat");
+  public Map<String, String> getContextFields() {
+    return contextFields;
   }
 
   public boolean logConfigs(Priority level) {
-    if (level.toInt() == Priority.INFO_INT && !LOG.isInfoEnabled()) {
-      return false;
-    }
-    if (level.toInt() == Priority.DEBUG_INT && !LOG.isDebugEnabled()) {
+    if (!super.logConfigs(level)) {
       return false;
     }
     LOG.log(level, "Printing configuration Block=" + getShortDescription());
@@ -235,12 +150,4 @@ public abstract class ConfigBlock {
     LOG.log(level, "contextFields=" + contextFields);
     return true;
   }
-
-  public boolean isDrain() {
-    return drain;
-  }
-
-  public void setDrain(boolean drain) {
-    this.drain = drain;
-  }
 }
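
The members deleted from ConfigBlock above move up into a new ConfigItem base
class whose diff appears later in this mail. Purely for readability, a
reconstruction pieced together from the removed lines (illustrative; the
ConfigItem.java diff below is authoritative):

    package org.apache.ambari.logfeeder.common;

    import java.util.List;

    import org.apache.ambari.logfeeder.metrics.MetricData;
    import org.apache.ambari.logfeeder.util.LogFeederUtil;
    import org.apache.log4j.Logger;
    import org.apache.log4j.Priority;

    public abstract class ConfigItem {
      // Protected (was private in ConfigBlock) so subclasses can keep using LOG.
      protected static final Logger LOG = Logger.getLogger(ConfigItem.class);

      private boolean drain = false;
      public MetricData statMetric = new MetricData(getStatMetricName(), false);

      protected String getStatMetricName() {
        return null;
      }

      /** Used while logging. Keep it short and meaningful. */
      public abstract String getShortDescription();

      public abstract boolean isEnabled();

      /** Every implementor needs to name the threads it creates. */
      public String getNameForThread() {
        return this.getClass().getSimpleName();
      }

      public void init() throws Exception {
      }

      public void addMetricsContainers(List<MetricData> metricsList) {
        metricsList.add(statMetric);
      }

      public void incrementStat(int count) {
        statMetric.value += count;
      }

      public void logStatForMetric(MetricData metric, String prefixStr) {
        LogFeederUtil.logStatForMetric(metric, prefixStr, ", key=" + getShortDescription());
      }

      public synchronized void logStat() {
        logStatForMetric(statMetric, "Stat");
      }

      // Level gating only; ConfigBlock.logConfigs() calls super.logConfigs(level)
      // and then prints its own configuration.
      public boolean logConfigs(Priority level) {
        if (level.toInt() == Priority.INFO_INT && !LOG.isInfoEnabled()) {
          return false;
        }
        if (level.toInt() == Priority.DEBUG_INT && !LOG.isDebugEnabled()) {
          return false;
        }
        return true;
      }

      public boolean isDrain() {
        return drain;
      }

      public void setDrain(boolean drain) {
        this.drain = drain;
      }
    }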

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/ConfigHandler.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/ConfigHandler.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/ConfigHandler.java
index effe980..726ff27 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/ConfigHandler.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/ConfigHandler.java
@@ -46,13 +46,19 @@ import org.apache.ambari.logfeeder.util.LogFeederUtil;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang.BooleanUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.ambari.logfeeder.util.AliasUtil.AliasType;
 import org.apache.ambari.logsearch.config.api.InputConfigMonitor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.FilterDescriptorImpl;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputConfigImpl;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputDescriptorImpl;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 
-import com.google.common.collect.ImmutableMap;
 import com.google.gson.reflect.TypeToken;
 
 public class ConfigHandler implements InputConfigMonitor {
@@ -61,10 +67,11 @@ public class ConfigHandler implements InputConfigMonitor {
   private final OutputManager outputManager = new OutputManager();
   private final InputManager inputManager = new InputManager();
 
-  public static Map<String, Object> globalConfigs = new HashMap<>();
+  private final Map<String, Object> globalConfigs = new HashMap<>();
+  private final List<String> globalConfigJsons = new ArrayList<String>();
 
-  private final List<Map<String, Object>> inputConfigList = new ArrayList<>();
-  private final List<Map<String, Object>> filterConfigList = new ArrayList<>();
+  private final List<InputDescriptor> inputConfigList = new ArrayList<>();
+  private final List<FilterDescriptor> filterConfigList = new ArrayList<>();
   private final List<Map<String, Object>> outputConfigList = new ArrayList<>();
   
   private boolean simulateMode = false;
@@ -141,11 +148,12 @@ public class ConfigHandler implements InputConfigMonitor {
   }
   
   @Override
-  public void loadInputConfigs(String serviceName, String inputConfigData) throws Exception {
+  public void loadInputConfigs(String serviceName, InputConfig inputConfig) throws Exception {
     inputConfigList.clear();
     filterConfigList.clear();
     
-    loadConfigs(inputConfigData);
+    inputConfigList.addAll(inputConfig.getInput());
+    filterConfigList.addAll(inputConfig.getFilter());
     
     if (simulateMode) {
       InputSimulate.loadTypeToFilePath(inputConfigList);
@@ -173,14 +181,7 @@ public class ConfigHandler implements InputConfigMonitor {
       switch (key) {
         case "global" :
           globalConfigs.putAll((Map<String, Object>) configMap.get(key));
-          break;
-        case "input" :
-          List<Map<String, Object>> inputConfig = (List<Map<String, Object>>) configMap.get(key);
-          inputConfigList.addAll(inputConfig);
-          break;
-        case "filter" :
-          List<Map<String, Object>> filterConfig = (List<Map<String, Object>>) configMap.get(key);
-          filterConfigList.addAll(filterConfig);
+          globalConfigJsons.add(configData);
           break;
         case "output" :
           List<Map<String, Object>> outputConfig = (List<Map<String, Object>>) configMap.get(key);
@@ -192,21 +193,28 @@ public class ConfigHandler implements InputConfigMonitor {
     }
   }
   
+  @Override
+  public List<String> getGlobalConfigJsons() {
+    return globalConfigJsons;
+  }
+  
   private void simulateIfNeeded() throws Exception {
     int simulatedInputNumber = LogFeederUtil.getIntProperty("logfeeder.simulate.input_number", 0);
     if (simulatedInputNumber == 0)
       return;
     
-    List<Map<String, Object>> simulateInputConfigList = new ArrayList<>();
+    InputConfigImpl simulateInputConfig = new InputConfigImpl();
+    List<InputDescriptorImpl> inputConfigDescriptors = new ArrayList<>();
+    simulateInputConfig.setInput(inputConfigDescriptors);
+    simulateInputConfig.setFilter(new ArrayList<FilterDescriptorImpl>());
     for (int i = 0; i < simulatedInputNumber; i++) {
-      HashMap<String, Object> mapList = new HashMap<String, Object>();
-      mapList.put("source", "simulate");
-      mapList.put("rowtype", "service");
-      simulateInputConfigList.add(mapList);
+      InputDescriptorImpl inputDescriptor = new InputDescriptorImpl() {};
+      inputDescriptor.setSource("simulate");
+      inputDescriptor.setRowtype("service");
+      inputDescriptor.setAddFields(new HashMap<String, String>());
+      inputConfigDescriptors.add(inputDescriptor);
     }
     
-    Map<String, List<Map<String, Object>>> simulateInputConfigMap = ImmutableMap.of("input", simulateInputConfigList);
-    String simulateInputConfig = LogFeederUtil.getGson().toJson(simulateInputConfigMap);
     loadInputConfigs("Simulation", simulateInputConfig);
     
     simulateMode = true;
@@ -233,7 +241,7 @@ public class ConfigHandler implements InputConfigMonitor {
       output.loadConfig(map);
 
       // We will only check for is_enabled out here. Down below we will check whether this output is enabled for the input
-      if (output.getBooleanValue("is_enabled", true)) {
+      if (output.isEnabled()) {
         output.logConfigs(Level.INFO);
         outputManager.add(output);
       } else {
@@ -243,24 +251,23 @@ public class ConfigHandler implements InputConfigMonitor {
   }
 
   private void loadInputs(String serviceName) {
-    for (Map<String, Object> map : inputConfigList) {
-      if (map == null) {
+    for (InputDescriptor inputDescriptor : inputConfigList) {
+      if (inputDescriptor == null) {
         continue;
       }
-      mergeBlocks(globalConfigs, map);
 
-      String value = (String) map.get("source");
-      if (StringUtils.isEmpty(value)) {
+      String source = (String) inputDescriptor.getSource();
+      if (StringUtils.isEmpty(source)) {
         LOG.error("Input block doesn't have source element");
         continue;
       }
-      Input input = (Input) AliasUtil.getClassInstance(value, AliasType.INPUT);
+      Input input = (Input) AliasUtil.getClassInstance(source, AliasType.INPUT);
       if (input == null) {
         LOG.error("Input object could not be found");
         continue;
       }
-      input.setType(value);
-      input.loadConfig(map);
+      input.setType(source);
+      input.loadConfig(inputDescriptor);
 
       if (input.isEnabled()) {
         input.setOutputManager(outputManager);
@@ -278,13 +285,20 @@ public class ConfigHandler implements InputConfigMonitor {
 
     List<Input> toRemoveInputList = new ArrayList<Input>();
     for (Input input : inputManager.getInputList(serviceName)) {
-      for (Map<String, Object> map : filterConfigList) {
-        if (map == null) {
+      for (FilterDescriptor filterDescriptor : filterConfigList) {
+        if (filterDescriptor == null) {
+          continue;
+        }
+        if (BooleanUtils.isFalse(filterDescriptor.isEnabled())) {
+          LOG.debug("Ignoring filter " + filterDescriptor.getFilter() + " because it is disabled");
+          continue;
+        }
+        if (!input.isFilterRequired(filterDescriptor)) {
+          LOG.debug("Ignoring filter " + filterDescriptor.getFilter() + " for input " + input.getShortDescription());
           continue;
         }
-        mergeBlocks(globalConfigs, map);
 
-        String value = (String) map.get("filter");
+        String value = filterDescriptor.getFilter();
         if (StringUtils.isEmpty(value)) {
           LOG.error("Filter block doesn't have filter element");
           continue;
@@ -294,16 +308,12 @@ public class ConfigHandler implements InputConfigMonitor {
           LOG.error("Filter object could not be found");
           continue;
         }
-        filter.loadConfig(map);
+        filter.loadConfig(filterDescriptor);
         filter.setInput(input);
 
-        if (filter.isEnabled()) {
-          filter.setOutputManager(outputManager);
-          input.addFilter(filter);
-          filter.logConfigs(Level.INFO);
-        } else {
-          LOG.debug("Ignoring filter " + filter.getShortDescription() + " for input " + input.getShortDescription());
-        }
+        filter.setOutputManager(outputManager);
+        input.addFilter(filter);
+        filter.logConfigs(Level.INFO);
       }
       
       if (input.getFirstFilter() == null) {
@@ -318,43 +328,25 @@ public class ConfigHandler implements InputConfigMonitor {
   }
 
   private void sortFilters() {
-    Collections.sort(filterConfigList, new Comparator<Map<String, Object>>() {
-
+    Collections.sort(filterConfigList, new Comparator<FilterDescriptor>() {
       @Override
-      public int compare(Map<String, Object> o1, Map<String, Object> o2) {
-        Object o1Sort = o1.get("sort_order");
-        Object o2Sort = o2.get("sort_order");
+      public int compare(FilterDescriptor o1, FilterDescriptor o2) {
+        Integer o1Sort = o1.getSortOrder();
+        Integer o2Sort = o2.getSortOrder();
         if (o1Sort == null || o2Sort == null) {
           return 0;
         }
         
-        int o1Value = parseSort(o1, o1Sort);
-        int o2Value = parseSort(o2, o2Sort);
-        
-        return o1Value - o2Value;
-      }
-
-      private int parseSort(Map<String, Object> map, Object o) {
-        if (!(o instanceof Number)) {
-          try {
-            return (new Double(Double.parseDouble(o.toString()))).intValue();
-          } catch (Throwable t) {
-            LOG.error("Value is not of type Number. class=" + o.getClass().getName() + ", value=" + o.toString()
-              + ", map=" + map.toString());
-            return 0;
-          }
-        } else {
-          return ((Number) o).intValue();
-        }
+        return o1Sort - o2Sort;
       }
-    });
+    } );
   }
 
   private void assignOutputsToInputs(String serviceName) {
     Set<Output> usedOutputSet = new HashSet<Output>();
     for (Input input : inputManager.getInputList(serviceName)) {
       for (Output output : outputManager.getOutputs()) {
-        if (LogFeederUtil.isEnabled(output.getConfigs(), input.getConfigs())) {
+        if (input.isOutputRequired(output)) {
           usedOutputSet.add(output);
           input.addOutput(output);
         }

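A note on the sortFilters() rewrite above: with typed descriptors the old parseSort() fallback is gone, and the comparator simply subtracts the two boxed Integers. That is fine for the small sort_order values LogFeeder uses, but Integer.compare avoids any overflow risk for extreme values. A self-contained sketch of an equivalent, overflow-safe comparator (FilterDescriptor and getSortOrder() come from this commit; the holder class is made up for illustration):

    import java.util.Comparator;

    import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;

    public class FilterSortOrder {
      // Null sort_order values compare as equal, matching the behavior in the diff.
      public static final Comparator<FilterDescriptor> BY_SORT_ORDER = new Comparator<FilterDescriptor>() {
        @Override
        public int compare(FilterDescriptor o1, FilterDescriptor o2) {
          Integer s1 = o1.getSortOrder();
          Integer s2 = o2.getSortOrder();
          if (s1 == null || s2 == null) {
            return 0;
          }
          return Integer.compare(s1, s2); // safe for the full int range, unlike s1 - s2
        }
      };
    }
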
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/ConfigItem.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/ConfigItem.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/ConfigItem.java
new file mode 100644
index 0000000..5c20a8e
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/ConfigItem.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logfeeder.common;
+
+import java.util.List;
+
+import org.apache.ambari.logfeeder.metrics.MetricData;
+import org.apache.ambari.logfeeder.util.LogFeederUtil;
+import org.apache.log4j.Logger;
+import org.apache.log4j.Priority;
+
+public abstract class ConfigItem {
+
+  protected static final Logger LOG = Logger.getLogger(ConfigItem.class);
+  private boolean drain = false;
+  public MetricData statMetric = new MetricData(getStatMetricName(), false);
+
+  public ConfigItem() {
+    super();
+  }
+
+  protected String getStatMetricName() {
+    return null;
+  }
+
+  /**
+   * Used while logging. Keep it short and meaningful
+   */
+  public abstract String getShortDescription();
+
+  /**
+   * Every implementor needs to give a name to the thread it creates
+   */
+  public String getNameForThread() {
+    return this.getClass().getSimpleName();
+  }
+
+  public void addMetricsContainers(List<MetricData> metricsList) {
+    metricsList.add(statMetric);
+  }
+
+  /**
+   * This method needs to be overridden by derived classes.
+   */
+  public void init() throws Exception {
+  }
+
+  public abstract boolean isEnabled();
+
+  public void incrementStat(int count) {
+    statMetric.value += count;
+  }
+
+  public void logStatForMetric(MetricData metric, String prefixStr) {
+    LogFeederUtil.logStatForMetric(metric, prefixStr, ", key=" + getShortDescription());
+  }
+
+  public synchronized void logStat() {
+    logStatForMetric(statMetric, "Stat");
+  }
+
+  public boolean logConfigs(Priority level) {
+    if (level.toInt() == Priority.INFO_INT && !LOG.isInfoEnabled()) {
+      return false;
+    }
+    if (level.toInt() == Priority.DEBUG_INT && !LOG.isDebugEnabled()) {
+      return false;
+    }
+    return true;
+  }
+
+  public boolean isDrain() {
+    return drain;
+  }
+
+  public void setDrain(boolean drain) {
+    this.drain = drain;
+  }
+
+}
\ No newline at end of file

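ConfigItem is the new common base for the typed-descriptor world: Filter and Input now extend it (Output, per its diff further down, stays on the map-backed ConfigBlock for now). To make its contract concrete, a minimal hypothetical implementor (only ConfigItem itself comes from the commit; DummyItem is invented for illustration):

    import org.apache.ambari.logfeeder.common.ConfigItem;

    public class DummyItem extends ConfigItem {
      @Override
      public String getShortDescription() {
        return "dummy"; // used as the key in log and stat lines, so keep it short
      }

      @Override
      public boolean isEnabled() {
        return true; // real subclasses derive this from their typed descriptor
      }
    }

Note that logConfigs(Priority) only answers whether logging at that level is worthwhile; subclasses such as Input below call super.logConfigs(level) first and then print their own details.
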
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/Filter.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/Filter.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/Filter.java
index afd903e..fd02497 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/Filter.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/Filter.java
@@ -24,7 +24,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.ambari.logfeeder.common.ConfigBlock;
+import org.apache.ambari.logfeeder.common.ConfigItem;
 import org.apache.ambari.logfeeder.common.LogfeederException;
 import org.apache.ambari.logfeeder.input.Input;
 import org.apache.ambari.logfeeder.input.InputMarker;
@@ -33,18 +33,28 @@ import org.apache.ambari.logfeeder.metrics.MetricData;
 import org.apache.ambari.logfeeder.output.OutputManager;
 import org.apache.ambari.logfeeder.util.AliasUtil;
 import org.apache.ambari.logfeeder.util.AliasUtil.AliasType;
-import org.apache.log4j.Logger;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.PostMapValues;
+import org.apache.commons.lang.BooleanUtils;
 import org.apache.log4j.Priority;
 
-public abstract class Filter extends ConfigBlock {
-  private static final Logger LOG = Logger.getLogger(Filter.class);
-
+public abstract class Filter extends ConfigItem {
+  protected FilterDescriptor filterDescriptor;
   protected Input input;
   private Filter nextFilter = null;
   private OutputManager outputManager;
 
   private Map<String, List<Mapper>> postFieldValueMappers = new HashMap<String, List<Mapper>>();
 
+  public void loadConfig(FilterDescriptor filterDescriptor) {
+    this.filterDescriptor = filterDescriptor;
+  }
+
+  public FilterDescriptor getFilterDescriptor() {
+    return filterDescriptor;
+  }
+
   @Override
   public void init() throws Exception {
     super.init();
@@ -55,28 +65,22 @@ public abstract class Filter extends ConfigBlock {
     }
   }
 
-  @SuppressWarnings("unchecked")
   private void initializePostMapValues() {
-    Map<String, Object> postMapValues = (Map<String, Object>) getConfigValue("post_map_values");
+    Map<String, ? extends List<? extends PostMapValues>> postMapValues = filterDescriptor.getPostMapValues();
     if (postMapValues == null) {
       return;
     }
     for (String fieldName : postMapValues.keySet()) {
-      List<Map<String, Object>> mapList = null;
-      Object values = postMapValues.get(fieldName);
-      if (values instanceof List<?>) {
-        mapList = (List<Map<String, Object>>) values;
-      } else {
-        mapList = new ArrayList<Map<String, Object>>();
-        mapList.add((Map<String, Object>) values);
-      }
-      for (Map<String, Object> mapObject : mapList) {
-        for (String mapClassCode : mapObject.keySet()) {
+      List<? extends PostMapValues> values = postMapValues.get(fieldName);
+      for (PostMapValues pmv : values) {
+        for (MapFieldDescriptor mapFieldDescriptor : pmv.getMappers()) {
+          String mapClassCode = mapFieldDescriptor.getJsonName();
           Mapper mapper = (Mapper) AliasUtil.getClassInstance(mapClassCode, AliasType.MAPPER);
           if (mapper == null) {
-            break;
+            LOG.warn("Unknown mapper type: " + mapClassCode);
+            continue;
           }
-          if (mapper.init(getInput().getShortDescription(), fieldName, mapClassCode, mapObject.get(mapClassCode))) {
+          if (mapper.init(getInput().getShortDescription(), fieldName, mapClassCode, mapFieldDescriptor)) {
             List<Mapper> fieldMapList = postFieldValueMappers.get(fieldName);
             if (fieldMapList == null) {
               fieldMapList = new ArrayList<Mapper>();
@@ -156,15 +160,8 @@ public abstract class Filter extends ConfigBlock {
   }
 
   @Override
-  public boolean isFieldConditionMatch(String fieldName, String stringValue) {
-    if (!super.isFieldConditionMatch(fieldName, stringValue)) {
-      if (input != null) {
-        return input.isFieldConditionMatch(fieldName, stringValue);
-      } else {
-        return false;
-      }
-    }
-    return true;
+  public boolean isEnabled() {
+    return BooleanUtils.isNotFalse(filterDescriptor.isEnabled());
   }
 
   @Override

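Two behavioral points in the Filter rewrite: the per-filter enabled check moved up into ConfigHandler.loadFilters (see above), and the new isEnabled() uses BooleanUtils.isNotFalse, which treats a null (unset) is_enabled as enabled, so only an explicit false in the descriptor disables a filter. A quick stand-alone check of those semantics (commons-lang BooleanUtils, as imported in the diff; the demo class is made up):

    import org.apache.commons.lang.BooleanUtils;

    public class EnabledSemantics {
      public static void main(String[] args) {
        System.out.println(BooleanUtils.isNotFalse(null));          // true: unset means enabled
        System.out.println(BooleanUtils.isNotFalse(Boolean.TRUE));  // true
        System.out.println(BooleanUtils.isNotFalse(Boolean.FALSE)); // false: explicit opt-out
      }
    }
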
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterGrok.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterGrok.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterGrok.java
index 7e2da70..70aea65 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterGrok.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterGrok.java
@@ -38,6 +38,8 @@ import org.apache.ambari.logfeeder.common.LogfeederException;
 import org.apache.ambari.logfeeder.input.InputMarker;
 import org.apache.ambari.logfeeder.metrics.MetricData;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterGrokDescriptor;
+import org.apache.commons.lang3.BooleanUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
@@ -75,11 +77,10 @@ public class FilterGrok extends Filter {
     super.init();
 
     try {
-      messagePattern = escapePattern(getStringValue("message_pattern"));
-      multilinePattern = escapePattern(getStringValue("multiline_pattern"));
-      sourceField = getStringValue("source_field");
-      removeSourceField = getBooleanValue("remove_source_field",
-        removeSourceField);
+      messagePattern = escapePattern(((FilterGrokDescriptor)filterDescriptor).getMessagePattern());
+      multilinePattern = escapePattern(((FilterGrokDescriptor)filterDescriptor).getMultilinePattern());
+      sourceField = ((FilterGrokDescriptor)filterDescriptor).getSourceField();
+      removeSourceField = BooleanUtils.toBooleanDefaultIfNull(filterDescriptor.isRemoveSourceField(), removeSourceField);
 
       LOG.info("init() done. grokPattern=" + messagePattern + ", multilinePattern=" + multilinePattern + ", " +
       getShortDescription());

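init() above downcasts filterDescriptor to FilterGrokDescriptor three times and will throw a ClassCastException if the config loader ever pairs FilterGrok with another descriptor type. A defensive variant of that block inside init(), as a sketch (all names come from the commit; the explicit guard and local variable are not what the commit ships):

    if (!(filterDescriptor instanceof FilterGrokDescriptor)) {
      throw new IllegalStateException("FilterGrok requires a FilterGrokDescriptor, got "
          + filterDescriptor.getClass().getName());
    }
    FilterGrokDescriptor grokDescriptor = (FilterGrokDescriptor) filterDescriptor;
    messagePattern = escapePattern(grokDescriptor.getMessagePattern());
    multilinePattern = escapePattern(grokDescriptor.getMultilinePattern());
    sourceField = grokDescriptor.getSourceField();
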
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterJSON.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterJSON.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterJSON.java
index 35f692e..cfccdeb 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterJSON.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterJSON.java
@@ -25,12 +25,9 @@ import org.apache.ambari.logfeeder.common.LogfeederException;
 import org.apache.ambari.logfeeder.input.InputMarker;
 import org.apache.ambari.logfeeder.util.DateUtil;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
-import org.apache.log4j.Logger;
 
 public class FilterJSON extends Filter {
   
-  private static final Logger LOG  = Logger.getLogger(FilterJSON.class);
-
   @Override
   public void apply(String inputStr, InputMarker inputMarker) throws LogfeederException {
     Map<String, Object> jsonMap = null;

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterKeyValue.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterKeyValue.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterKeyValue.java
index b04a439..f2a4186 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterKeyValue.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterKeyValue.java
@@ -28,13 +28,11 @@ import org.apache.ambari.logfeeder.common.LogfeederException;
 import org.apache.ambari.logfeeder.input.InputMarker;
 import org.apache.ambari.logfeeder.metrics.MetricData;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterKeyValueDescriptor;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
 
 public class FilterKeyValue extends Filter {
-  private static final Logger LOG = Logger.getLogger(FilterKeyValue.class);
-
   private String sourceField = null;
   private String valueSplit = "=";
   private String fieldSplit = "\t";
@@ -46,10 +44,10 @@ public class FilterKeyValue extends Filter {
   public void init() throws Exception {
     super.init();
 
-    sourceField = getStringValue("source_field");
-    valueSplit = getStringValue("value_split", valueSplit);
-    fieldSplit = getStringValue("field_split", fieldSplit);
-    valueBorders = getStringValue("value_borders");
+    sourceField = filterDescriptor.getSourceField();
+    valueSplit = StringUtils.defaultString(((FilterKeyValueDescriptor)filterDescriptor).getValueSplit(), valueSplit);
+    fieldSplit = StringUtils.defaultString(((FilterKeyValueDescriptor)filterDescriptor).getFieldSplit(), fieldSplit);
+    valueBorders = ((FilterKeyValueDescriptor)filterDescriptor).getValueBorders();
 
     LOG.info("init() done. source_field=" + sourceField + ", value_split=" + valueSplit + ", " + ", field_split=" +
         fieldSplit + ", " + getShortDescription());

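Same defaulting idiom as elsewhere in the commit: StringUtils.defaultString keeps the field's built-in default whenever the descriptor returns null. For reference, its semantics in isolation (commons-lang3 StringUtils, as imported above; the demo class is made up):

    import org.apache.commons.lang3.StringUtils;

    public class DefaultingDemo {
      public static void main(String[] args) {
        System.out.println(StringUtils.defaultString(null, "="));  // "=": null falls back to the default
        System.out.println(StringUtils.defaultString(":", "="));   // ":": a configured value wins
      }
    }
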
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/AbstractInputFile.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/AbstractInputFile.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/AbstractInputFile.java
index 41a1fa5..cfa1903 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/AbstractInputFile.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/AbstractInputFile.java
@@ -29,14 +29,14 @@ import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputFileBaseDescriptor;
+import org.apache.commons.lang.BooleanUtils;
+import org.apache.commons.lang.ObjectUtils;
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
 
 public abstract class AbstractInputFile extends Input {
-  protected static final Logger LOG = Logger.getLogger(AbstractInputFile.class);
-
   private static final int DEFAULT_CHECKPOINT_INTERVAL_MS = 5 * 1000;
 
   protected File[] logFiles;
@@ -73,16 +73,16 @@ public abstract class AbstractInputFile extends Input {
 
     // Let's close the file and set it to true after we start monitoring it
     setClosed(true);
-    logPath = getStringValue("path");
-    tail = getBooleanValue("tail", tail);
-    checkPointIntervalMS = getIntValue("checkpoint.interval.ms", DEFAULT_CHECKPOINT_INTERVAL_MS);
+    logPath = inputDescriptor.getPath();
+    tail = BooleanUtils.toBooleanDefaultIfNull(inputDescriptor.isTail(), tail);
+    checkPointIntervalMS = (int) ObjectUtils.defaultIfNull(((InputFileBaseDescriptor)inputDescriptor).getCheckpointIntervalMs(), DEFAULT_CHECKPOINT_INTERVAL_MS);
 
     if (StringUtils.isEmpty(logPath)) {
       LOG.error("path is empty for file input. " + getShortDescription());
       return;
     }
 
-    String startPosition = getStringValue("start_position");
+    String startPosition = inputDescriptor.getStartPosition();
     if (StringUtils.isEmpty(startPosition) || startPosition.equalsIgnoreCase("beginning") ||
         startPosition.equalsIgnoreCase("begining") || !tail) {
       isStartFromBegining = true;
@@ -313,7 +313,7 @@ public abstract class AbstractInputFile extends Input {
 
   @Override
   public String getShortDescription() {
-    return "input:source=" + getStringValue("source") + ", path=" +
+    return "input:source=" + inputDescriptor.getSource() + ", path=" +
         (!ArrayUtils.isEmpty(logFiles) ? logFiles[0].getAbsolutePath() : logPath);
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/Input.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/Input.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/Input.java
index 9f54d8a..fba596d 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/Input.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/Input.java
@@ -21,23 +21,25 @@ package org.apache.ambari.logfeeder.input;
 
 import java.io.File;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
 import org.apache.ambari.logfeeder.input.cache.LRUCache;
-import org.apache.ambari.logfeeder.common.ConfigBlock;
+import org.apache.ambari.logfeeder.common.ConfigItem;
 import org.apache.ambari.logfeeder.common.LogfeederException;
 import org.apache.ambari.logfeeder.filter.Filter;
 import org.apache.ambari.logfeeder.metrics.MetricData;
 import org.apache.ambari.logfeeder.output.Output;
 import org.apache.ambari.logfeeder.output.OutputManager;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
-import org.apache.log4j.Logger;
-
-public abstract class Input extends ConfigBlock implements Runnable {
-  private static final Logger LOG = Logger.getLogger(Input.class);
-
+import org.apache.ambari.logsearch.config.api.model.inputconfig.Conditions;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.Fields;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
+import org.apache.commons.lang.BooleanUtils;
+import org.apache.log4j.Priority;
+
+public abstract class Input extends ConfigItem implements Runnable {
   private static final boolean DEFAULT_TAIL = true;
   private static final boolean DEFAULT_USE_EVENT_MD5 = false;
   private static final boolean DEFAULT_GEN_EVENT_MD5 = true;
@@ -47,12 +49,8 @@ public abstract class Input extends ConfigBlock implements Runnable {
   private static final long DEFAULT_CACHE_DEDUP_INTERVAL = 1000;
   private static final String DEFAULT_CACHE_KEY_FIELD = "log_message";
 
-  private static final String CACHE_ENABLED = "cache_enabled";
-  private static final String CACHE_KEY_FIELD = "cache_key_field";
-  private static final String CACHE_LAST_DEDUP_ENABLED = "cache_last_dedup_enabled";
-  private static final String CACHE_SIZE = "cache_size";
-  private static final String CACHE_DEDUP_INTERVAL = "cache_dedup_interval";
-
+  protected InputDescriptor inputDescriptor;
+  
   protected InputManager inputManager;
   protected OutputManager outputManager;
   private List<Output> outputList = new ArrayList<Output>();
@@ -75,21 +73,12 @@ public abstract class Input extends ConfigBlock implements Runnable {
     return null;
   }
   
-  @Override
-  public void loadConfig(Map<String, Object> map) {
-    super.loadConfig(map);
-    String typeValue = getStringValue("type");
-    if (typeValue != null) {
-      // Explicitly add type and value to field list
-      contextFields.put("type", typeValue);
-      @SuppressWarnings("unchecked")
-      Map<String, Object> addFields = (Map<String, Object>) map.get("add_fields");
-      if (addFields == null) {
-        addFields = new HashMap<String, Object>();
-        map.put("add_fields", addFields);
-      }
-      addFields.put("type", typeValue);
-    }
+  public void loadConfig(InputDescriptor inputDescriptor) {
+    this.inputDescriptor = inputDescriptor;
+  }
+
+  public InputDescriptor getInputDescriptor() {
+    return inputDescriptor;
   }
 
   public void setType(String type) {
@@ -104,6 +93,12 @@ public abstract class Input extends ConfigBlock implements Runnable {
     this.outputManager = outputManager;
   }
 
+  public boolean isFilterRequired(FilterDescriptor filterDescriptor) {
+    Conditions conditions = filterDescriptor.getConditions();
+    Fields fields = conditions.getFields();
+    return fields.getType().contains(inputDescriptor.getType());
+  }
+
   public void addFilter(Filter filter) {
     if (firstFilter == null) {
       firstFilter = filter;
@@ -116,6 +111,22 @@ public abstract class Input extends ConfigBlock implements Runnable {
     }
   }
 
+  @SuppressWarnings("unchecked")
+  public boolean isOutputRequired(Output output) {
+    Map<String, Object> conditions = (Map<String, Object>) output.getConfigs().get("conditions");
+    if (conditions == null) {
+      return false;
+    }
+    
+    Map<String, Object> fields = (Map<String, Object>) conditions.get("fields");
+    if (fields == null) {
+      return false;
+    }
+    
+    List<String> types = (List<String>) fields.get("rowtype");
+    return types.contains(inputDescriptor.getRowtype());
+  }
+
   public void addOutput(Output output) {
     outputList.add(output);
   }
@@ -124,9 +135,9 @@ public abstract class Input extends ConfigBlock implements Runnable {
   public void init() throws Exception {
     super.init();
     initCache();
-    tail = getBooleanValue("tail", DEFAULT_TAIL);
-    useEventMD5 = getBooleanValue("use_event_md5_as_id", DEFAULT_USE_EVENT_MD5);
-    genEventMD5 = getBooleanValue("gen_event_md5", DEFAULT_GEN_EVENT_MD5);
+    tail = BooleanUtils.toBooleanDefaultIfNull(inputDescriptor.isTail(), DEFAULT_TAIL);
+    useEventMD5 = BooleanUtils.toBooleanDefaultIfNull(inputDescriptor.isUseEventMd5AsId(), DEFAULT_USE_EVENT_MD5);
+    genEventMD5 = BooleanUtils.toBooleanDefaultIfNull(inputDescriptor.isGenEventMd5(), DEFAULT_GEN_EVENT_MD5);
 
     if (firstFilter != null) {
       firstFilter.init();
@@ -236,26 +247,26 @@ public abstract class Input extends ConfigBlock implements Runnable {
   }
 
   private void initCache() {
-    boolean cacheEnabled = getConfigValue(CACHE_ENABLED) != null
-      ? getBooleanValue(CACHE_ENABLED, DEFAULT_CACHE_ENABLED)
+    boolean cacheEnabled = inputDescriptor.isCacheEnabled() != null
+      ? inputDescriptor.isCacheEnabled()
       : LogFeederUtil.getBooleanProperty("logfeeder.cache.enabled", DEFAULT_CACHE_ENABLED);
     if (cacheEnabled) {
-      String cacheKeyField = getConfigValue(CACHE_KEY_FIELD) != null
-        ? getStringValue(CACHE_KEY_FIELD)
+      String cacheKeyField = inputDescriptor.getCacheKeyField() != null
+        ? inputDescriptor.getCacheKeyField()
         : LogFeederUtil.getStringProperty("logfeeder.cache.key.field", DEFAULT_CACHE_KEY_FIELD);
 
-      setCacheKeyField(getStringValue(cacheKeyField));
+      setCacheKeyField(cacheKeyField);
 
-      boolean cacheLastDedupEnabled = getConfigValue(CACHE_LAST_DEDUP_ENABLED) != null
-        ? getBooleanValue(CACHE_LAST_DEDUP_ENABLED, DEFAULT_CACHE_DEDUP_LAST)
+      boolean cacheLastDedupEnabled = inputDescriptor.getCacheLastDedupEnabled() != null
+        ? inputDescriptor.getCacheLastDedupEnabled()
         : LogFeederUtil.getBooleanProperty("logfeeder.cache.last.dedup.enabled", DEFAULT_CACHE_DEDUP_LAST);
 
-      int cacheSize = getConfigValue(CACHE_SIZE) != null
-        ? getIntValue(CACHE_SIZE, DEFAULT_CACHE_SIZE)
+      int cacheSize = inputDescriptor.getCacheSize() != null
+        ? inputDescriptor.getCacheSize()
         : LogFeederUtil.getIntProperty("logfeeder.cache.size", DEFAULT_CACHE_SIZE);
 
-      long cacheDedupInterval = getConfigValue(CACHE_DEDUP_INTERVAL) != null
-        ? getLongValue(CACHE_DEDUP_INTERVAL, DEFAULT_CACHE_DEDUP_INTERVAL)
+      long cacheDedupInterval = inputDescriptor.getCacheDedupInterval() != null
+        ? inputDescriptor.getCacheDedupInterval()
         : Long.parseLong(LogFeederUtil.getStringProperty("logfeeder.cache.dedup.interval", String.valueOf(DEFAULT_CACHE_DEDUP_INTERVAL)));
 
       setCache(new LRUCache(cacheSize, filePath, cacheDedupInterval, cacheLastDedupEnabled));
@@ -319,6 +330,11 @@ public abstract class Input extends ConfigBlock implements Runnable {
   }
 
   @Override
+  public boolean isEnabled() {
+    return BooleanUtils.isNotFalse(inputDescriptor.isEnabled());
+  }
+
+  @Override
   public String getNameForThread() {
     if (filePath != null) {
       try {
@@ -331,7 +347,17 @@ public abstract class Input extends ConfigBlock implements Runnable {
   }
 
   @Override
+  public boolean logConfigs(Priority level) {
+    if (!super.logConfigs(level)) {
+      return false;
+    }
+    LOG.log(level, "Printing Input=" + getShortDescription());
+    LOG.log(level, "description=" + inputDescriptor.getPath());
+    return true;
+  }
+
+  @Override
   public String toString() {
     return getShortDescription();
   }
-}
+}
\ No newline at end of file

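Two nullability edges in the new condition checks above: isFilterRequired assumes every filter descriptor carries conditions.fields.type, and isOutputRequired assumes the output's conditions block carries a rowtype list; either being absent ends in a NullPointerException at load time. A null-safe rewrite of the tail of isOutputRequired, as a sketch (not what the commit ships):

    @SuppressWarnings("unchecked")
    List<String> types = (List<String>) fields.get("rowtype");
    return types != null && types.contains(inputDescriptor.getRowtype());
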
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputFile.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputFile.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputFile.java
index 3737839..fc40ca4 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputFile.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputFile.java
@@ -25,7 +25,9 @@ import java.io.FileNotFoundException;
 
 import org.apache.ambari.logfeeder.input.reader.LogsearchReaderFactory;
 import org.apache.ambari.logfeeder.util.FileUtil;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputFileDescriptor;
 import org.apache.commons.io.filefilter.WildcardFileFilter;
+import org.apache.commons.lang.BooleanUtils;
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.solr.common.util.Base64;
 
@@ -62,7 +64,7 @@ public class InputFile extends AbstractInputFile {
 
   @Override
   void start() throws Exception {
-    boolean isProcessFile = getBooleanValue("process_file", true);
+    boolean isProcessFile = BooleanUtils.toBooleanDefaultIfNull(((InputFileDescriptor)inputDescriptor).getProcessFile(), true);
     if (isProcessFile) {
       if (tail) {
         processFile(logFiles[0]);
@@ -100,7 +102,7 @@ public class InputFile extends AbstractInputFile {
   }
 
   private void copyFiles(File[] files) {
-    boolean isCopyFile = getBooleanValue("copy_file", false);
+    boolean isCopyFile = BooleanUtils.toBooleanDefaultIfNull(((InputFileDescriptor)inputDescriptor).getCopyFile(), false);
     if (isCopyFile && files != null) {
       for (File file : files) {
         try {

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputS3File.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputS3File.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputS3File.java
index f560379..4bf162b 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputS3File.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputS3File.java
@@ -23,6 +23,7 @@ import java.io.File;
 import java.io.IOException;
 
 import org.apache.ambari.logfeeder.util.S3Util;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputS3FileDescriptor;
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.solr.common.util.Base64;
 
@@ -78,8 +79,8 @@ public class InputS3File extends AbstractInputFile {
 
   @Override
   protected BufferedReader openLogFile(File logPathFile) throws IOException {
-    String s3AccessKey = getStringValue("s3_access_key");
-    String s3SecretKey = getStringValue("s3_secret_key");
+    String s3AccessKey = ((InputS3FileDescriptor)inputDescriptor).getS3AccessKey();
+    String s3SecretKey = ((InputS3FileDescriptor)inputDescriptor).getS3SecretKey();
     BufferedReader br = S3Util.getReader(logPathFile.getPath(), s3AccessKey, s3SecretKey);
     fileKey = getFileKey(logPathFile);
     base64FileKey = Base64.byteArrayToBase64(fileKey.toString().getBytes());

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputSimulate.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputSimulate.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputSimulate.java
index d193cdb..5e7bdb3 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputSimulate.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputSimulate.java
@@ -21,7 +21,6 @@ package org.apache.ambari.logfeeder.input;
 import java.net.InetAddress;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.Date;
 import java.util.HashMap;
 import java.util.List;
@@ -35,25 +34,23 @@ import org.apache.ambari.logfeeder.filter.Filter;
 import org.apache.ambari.logfeeder.filter.FilterJSON;
 import org.apache.ambari.logfeeder.output.Output;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.FilterJsonDescriptorImpl;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputDescriptorImpl;
 import org.apache.commons.collections.MapUtils;
-import org.apache.log4j.Logger;
 import org.apache.solr.common.util.Base64;
 
 import com.google.common.base.Joiner;
 
 public class InputSimulate extends Input {
-  private static final Logger LOG = Logger.getLogger(InputSimulate.class);
-
   private static final String LOG_TEXT_PATTERN = "{ logtime=\"%d\", level=\"%s\", log_message=\"%s\", host=\"%s\"}";
   
   private static final Map<String, String> typeToFilePath = new HashMap<>();
-  private static List<String> inputTypes = new ArrayList<>();
-  public static void loadTypeToFilePath(List<Map<String, Object>> inputList) {
-    for (Map<String, Object> input : inputList) {
-      if (input.containsKey("type") && input.containsKey("path")) {
-        typeToFilePath.put((String)input.get("type"), (String)input.get("path"));
-        inputTypes.add((String)input.get("type"));
-      }
+  private static final List<String> inputTypes = new ArrayList<>();
+  public static void loadTypeToFilePath(List<InputDescriptor> inputList) {
+    for (InputDescriptor input : inputList) {
+      typeToFilePath.put(input.getType(), input.getPath());
+      inputTypes.add(input.getType());
     }
   }
   
@@ -86,7 +83,7 @@ public class InputSimulate extends Input {
     this.host = "#" + hostNumber.incrementAndGet() + "-" + LogFeederUtil.hostName;
     
     Filter filter = new FilterJSON();
-    filter.loadConfig(Collections.<String, Object> emptyMap());
+    filter.loadConfig(new FilterJsonDescriptorImpl());
     filter.setInput(this);
     addFilter(filter);
   }
@@ -141,7 +138,7 @@ public class InputSimulate extends Input {
     String type = types.get(typePos);
     String filePath = MapUtils.getString(typeToFilePath, type, "path of " + type);
     
-    configs.put("type", type);
+    ((InputDescriptorImpl)inputDescriptor).setType(type);
     setFilePath(filePath);
     
     return type;

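The rewritten loadTypeToFilePath above dropped the old containsKey("type")/containsKey("path") guard, so a descriptor without a type or path now puts null keys or values into typeToFilePath. A sketch that keeps the new typed signature but restores the guard (only the null checks are new):

    public static void loadTypeToFilePath(List<InputDescriptor> inputList) {
      for (InputDescriptor input : inputList) {
        // Mirrors the old map.containsKey("type") && map.containsKey("path") check.
        if (input.getType() != null && input.getPath() != null) {
          typeToFilePath.put(input.getType(), input.getPath());
          inputTypes.add(input.getType());
        }
      }
    }
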
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/loglevelfilter/FilterLogData.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/loglevelfilter/FilterLogData.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/loglevelfilter/FilterLogData.java
index 1f635af..6173f53 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/loglevelfilter/FilterLogData.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/loglevelfilter/FilterLogData.java
@@ -44,7 +44,7 @@ public enum FilterLogData {
   }
 
   public boolean isAllowed(Map<String, Object> jsonObj, InputMarker inputMarker) {
-    if ("audit".equals(inputMarker.input.getConfigs().get(LogFeederConstants.ROW_TYPE)))
+    if ("audit".equals(inputMarker.input.getInputDescriptor().getRowtype()))
       return true;
     
     boolean isAllowed = applyFilter(jsonObj);

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/Mapper.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/Mapper.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/Mapper.java
index 96709c0..5facf76 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/Mapper.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/Mapper.java
@@ -21,12 +21,14 @@ package org.apache.ambari.logfeeder.mapper;
 
 import java.util.Map;
 
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldDescriptor;
+
 public abstract class Mapper {
   private String inputDesc;
   protected String fieldName;
   private String mapClassCode;
 
-  public abstract boolean init(String inputDesc, String fieldName, String mapClassCode, Object mapConfigs);
+  public abstract boolean init(String inputDesc, String fieldName, String mapClassCode, MapFieldDescriptor mapFieldDescriptor);
 
   protected void init(String inputDesc, String fieldName, String mapClassCode) {
     this.inputDesc = inputDesc;

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperDate.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperDate.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperDate.java
index 6a7fad7..5d34c06 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperDate.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperDate.java
@@ -26,6 +26,8 @@ import java.util.Map;
 
 import org.apache.ambari.logfeeder.common.LogFeederConstants;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapDateDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldDescriptor;
 import org.apache.commons.lang.time.DateUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.log4j.Level;
@@ -39,18 +41,11 @@ public class MapperDate extends Mapper {
   private SimpleDateFormat srcDateFormatter=null;
 
   @Override
-  public boolean init(String inputDesc, String fieldName, String mapClassCode, Object mapConfigs) {
+  public boolean init(String inputDesc, String fieldName, String mapClassCode, MapFieldDescriptor mapFieldDescriptor) {
     init(inputDesc, fieldName, mapClassCode);
-    if (!(mapConfigs instanceof Map)) {
-      LOG.fatal("Can't initialize object. mapConfigs class is not of type Map. " + mapConfigs.getClass().getName() +
-        ", map=" + this);
-      return false;
-    }
     
-    @SuppressWarnings("unchecked")
-    Map<String, Object> mapObjects = (Map<String, Object>) mapConfigs;
-    String targetDateFormat = (String) mapObjects.get("target_date_pattern");
-    String srcDateFormat = (String) mapObjects.get("src_date_pattern");
+    String targetDateFormat = ((MapDateDescriptor)mapFieldDescriptor).getTargetDatePattern();
+    String srcDateFormat = ((MapDateDescriptor)mapFieldDescriptor).getSourceDatePattern();
     if (StringUtils.isEmpty(targetDateFormat)) {
       LOG.fatal("Date format for map is empty. " + this);
     } else {

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldCopy.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldCopy.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldCopy.java
index 39e1ff4..a463f49 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldCopy.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldCopy.java
@@ -21,6 +21,8 @@ package org.apache.ambari.logfeeder.mapper;
 
 import java.util.Map;
 
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldCopyDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldDescriptor;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.log4j.Logger;
 
@@ -33,16 +35,9 @@ public class MapperFieldCopy extends Mapper {
   private String copyName = null;
 
   @Override
-  public boolean init(String inputDesc, String fieldName, String mapClassCode, Object mapConfigs) {
+  public boolean init(String inputDesc, String fieldName, String mapClassCode, MapFieldDescriptor mapFieldDescriptor) {
     init(inputDesc, fieldName, mapClassCode);
-    if (!(mapConfigs instanceof Map)) {
-      LOG.fatal("Can't initialize object. mapConfigs class is not of type Map. " + mapConfigs.getClass().getName());
-      return false;
-    }
-    
-    @SuppressWarnings("unchecked")
-    Map<String, Object> mapObjects = (Map<String, Object>) mapConfigs;
-    copyName = (String) mapObjects.get("copy_name");
+    copyName = ((MapFieldCopyDescriptor)mapFieldDescriptor).getCopyName();
     if (StringUtils.isEmpty(copyName)) {
       LOG.fatal("Map copy name is empty.");
       return false;

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldName.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldName.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldName.java
index 9b6e83c..3f160da 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldName.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldName.java
@@ -22,6 +22,8 @@ package org.apache.ambari.logfeeder.mapper;
 import java.util.Map;
 
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldNameDescriptor;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
@@ -35,16 +37,10 @@ public class MapperFieldName extends Mapper {
   private String newValue = null;
 
   @Override
-  public boolean init(String inputDesc, String fieldName, String mapClassCode, Object mapConfigs) {
+  public boolean init(String inputDesc, String fieldName, String mapClassCode, MapFieldDescriptor mapFieldDescriptor) {
     init(inputDesc, fieldName, mapClassCode);
-    if (!(mapConfigs instanceof Map)) {
-      LOG.fatal("Can't initialize object. mapConfigs class is not of type Map. " + mapConfigs.getClass().getName());
-      return false;
-    }
-    
-    @SuppressWarnings("unchecked")
-    Map<String, Object> mapObjects = (Map<String, Object>) mapConfigs;
-    newValue = (String) mapObjects.get("new_fieldname");
+
+    newValue = ((MapFieldNameDescriptor)mapFieldDescriptor).getNewFieldName();
     if (StringUtils.isEmpty(newValue)) {
       LOG.fatal("Map field value is empty.");
       return false;

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldValue.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldValue.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldValue.java
index 87cda65..03ff95b 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldValue.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldValue.java
@@ -22,6 +22,8 @@ package org.apache.ambari.logfeeder.mapper;
 import java.util.Map;
 
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldValueDescriptor;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
@@ -36,17 +38,11 @@ public class MapperFieldValue extends Mapper {
   private String newValue = null;
 
   @Override
-  public boolean init(String inputDesc, String fieldName, String mapClassCode, Object mapConfigs) {
+  public boolean init(String inputDesc, String fieldName, String mapClassCode, MapFieldDescriptor mapFieldDescriptor) {
     init(inputDesc, fieldName, mapClassCode);
-    if (!(mapConfigs instanceof Map)) {
-      LOG.fatal("Can't initialize object. mapConfigs class is not of type Map. " + mapConfigs.getClass().getName());
-      return false;
-    }
     
-    @SuppressWarnings("unchecked")
-    Map<String, Object> mapObjects = (Map<String, Object>) mapConfigs;
-    prevValue = (String) mapObjects.get("pre_value");
-    newValue = (String) mapObjects.get("post_value");
+    prevValue = ((MapFieldValueDescriptor)mapFieldDescriptor).getPreValue();
+    newValue = ((MapFieldValueDescriptor)mapFieldDescriptor).getPostValue();
     if (StringUtils.isEmpty(newValue)) {
       LOG.fatal("Map field value is empty.");
       return false;

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/Output.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/Output.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/Output.java
index bc6a553..65b9e19 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/Output.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/Output.java
@@ -28,11 +28,8 @@ import org.apache.ambari.logfeeder.common.ConfigBlock;
 import org.apache.ambari.logfeeder.input.InputMarker;
 import org.apache.ambari.logfeeder.metrics.MetricData;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
-import org.apache.log4j.Logger;
 
 public abstract class Output extends ConfigBlock {
-  private static final Logger LOG = Logger.getLogger(Output.class);
-
   private String destination = null;
 
   protected MetricData writeBytesMetric = new MetricData(getWriteBytesMetricName(), false);

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputLineFilter.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputLineFilter.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputLineFilter.java
index fcf2695..8308a4f 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputLineFilter.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputLineFilter.java
@@ -41,7 +41,7 @@ public class OutputLineFilter {
   public Boolean apply(Map<String, Object> lineMap, Input input) {
     boolean isLogFilteredOut = false;
     LRUCache inputLruCache = input.getCache();
-    if (inputLruCache != null && "service".equals(input.getConfigs().get(LogFeederConstants.ROW_TYPE))) {
+    if (inputLruCache != null && "service".equals(input.getInputDescriptor().getRowtype())) {
       String logMessage = (String) lineMap.get(input.getCacheKeyField());
       Long timestamp = null;
       if (lineMap.containsKey((LogFeederConstants.IN_MEMORY_TIMESTAMP))) {


[49/50] [abbrv] ambari git commit: Merge remote-tracking branch 'origin/trunk' into ambari-rest-api-explorer

Posted by ad...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/651bdcbd/ambari-server/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/651bdcbd/ambari-server/src/main/assemblies/server.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/651bdcbd/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/651bdcbd/ambari-server/src/main/java/org/apache/ambari/server/controller/RequestRequest.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/RequestRequest.java
index 5dfc148,05c4bad..ca9cf4c
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/RequestRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/RequestRequest.java
@@@ -39,8 -37,9 +39,10 @@@ public class RequestRequest 
  
    private String abortReason;
  
+   private boolean removePendingHostRequests = false;
+ 
  
 +  @ApiModelProperty(name = "request_status", notes = "Only valid value is ABORTED.")
    public HostRoleStatus getStatus() {
      return status;
    }

http://git-wip-us.apache.org/repos/asf/ambari/blob/651bdcbd/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ArtifactResourceProvider.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/651bdcbd/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/651bdcbd/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/651bdcbd/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
index 1fc4bd5,d82ff25..f41eb26
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
@@@ -91,40 -92,42 +92,44 @@@ public class RequestResourceProvider ex
  
    // ----- Property ID constants ---------------------------------------------
    // Requests
 -  public static final String REQUEST_CLUSTER_NAME_PROPERTY_ID = "Requests/cluster_name";
 -  public static final String REQUEST_CLUSTER_ID_PROPERTY_ID = "Requests/cluster_id";
 -  public static final String REQUEST_ID_PROPERTY_ID = "Requests/id";
 -  protected static final String REQUEST_STATUS_PROPERTY_ID = "Requests/request_status";
 -  protected static final String REQUEST_ABORT_REASON_PROPERTY_ID = "Requests/abort_reason";
 -  protected static final String REQUEST_CONTEXT_ID = "Requests/request_context";
 -  public static final String REQUEST_SOURCE_SCHEDULE = "Requests/request_schedule";
 -  public static final String REQUEST_SOURCE_SCHEDULE_ID = "Requests/request_schedule/schedule_id";
 -  public static final String REQUEST_SOURCE_SCHEDULE_HREF = "Requests/request_schedule/href";
 -  protected static final String REQUEST_TYPE_ID = "Requests/type";
 -  protected static final String REQUEST_INPUTS_ID = "Requests/inputs";
 -  protected static final String REQUEST_CLUSTER_HOST_INFO_ID = "Requests/cluster_host_info";
 -  protected static final String REQUEST_RESOURCE_FILTER_ID = "Requests/resource_filters";
 -  protected static final String REQUEST_OPERATION_LEVEL_ID = "Requests/operation_level";
 -  protected static final String REQUEST_CREATE_TIME_ID = "Requests/create_time";
 -  protected static final String REQUEST_START_TIME_ID = "Requests/start_time";
 -  protected static final String REQUEST_END_TIME_ID = "Requests/end_time";
 -  protected static final String REQUEST_EXCLUSIVE_ID = "Requests/exclusive";
 -  protected static final String REQUEST_TASK_CNT_ID = "Requests/task_count";
 -  protected static final String REQUEST_FAILED_TASK_CNT_ID = "Requests/failed_task_count";
 -  protected static final String REQUEST_ABORTED_TASK_CNT_ID = "Requests/aborted_task_count";
 -  protected static final String REQUEST_TIMED_OUT_TASK_CNT_ID = "Requests/timed_out_task_count";
 -  protected static final String REQUEST_COMPLETED_TASK_CNT_ID = "Requests/completed_task_count";
 -  protected static final String REQUEST_QUEUED_TASK_CNT_ID = "Requests/queued_task_count";
 -  protected static final String REQUEST_PROGRESS_PERCENT_ID = "Requests/progress_percent";
 -  protected static final String REQUEST_REMOVE_PENDING_HOST_REQUESTS_ID = "Requests/remove_pending_host_requests";
 -  protected static final String REQUEST_PENDING_HOST_REQUEST_COUNT_ID = "Requests/pending_host_request_count";
 +  public static final String REQUESTS = "Requests";
 +  public static final String REQUEST_INFO = "RequestInfo";
 +  public static final String REQUEST_CLUSTER_NAME_PROPERTY_ID = REQUESTS + "/cluster_name";
 +  public static final String REQUEST_CLUSTER_ID_PROPERTY_ID = REQUESTS + "/cluster_id";
 +  public static final String REQUEST_ID_PROPERTY_ID = REQUESTS + "/id";
 +  public static final String REQUEST_STATUS_PROPERTY_ID = REQUESTS + "/request_status";
 +  public static final String REQUEST_ABORT_REASON_PROPERTY_ID = REQUESTS + "/abort_reason";
 +  public static final String REQUEST_CONTEXT_ID = REQUESTS + "/request_context";
 +  public static final String REQUEST_SOURCE_SCHEDULE = REQUESTS + "/request_schedule";
 +  public static final String REQUEST_SOURCE_SCHEDULE_ID = REQUESTS + "/request_schedule/schedule_id";
 +  public static final String REQUEST_SOURCE_SCHEDULE_HREF = REQUESTS + "/request_schedule/href";
 +  public static final String REQUEST_TYPE_ID = REQUESTS + "/type";
 +  public static final String REQUEST_INPUTS_ID = REQUESTS + "/inputs";
++  public static final String REQUEST_CLUSTER_HOST_INFO_ID = REQUESTS + "/cluster_host_info";
 +  public static final String REQUEST_RESOURCE_FILTER_ID = REQUESTS + "/resource_filters";
 +  public static final String REQUEST_OPERATION_LEVEL_ID = REQUESTS + "/operation_level";
 +  public static final String REQUEST_CREATE_TIME_ID = REQUESTS + "/create_time";
 +  public static final String REQUEST_START_TIME_ID = REQUESTS + "/start_time";
 +  public static final String REQUEST_END_TIME_ID = REQUESTS + "/end_time";
 +  public static final String REQUEST_EXCLUSIVE_ID = REQUESTS + "/exclusive";
 +  public static final String REQUEST_TASK_CNT_ID = REQUESTS + "/task_count";
 +  public static final String REQUEST_FAILED_TASK_CNT_ID = REQUESTS + "/failed_task_count";
 +  public static final String REQUEST_ABORTED_TASK_CNT_ID = REQUESTS + "/aborted_task_count";
 +  public static final String REQUEST_TIMED_OUT_TASK_CNT_ID = REQUESTS + "/timed_out_task_count";
 +  public static final String REQUEST_COMPLETED_TASK_CNT_ID = REQUESTS + "/completed_task_count";
 +  public static final String REQUEST_QUEUED_TASK_CNT_ID = REQUESTS + "/queued_task_count";
 +  public static final String REQUEST_PROGRESS_PERCENT_ID = REQUESTS + "/progress_percent";
++  public static final String REQUEST_REMOVE_PENDING_HOST_REQUESTS_ID = REQUESTS + "/remove_pending_host_requests";
++  public static final String REQUEST_PENDING_HOST_REQUEST_COUNT_ID = REQUESTS + "/pending_host_request_count";
 +  public static final String COMMAND_ID = "command";
 +  public static final String SERVICE_ID = "service_name";
 +  public static final String COMPONENT_ID = "component_name";
 +  public static final String HOSTS_ID = "hosts"; // This is actually a list of hosts
 +  public static final String HOSTS_PREDICATE = "hosts_predicate";
 +  public static final String ACTION_ID = "action";
 +  public static final String INPUTS_ID = "parameters";
 +  public static final String EXLUSIVE_ID = "exclusive";
+ 
 -  protected static final String COMMAND_ID = "command";
 -  protected static final String SERVICE_ID = "service_name";
 -  protected static final String COMPONENT_ID = "component_name";
 -  protected static final String HOSTS_ID = "hosts"; // This is actually a list of hosts
 -  protected static final String HOSTS_PREDICATE = "hosts_predicate";
 -  protected static final String ACTION_ID = "action";
 -  protected static final String INPUTS_ID = "parameters";
 -  protected static final String EXLUSIVE_ID = "exclusive";
    private static Set<String> pkPropertyIds =
      new HashSet<>(Arrays.asList(new String[]{
        REQUEST_ID_PROPERTY_ID}));
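
The refactor above derives every request property ID from a single REQUESTS category constant instead of repeating the "Requests/" literal per field. A minimal standalone sketch of the pattern (class and field selection here are illustrative, not part of the commit):

  // One category constant; every ID is composed from it, so a rename or a
  // typo in the category is caught in exactly one place. Because these are
  // compile-time String constants, the compiler folds the concatenation and
  // the composed form costs nothing at runtime versus the old literals.
  public final class RequestPropertyIdSketch {
    public static final String REQUESTS = "Requests";
    public static final String REQUEST_ID_PROPERTY_ID = REQUESTS + "/id";
    public static final String REQUEST_STATUS_PROPERTY_ID = REQUESTS + "/request_status";

    public static void main(String[] args) {
      System.out.println(REQUEST_ID_PROPERTY_ID);      // Requests/id
      System.out.println(REQUEST_STATUS_PROPERTY_ID);  // Requests/request_status
    }
  }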

http://git-wip-us.apache.org/repos/asf/ambari/blob/651bdcbd/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ScaleClusterRequest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/651bdcbd/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/651bdcbd/ambari-server/src/main/java/org/apache/ambari/server/state/ValueAttributesInfo.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/651bdcbd/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/651bdcbd/ambari-server/src/main/java/org/apache/ambari/server/view/ViewRegistry.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/651bdcbd/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --cc ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 7e4c4c2,cbd5de3..83ba0bb
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@@ -8998,9 -9017,11 +8997,11 @@@ public class AmbariManagementController
  
      Assert.assertNull(topologyHostInfoDAO.findByHostname(host1));
  
+     Long firstHostId = clusters.getHost(host1).getHostId();
+ 
      // Deletion without specifying cluster should be successful
      requests.clear();
 -    requests.add(new HostRequest(host1, null, null));
 +    requests.add(new HostRequest(host1, null));
      try {
        HostResourceProviderTest.deleteHosts(controller, requests);
      } catch (Exception e) {
@@@ -9011,9 -9032,13 +9012,13 @@@
      Assert.assertFalse(clusters.getClustersForHost(host1).contains(cluster));
      Assert.assertNull(topologyHostInfoDAO.findByHostname(host1));
  
+     // verify there are no host role commands for the host
+     List<HostRoleCommandEntity> tasks = hostRoleCommandDAO.findByHostId(firstHostId);
+     assertEquals(0, tasks.size());
+ 
      // Case 3: Delete host that is still part of the cluster, and specify the cluster_name in the request
      requests.clear();
 -    requests.add(new HostRequest(host2, cluster1, null));
 +    requests.add(new HostRequest(host2, cluster1));
      try {
        HostResourceProviderTest.deleteHosts(controller, requests);
      } catch (Exception e) {
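
The assertions added above pin down a cascade: after a host is deleted, hostRoleCommandDAO.findByHostId must return an empty list for its former host id. A hedged, self-contained sketch of that check (the map-backed "DAO" below is a stand-in, not the test's real wiring):

  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.Collections;
  import java.util.HashMap;
  import java.util.List;
  import java.util.Map;

  public class HostDeleteSketch {
    // Stand-in for hostRoleCommandDAO.findByHostId: task lists keyed by host id.
    static final Map<Long, List<String>> TASKS_BY_HOST = new HashMap<>();

    static void deleteHost(long hostId) {
      // Deleting a host must also remove its host role commands; this is
      // exactly what the new assertEquals(0, tasks.size()) guards against.
      TASKS_BY_HOST.remove(hostId);
    }

    public static void main(String[] args) {
      long firstHostId = 1L; // captured before deletion, as in the test
      TASKS_BY_HOST.put(firstHostId, new ArrayList<>(Arrays.asList("INSTALL", "START")));
      deleteHost(firstHostId);
      List<String> tasks = TASKS_BY_HOST.getOrDefault(firstHostId, Collections.<String>emptyList());
      System.out.println("leftover tasks: " + tasks.size()); // 0
    }
  }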

http://git-wip-us.apache.org/repos/asf/ambari/blob/651bdcbd/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java
----------------------------------------------------------------------
diff --cc ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java
index 01dd8e3,8772b24..78752dc
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java
@@@ -1127,9 -1145,11 +1127,11 @@@ public class HostResourceProviderTest e
      HostResponse response = createNiceMock(HostResponse.class);
  
      Set<Cluster> setCluster = Collections.singleton(cluster);
+     Map<String, DesiredConfig> desiredConfigs = new HashMap<>();
+     Map<String, HostConfig> desiredHostConfigs = new HashMap<>();
  
      // requests
 -    HostRequest request1 = new HostRequest("host1", "cluster1", Collections.<String, String>emptyMap());
 +    HostRequest request1 = new HostRequest("host1", "cluster1");
  
      Set<HostRequest> setRequests = new HashSet<>();
      setRequests.add(request1);

http://git-wip-us.apache.org/repos/asf/ambari/blob/651bdcbd/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequestTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/651bdcbd/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ScaleClusterRequestTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/651bdcbd/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/651bdcbd/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/651bdcbd/ambari-web/pom.xml
----------------------------------------------------------------------


[46/50] [abbrv] ambari git commit: AMBARI-20904. WFM: Include an option to clear filters in workflow dashboard (Anita Jebaraj via Venkata Sairam)

Posted by ad...@apache.org.
AMBARI-20904. WFM: Include an option to clear filters in workflow dashboard (Anita Jebaraj via Venkata Sairam)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/201677be
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/201677be
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/201677be

Branch: refs/heads/ambari-rest-api-explorer
Commit: 201677be8deec6e3d898126fbc23b2114be2fa4c
Parents: 9415478
Author: Venkata Sairam <ve...@gmail.com>
Authored: Tue May 23 08:42:36 2017 +0530
Committer: Venkata Sairam <ve...@gmail.com>
Committed: Tue May 23 08:42:36 2017 +0530

----------------------------------------------------------------------
 .../ui/app/components/search-create-new-bar.js          | 12 +++++++++++-
 .../app/templates/components/search-create-new-bar.hbs  |  4 ++++
 2 files changed, 15 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/201677be/contrib/views/wfmanager/src/main/resources/ui/app/components/search-create-new-bar.js
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/components/search-create-new-bar.js b/contrib/views/wfmanager/src/main/resources/ui/app/components/search-create-new-bar.js
index 4bfb5fa..ed761c7 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/components/search-create-new-bar.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/components/search-create-new-bar.js
@@ -168,7 +168,10 @@ export default Ember.Component.extend(Ember.Evented,{
       }
       this.sendAction('onSearch', { type: this.get('jobType'), filter: this.getAllFilters() });
     },
-
+    doClearFilters(){
+      this.filter={};
+      this.sendAction('onSearch', { type: this.get('jobType'), filter: this.getAllFilters() });
+    },
     getAllFilters(){
       var allFilters = [];
       Object.keys(this.filter).forEach(function(value){
@@ -204,6 +207,13 @@ export default Ember.Component.extend(Ember.Evented,{
             this.$("#endDate").trigger("dp.show");
           }
         },
+        clearFilters() {
+          this.$("#startDate").val('');
+          this.$("#endDate").val('');
+          this.$('#search-field').tagsinput('removeAll');
+          this.$('.tt-input').val('');
+          this.doClearFilters();
+        },
         onClear(type) {
           if (type ==='start' && this.get('startDate') === "") {
             this.filterByDate("", type);

http://git-wip-us.apache.org/repos/asf/ambari/blob/201677be/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/search-create-new-bar.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/search-create-new-bar.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/search-create-new-bar.hbs
index b9c6029..8bdc768 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/search-create-new-bar.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/search-create-new-bar.hbs
@@ -38,6 +38,10 @@
     <input type='text' class="form-control" value={{endDate}} id='endDate' title="Nominal End Date" placeholder="Nominal End Date" {{action 'onClear' 'end' on="change"}}/>
   </div>
   <div class="form-group">
+    <span title="Clear Filters" {{action 'clearFilters'}} class="fa fa-close fa-1 pointer btn btn-default" aria-hidden="true"></span>
+  </div>
+  <div class="form-group">
     <span class="fa fa-refresh fa-1 pointer btn btn-default" title="Refresh" {{action 'refresh'}} aria-hidden="true"></span>
   </div>
 </div>
+


[48/50] [abbrv] ambari git commit: AMBARI-21084. Files view on IE 11: on concatenating files or downloading, the concatenated or downloaded file occupies the entire UI. (Venkata Sairam)

Posted by ad...@apache.org.
AMBARI-21084. Files view on IE 11: on concatenating files or downloading, the concatenated or downloaded file occupies the entire UI. (Venkata Sairam)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7c929539
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7c929539
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7c929539

Branch: refs/heads/ambari-rest-api-explorer
Commit: 7c9295395923a95ada6e51100b14d1e535590caa
Parents: 32501f6
Author: Venkata Sairam <ve...@gmail.com>
Authored: Tue May 23 14:26:15 2017 +0530
Committer: Venkata Sairam <ve...@gmail.com>
Committed: Tue May 23 14:26:15 2017 +0530

----------------------------------------------------------------------
 .../java/org/apache/ambari/view/filebrowser/DownloadService.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7c929539/contrib/views/files/src/main/java/org/apache/ambari/view/filebrowser/DownloadService.java
----------------------------------------------------------------------
diff --git a/contrib/views/files/src/main/java/org/apache/ambari/view/filebrowser/DownloadService.java b/contrib/views/files/src/main/java/org/apache/ambari/view/filebrowser/DownloadService.java
index 10b7c9e..1334c06 100644
--- a/contrib/views/files/src/main/java/org/apache/ambari/view/filebrowser/DownloadService.java
+++ b/contrib/views/files/src/main/java/org/apache/ambari/view/filebrowser/DownloadService.java
@@ -112,7 +112,7 @@ public class DownloadService extends HdfsService {
       ResponseBuilder result = Response.ok(fs);
       if (download) {
         result.header("Content-Disposition",
-          "inline; filename=\"" + status.getPath().getName() + "\"").type(MediaType.APPLICATION_OCTET_STREAM);
+          "attachment; filename=\"" + status.getPath().getName() + "\"").type(MediaType.APPLICATION_OCTET_STREAM);
       } else {
         FileNameMap fileNameMap = URLConnection.getFileNameMap();
         String mimeType = fileNameMap.getContentTypeFor(status.getPath().getName());
@@ -278,7 +278,7 @@ public class DownloadService extends HdfsService {
       };
       ResponseBuilder response = Response.ok(result);
       if (request.download) {
-        response.header("Content-Disposition", "inline; filename=\"concatResult.txt\"").type(MediaType.APPLICATION_OCTET_STREAM);
+        response.header("Content-Disposition", "attachment; filename=\"concatResult.txt\"").type(MediaType.APPLICATION_OCTET_STREAM);
       } else {
         response.header("Content-Disposition", "filename=\"concatResult.txt\"").type(MediaType.TEXT_PLAIN);
       }
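
The one-word change above, "inline" to "attachment", is what makes IE 11 save the stream to disk instead of rendering it over the current page. A minimal JAX-RS sketch of the same header (class and method names are illustrative, not the view's code):

  import javax.ws.rs.core.MediaType;
  import javax.ws.rs.core.Response;

  public class DownloadSketch {
    // "attachment" asks the browser to save the body under fileName;
    // "inline" lets it render the bytes in the current window instead,
    // which is the behavior the bug report describes.
    public static Response asDownload(byte[] bytes, String fileName) {
      return Response.ok(bytes)
          .header("Content-Disposition", "attachment; filename=\"" + fileName + "\"")
          .type(MediaType.APPLICATION_OCTET_STREAM)
          .build();
    }
  }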


[36/50] [abbrv] ambari git commit: AMBARI-21033 Log Search use POJOs for input configuration (mgergely)

Posted by ad...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputManager.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputManager.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputManager.java
index ba872f8..4d6c43b 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputManager.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputManager.java
@@ -70,7 +70,7 @@ public class OutputManager {
     Input input = inputMarker.input;
 
     // Update the block with the context fields
-    for (Map.Entry<String, String> entry : input.getContextFields().entrySet()) {
+    for (Map.Entry<String, String> entry : input.getInputDescriptor().getAddFields().entrySet()) {
       if (jsonObj.get(entry.getKey()) == null || entry.getKey().equals("cluster") && "null".equals(jsonObj.get(entry.getKey()))) {
         jsonObj.put(entry.getKey(), entry.getValue());
       }
@@ -79,13 +79,13 @@ public class OutputManager {
     // TODO: Ideally most of the overrides should be configurable
 
     if (jsonObj.get("type") == null) {
-      jsonObj.put("type", input.getStringValue("type"));
+      jsonObj.put("type", input.getInputDescriptor().getType());
     }
     if (jsonObj.get("path") == null && input.getFilePath() != null) {
       jsonObj.put("path", input.getFilePath());
     }
-    if (jsonObj.get("path") == null && input.getStringValue("path") != null) {
-      jsonObj.put("path", input.getStringValue("path"));
+    if (jsonObj.get("path") == null && input.getInputDescriptor().getPath() != null) {
+      jsonObj.put("path", input.getInputDescriptor().getPath());
     }
     if (jsonObj.get("host") == null && LogFeederUtil.hostName != null) {
       jsonObj.put("host", LogFeederUtil.hostName);
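
For context on the POJO migration above: stringly-keyed reads such as input.getStringValue("type") become typed getters on a descriptor, so a mistyped key fails at compile time instead of returning null at runtime. A minimal sketch of the difference (the descriptor class is illustrative, not the Log Search API):

  import java.util.HashMap;
  import java.util.Map;

  public class PojoVsMapSketch {
    // Illustrative stand-in for an input descriptor POJO.
    static class InputDescriptor {
      private String type;
      String getType() { return type; }
      void setType(String type) { this.type = type; }
    }

    public static void main(String[] args) {
      Map<String, Object> config = new HashMap<>();
      config.put("type", "hdfs_datanode");
      Object fromMap = config.get("tpye");    // typo compiles fine, yields null

      InputDescriptor descriptor = new InputDescriptor();
      descriptor.setType("hdfs_datanode");
      String fromPojo = descriptor.getType(); // the same typo would not compile
      System.out.println(fromMap + " vs " + fromPojo);
    }
  }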

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputS3File.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputS3File.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputS3File.java
index d0f51b2..076d12d 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputS3File.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputS3File.java
@@ -19,9 +19,6 @@
 package org.apache.ambari.logfeeder.output;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.gson.Gson;
-import com.google.gson.GsonBuilder;
-import org.apache.ambari.logfeeder.common.ConfigHandler;
 import org.apache.ambari.logfeeder.common.LogFeederConstants;
 import org.apache.ambari.logfeeder.filter.Filter;
 import org.apache.ambari.logfeeder.input.InputMarker;
@@ -31,11 +28,18 @@ import org.apache.ambari.logfeeder.output.spool.RolloverCondition;
 import org.apache.ambari.logfeeder.output.spool.RolloverHandler;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
 import org.apache.ambari.logfeeder.util.S3Util;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputS3FileDescriptor;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputConfigGson;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputConfigImpl;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputDescriptorImpl;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputS3FileDescriptorImpl;
 import org.apache.log4j.Logger;
 
 import java.io.File;
-import java.util.*;
-import java.util.Map.Entry;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
 
 
 /**
@@ -50,7 +54,6 @@ import java.util.Map.Entry;
 public class OutputS3File extends Output implements RolloverCondition, RolloverHandler {
   private static final Logger LOG = Logger.getLogger(OutputS3File.class);
 
-  public static final String INPUT_ATTRIBUTE_TYPE = "type";
   public static final String GLOBAL_CONFIG_S3_PATH_SUFFIX = "global.config.json";
 
   private LogSpooler logSpooler;
@@ -72,9 +75,9 @@ public class OutputS3File extends Output implements RolloverCondition, RolloverH
    */
   @Override
   public void copyFile(File inputFile, InputMarker inputMarker) {
-    String type = inputMarker.input.getStringValue(INPUT_ATTRIBUTE_TYPE);
+    String type = inputMarker.input.getInputDescriptor().getType();
     S3Uploader s3Uploader = new S3Uploader(s3OutputConfiguration, false, type);
-    String resolvedPath = s3Uploader.uploadFile(inputFile, inputMarker.input.getStringValue(INPUT_ATTRIBUTE_TYPE));
+    String resolvedPath = s3Uploader.uploadFile(inputFile, inputMarker.input.getInputDescriptor().getType());
 
     uploadConfig(inputMarker, type, s3OutputConfiguration, resolvedPath);
   }
@@ -82,43 +85,43 @@ public class OutputS3File extends Output implements RolloverCondition, RolloverH
   private void uploadConfig(InputMarker inputMarker, String type, S3OutputConfiguration s3OutputConfiguration,
       String resolvedPath) {
 
-    ArrayList<Map<String, Object>> filters = new ArrayList<>();
+    ArrayList<FilterDescriptor> filters = new ArrayList<>();
     addFilters(filters, inputMarker.input.getFirstFilter());
-    Map<String, Object> inputConfig = new HashMap<>();
-    inputConfig.putAll(inputMarker.input.getConfigs());
+    InputS3FileDescriptor inputS3FileDescriptorOriginal = (InputS3FileDescriptor) inputMarker.input.getInputDescriptor();
+    InputS3FileDescriptorImpl inputS3FileDescriptor = InputConfigGson.gson.fromJson(
+        InputConfigGson.gson.toJson(inputS3FileDescriptorOriginal), InputS3FileDescriptorImpl.class);
     String s3CompletePath = LogFeederConstants.S3_PATH_START_WITH + s3OutputConfiguration.getS3BucketName() +
         LogFeederConstants.S3_PATH_SEPARATOR + resolvedPath;
-    inputConfig.put("path", s3CompletePath);
+    inputS3FileDescriptor.setPath(s3CompletePath);
 
-    ArrayList<Map<String, Object>> inputConfigList = new ArrayList<>();
-    inputConfigList.add(inputConfig);
+    ArrayList<InputDescriptorImpl> inputConfigList = new ArrayList<>();
+    inputConfigList.add(inputS3FileDescriptor);
     // set source s3_file
-    // remove global config from filter config
-    removeGlobalConfig(inputConfigList);
-    removeGlobalConfig(filters);
+    // remove global config from input config
+    removeS3GlobalConfig(inputS3FileDescriptor);
     // write config into s3 file
-    Map<String, Object> config = new HashMap<>();
-    config.put("filter", filters);
-    config.put("input", inputConfigList);
-    writeConfigToS3(config, getComponentConfigFileName(type), s3OutputConfiguration);
+    InputConfigImpl inputConfig = new InputConfigImpl();
+    inputConfig.setInput(inputConfigList);
+    
+    writeConfigToS3(inputConfig, getComponentConfigFileName(type), s3OutputConfiguration);
     // write global config
     writeGlobalConfig(s3OutputConfiguration);
   }
 
-  private void addFilters(ArrayList<Map<String, Object>> filters, Filter filter) {
+  private void addFilters(ArrayList<FilterDescriptor> filters, Filter filter) {
     if (filter != null) {
-      Map<String, Object> filterConfig = new HashMap<String, Object>();
-      filterConfig.putAll(filter.getConfigs());
-      filters.add(filterConfig);
+      FilterDescriptor filterDescriptorOriginal = filter.getFilterDescriptor();
+      FilterDescriptor filterDescriptor = InputConfigGson.gson.fromJson(
+          InputConfigGson.gson.toJson(filterDescriptorOriginal), filterDescriptorOriginal.getClass());
+      filters.add(filterDescriptor);
       if (filter.getNextFilter() != null) {
         addFilters(filters, filter.getNextFilter());
       }
     }
   }
 
-  private void writeConfigToS3(Map<String, Object> configToWrite, String s3KeySuffix, S3OutputConfiguration s3OutputConfiguration) {
-    Gson gson = new GsonBuilder().setPrettyPrinting().create();
-    String configJson = gson.toJson(configToWrite);
+  private void writeConfigToS3(Object config, String s3KeySuffix, S3OutputConfiguration s3OutputConfiguration) {
+    String configJson = InputConfigGson.gson.toJson(config);
 
     String s3ResolvedKey = new S3LogPathResolver().getResolvedPath(getStringValue("s3_config_dir"), s3KeySuffix,
         s3OutputConfiguration.getCluster());
@@ -131,31 +134,14 @@ public class OutputS3File extends Output implements RolloverCondition, RolloverH
     return "input.config-" + componentName + ".json";
   }
 
-
-  private Map<String, Object> getGlobalConfig() {
-    Map<String, Object> globalConfig = ConfigHandler.globalConfigs;
-    if (globalConfig == null) {
-      globalConfig = new HashMap<>();
-    }
-    return globalConfig;
-  }
-
-  private void removeGlobalConfig(List<Map<String, Object>> configList) {
-    Map<String, Object> globalConfig = getGlobalConfig();
-    if (configList != null && globalConfig != null) {
-      for (Entry<String, Object> globalConfigEntry : globalConfig.entrySet()) {
-        if (globalConfigEntry != null) {
-          String globalKey = globalConfigEntry.getKey();
-          if (globalKey != null && !globalKey.trim().isEmpty()) {
-            for (Map<String, Object> config : configList) {
-              if (config != null) {
-                config.remove(globalKey);
-              }
-            }
-          }
-        }
-      }
-    }
+  private void removeS3GlobalConfig(InputS3FileDescriptorImpl inputS3FileDescriptor) {
+    inputS3FileDescriptor.setSource(null);
+    inputS3FileDescriptor.setCopyFile(null);
+    inputS3FileDescriptor.setProcessFile(null);
+    inputS3FileDescriptor.setTail(null);
+    inputS3FileDescriptor.getAddFields().remove("ip");
+    inputS3FileDescriptor.getAddFields().remove("host");
+    inputS3FileDescriptor.getAddFields().remove("bundle_id");
   }
 
   /**
@@ -164,7 +150,7 @@ public class OutputS3File extends Output implements RolloverCondition, RolloverH
   @SuppressWarnings("unchecked")
   private synchronized void writeGlobalConfig(S3OutputConfiguration s3OutputConfiguration) {
     if (!uploadedGlobalConfig) {
-      Map<String, Object> globalConfig = LogFeederUtil.cloneObject(getGlobalConfig());
+      Map<String, Object> globalConfig = new HashMap<>();
       //updating global config before write to s3
       globalConfig.put("source", "s3_file");
       globalConfig.put("copy_file", false);
@@ -205,7 +191,7 @@ public class OutputS3File extends Output implements RolloverCondition, RolloverH
   public void write(String block, InputMarker inputMarker) throws Exception {
     if (logSpooler == null) {
       logSpooler = createSpooler(inputMarker.input.getFilePath());
-      s3Uploader = createUploader(inputMarker.input.getStringValue(INPUT_ATTRIBUTE_TYPE));
+      s3Uploader = createUploader(inputMarker.input.getInputDescriptor().getType());
     }
     logSpooler.add(block);
   }
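
The uploadConfig and addFilters changes above lean on a Gson round trip, gson.fromJson(gson.toJson(obj), cls), to clone a descriptor before mutating it for upload. A standalone sketch of that deep-copy idiom (the Descriptor type is illustrative):

  import com.google.gson.Gson;

  public class GsonDeepCopySketch {
    static class Descriptor {
      String path;
    }

    public static void main(String[] args) {
      Gson gson = new Gson();
      Descriptor original = new Descriptor();
      original.path = "/var/log/app.log";

      // Serialize and parse back: a deep copy, fully detached from the
      // original, so the upload path can be rewritten without touching
      // the live input descriptor.
      Descriptor copy = gson.fromJson(gson.toJson(original), Descriptor.class);
      copy.path = "s3://bucket/resolved/path";

      System.out.println(original.path); // /var/log/app.log (unchanged)
      System.out.println(copy.path);     // s3://bucket/resolved/path
    }
  }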

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/util/LogFeederUtil.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/util/LogFeederUtil.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/util/LogFeederUtil.java
index 1929178..d8a1fbb 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/util/LogFeederUtil.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/util/LogFeederUtil.java
@@ -27,13 +27,11 @@ import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.HashMap;
 import java.util.Hashtable;
-import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 
 import org.apache.ambari.logfeeder.LogFeeder;
 import org.apache.ambari.logfeeder.metrics.MetricData;
-import org.apache.commons.collections.MapUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
@@ -205,55 +203,6 @@ public class LogFeederUtil {
     return retValue;
   }
 
-  @SuppressWarnings("unchecked")
-  public static boolean isEnabled(Map<String, Object> conditionConfigs, Map<String, Object> valueConfigs) {
-    Map<String, Object> conditions = (Map<String, Object>) conditionConfigs.get("conditions");
-    if (MapUtils.isEmpty(conditions)) {
-      return toBoolean((String) valueConfigs.get("is_enabled"), true);
-    }
-    
-    for (String conditionType : conditions.keySet()) {
-      if (!conditionType.equalsIgnoreCase("fields")) {
-        continue;
-      }
-      
-      Map<String, Object> fields = (Map<String, Object>) conditions.get("fields");
-      for (Map.Entry<String, Object> field : fields.entrySet()) {
-        if (field.getValue() instanceof String) {
-          if (isFieldConditionMatch(valueConfigs, field.getKey(), (String) field.getValue())) {
-            return true;
-          }
-        } else {
-          for (String stringValue : (List<String>) field.getValue()) {
-            if (isFieldConditionMatch(valueConfigs, field.getKey(), stringValue)) {
-              return true;
-            }
-          }
-        }
-      }
-    }
-    
-    return false;
-  }
-
-  private static boolean isFieldConditionMatch(Map<String, Object> configs, String fieldName, String stringValue) {
-    boolean allow = false;
-    String fieldValue = (String) configs.get(fieldName);
-    if (fieldValue != null && fieldValue.equalsIgnoreCase(stringValue)) {
-      allow = true;
-    } else {
-      @SuppressWarnings("unchecked")
-      Map<String, Object> addFields = (Map<String, Object>) configs.get("add_fields");
-      if (addFields != null && addFields.get(fieldName) != null) {
-        String addFieldValue = (String) addFields.get(fieldName);
-        if (stringValue.equalsIgnoreCase(addFieldValue)) {
-          allow = true;
-        }
-      }
-    }
-    return allow;
-  }
-
   public static void logStatForMetric(MetricData metric, String prefixStr, String postFix) {
     long currStat = metric.value;
     long currMS = System.currentTimeMillis();

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterGrokTest.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterGrokTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterGrokTest.java
index 99565c5..8d7e86c 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterGrokTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterGrokTest.java
@@ -18,12 +18,13 @@
 
 package org.apache.ambari.logfeeder.filter;
 
-import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.ambari.logfeeder.input.Input;
 import org.apache.ambari.logfeeder.input.InputMarker;
 import org.apache.ambari.logfeeder.output.OutputManager;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterGrokDescriptor;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.FilterGrokDescriptorImpl;
 import org.apache.log4j.Logger;
 import org.easymock.Capture;
 import org.easymock.CaptureType;
@@ -43,12 +44,12 @@ public class FilterGrokTest {
   private OutputManager mockOutputManager;
   private Capture<Map<String, Object>> capture;
 
-  public void init(Map<String, Object> config) throws Exception {
+  public void init(FilterGrokDescriptor filterGrokDescriptor) throws Exception {
     mockOutputManager = EasyMock.strictMock(OutputManager.class);
     capture = EasyMock.newCapture(CaptureType.LAST);
 
     filterGrok = new FilterGrok();
-    filterGrok.loadConfig(config);
+    filterGrok.loadConfig(filterGrokDescriptor);
     filterGrok.setOutputManager(mockOutputManager);
     filterGrok.setInput(EasyMock.mock(Input.class));
     filterGrok.init();
@@ -58,10 +59,10 @@ public class FilterGrokTest {
   public void testFilterGrok_parseMessage() throws Exception {
     LOG.info("testFilterGrok_parseMessage()");
 
-    Map<String, Object> config = new HashMap<String, Object>();
-    config.put("message_pattern", "(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{GREEDYDATA:log_message}");
-    config.put("multiline_pattern", "^(%{TIMESTAMP_ISO8601:logtime})");
-    init(config);
+    FilterGrokDescriptorImpl filterGrokDescriptor = new FilterGrokDescriptorImpl();
+    filterGrokDescriptor.setMessagePattern("(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{GREEDYDATA:log_message}");
+    filterGrokDescriptor.setMultilinePattern("^(%{TIMESTAMP_ISO8601:logtime})");
+    init(filterGrokDescriptor);
 
     mockOutputManager.write(EasyMock.capture(capture), EasyMock.anyObject(InputMarker.class));
     EasyMock.expectLastCall();
@@ -84,10 +85,10 @@ public class FilterGrokTest {
   public void testFilterGrok_parseMultiLineMessage() throws Exception {
     LOG.info("testFilterGrok_parseMultiLineMessage()");
 
-    Map<String, Object> config = new HashMap<String, Object>();
-    config.put("message_pattern", "(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{GREEDYDATA:log_message}");
-    config.put("multiline_pattern", "^(%{TIMESTAMP_ISO8601:logtime})");
-    init(config);
+    FilterGrokDescriptorImpl filterGrokDescriptor = new FilterGrokDescriptorImpl();
+    filterGrokDescriptor.setMessagePattern("(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{GREEDYDATA:log_message}");
+    filterGrokDescriptor.setMultilinePattern("^(%{TIMESTAMP_ISO8601:logtime})");
+    init(filterGrokDescriptor);
 
     mockOutputManager.write(EasyMock.capture(capture), EasyMock.anyObject(InputMarker.class));
     EasyMock.expectLastCall();
@@ -114,10 +115,10 @@ public class FilterGrokTest {
   public void testFilterGrok_notMatchingMesagePattern() throws Exception {
     LOG.info("testFilterGrok_notMatchingMesagePattern()");
 
-    Map<String, Object> config = new HashMap<String, Object>();
-    config.put("message_pattern", "(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{GREEDYDATA:log_message}");
-    config.put("multiline_pattern", "^(%{TIMESTAMP_ISO8601:logtime})");
-    init(config);
+    FilterGrokDescriptorImpl filterGrokDescriptor = new FilterGrokDescriptorImpl();
+    filterGrokDescriptor.setMessagePattern("(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{GREEDYDATA:log_message}");
+    filterGrokDescriptor.setMultilinePattern("^(%{TIMESTAMP_ISO8601:logtime})");
+    init(filterGrokDescriptor);
 
     mockOutputManager.write(EasyMock.capture(capture), EasyMock.anyObject(InputMarker.class));
     EasyMock.expectLastCall().anyTimes();
@@ -134,9 +135,9 @@ public class FilterGrokTest {
   public void testFilterGrok_noMesagePattern() throws Exception {
     LOG.info("testFilterGrok_noMesagePattern()");
 
-    Map<String, Object> config = new HashMap<String, Object>();
-    config.put("multiline_pattern", "^(%{TIMESTAMP_ISO8601:logtime})");
-    init(config);
+    FilterGrokDescriptorImpl filterGrokDescriptor = new FilterGrokDescriptorImpl();
+    filterGrokDescriptor.setMultilinePattern("^(%{TIMESTAMP_ISO8601:logtime})");
+    init(filterGrokDescriptor);
 
     EasyMock.replay(mockOutputManager);
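
These test rewrites keep one shared shape: a strict mock OutputManager, a Capture recording the last map written, and a replay before the filter runs. A self-contained sketch of that EasyMock capture pattern (the Sink interface is a stand-in for OutputManager):

  import org.easymock.Capture;
  import org.easymock.CaptureType;
  import org.easymock.EasyMock;

  public class CaptureSketch {
    interface Sink {
      void write(String value);
    }

    public static void main(String[] args) {
      Sink sink = EasyMock.strictMock(Sink.class);
      Capture<String> capture = EasyMock.newCapture(CaptureType.LAST);

      // Expect exactly one write and record its argument.
      sink.write(EasyMock.capture(capture));
      EasyMock.expectLastCall();
      EasyMock.replay(sink);

      sink.write("parsed log line");           // the code under test would do this
      EasyMock.verify(sink);
      System.out.println(capture.getValue());  // parsed log line
    }
  }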
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterJSONTest.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterJSONTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterJSONTest.java
index 643dafc..8f75c3a 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterJSONTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterJSONTest.java
@@ -21,7 +21,6 @@ package org.apache.ambari.logfeeder.filter;
 import java.text.DateFormat;
 import java.text.SimpleDateFormat;
 import java.util.Date;
-import java.util.HashMap;
 import java.util.Map;
 import java.util.TimeZone;
 
@@ -29,6 +28,7 @@ import org.apache.ambari.logfeeder.common.LogFeederConstants;
 import org.apache.ambari.logfeeder.common.LogfeederException;
 import org.apache.ambari.logfeeder.input.InputMarker;
 import org.apache.ambari.logfeeder.output.OutputManager;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.FilterJsonDescriptorImpl;
 import org.apache.log4j.Logger;
 import org.easymock.Capture;
 import org.easymock.CaptureType;
@@ -47,12 +47,12 @@ public class FilterJSONTest {
   private OutputManager mockOutputManager;
   private Capture<Map<String, Object>> capture;
 
-  public void init(Map<String, Object> params) throws Exception {
+  public void init(FilterJsonDescriptorImpl filterJsonDescriptor) throws Exception {
     mockOutputManager = EasyMock.strictMock(OutputManager.class);
     capture = EasyMock.newCapture(CaptureType.LAST);
 
     filterJson = new FilterJSON();
-    filterJson.loadConfig(params);
+    filterJson.loadConfig(filterJsonDescriptor);
     filterJson.setOutputManager(mockOutputManager);
     filterJson.init();
   }
@@ -61,7 +61,7 @@ public class FilterJSONTest {
   public void testJSONFilterCode_convertFields() throws Exception {
     LOG.info("testJSONFilterCode_convertFields()");
 
-    init(new HashMap<String, Object>());
+    init(new FilterJsonDescriptorImpl());
 
     mockOutputManager.write(EasyMock.capture(capture), EasyMock.anyObject(InputMarker.class));
     EasyMock.expectLastCall();
@@ -86,7 +86,7 @@ public class FilterJSONTest {
   public void testJSONFilterCode_logTimeOnly() throws Exception {
     LOG.info("testJSONFilterCode_logTimeOnly()");
 
-    init(new HashMap<String, Object>());
+    init(new FilterJsonDescriptorImpl());
 
     mockOutputManager.write(EasyMock.capture(capture), EasyMock.anyObject(InputMarker.class));
     EasyMock.expectLastCall();
@@ -111,7 +111,7 @@ public class FilterJSONTest {
   public void testJSONFilterCode_lineNumberOnly() throws Exception {
     LOG.info("testJSONFilterCode_lineNumberOnly()");
 
-    init(new HashMap<String, Object>());
+    init(new FilterJsonDescriptorImpl());
 
     mockOutputManager.write(EasyMock.capture(capture), EasyMock.anyObject(InputMarker.class));
     EasyMock.expectLastCall();
@@ -131,7 +131,7 @@ public class FilterJSONTest {
   @Test
   public void testJSONFilterCode_invalidJson() throws Exception {
     LOG.info("testJSONFilterCode_invalidJson()");
-    init(new HashMap<String, Object>());
+    init(new FilterJsonDescriptorImpl());
     String inputStr="invalid json";
     try{
     filterJson.apply(inputStr,new InputMarker(null, null, 0));

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterKeyValueTest.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterKeyValueTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterKeyValueTest.java
index 05647e6..ae978fb 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterKeyValueTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterKeyValueTest.java
@@ -18,10 +18,11 @@
 
 package org.apache.ambari.logfeeder.filter;
 
-import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.ambari.logfeeder.output.OutputManager;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterKeyValueDescriptor;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.FilterKeyValueDescriptorImpl;
 import org.apache.ambari.logfeeder.input.InputMarker;
 import org.apache.log4j.Logger;
 import org.easymock.Capture;
@@ -41,12 +42,12 @@ public class FilterKeyValueTest {
   private OutputManager mockOutputManager;
   private Capture<Map<String, Object>> capture;
 
-  public void init(Map<String, Object> config) throws Exception {
+  public void init(FilterKeyValueDescriptor filterKeyValueDescriptor) throws Exception {
     mockOutputManager = EasyMock.strictMock(OutputManager.class);
     capture = EasyMock.newCapture(CaptureType.LAST);
 
     filterKeyValue = new FilterKeyValue();
-    filterKeyValue.loadConfig(config);
+    filterKeyValue.loadConfig(filterKeyValueDescriptor);
     filterKeyValue.setOutputManager(mockOutputManager);
     filterKeyValue.init();
   }
@@ -55,11 +56,10 @@ public class FilterKeyValueTest {
   public void testFilterKeyValue_extraction() throws Exception {
     LOG.info("testFilterKeyValue_extraction()");
 
-    Map<String, Object> config = new HashMap<String, Object>();
-    config.put("source_field", "keyValueField");
-    config.put("field_split", "&");
-    // using default value split:
-    init(config);
+    FilterKeyValueDescriptorImpl filterKeyValueDescriptor = new FilterKeyValueDescriptorImpl();
+    filterKeyValueDescriptor.setSourceField("keyValueField");
+    filterKeyValueDescriptor.setFieldSplit("&");
+    init(filterKeyValueDescriptor);
 
     mockOutputManager.write(EasyMock.capture(capture), EasyMock.anyObject(InputMarker.class));
     EasyMock.expectLastCall();
@@ -80,11 +80,11 @@ public class FilterKeyValueTest {
   public void testFilterKeyValue_extractionWithBorders() throws Exception {
     LOG.info("testFilterKeyValue_extractionWithBorders()");
 
-    Map<String, Object> config = new HashMap<String, Object>();
-    config.put("source_field", "keyValueField");
-    config.put("field_split", "&");
-    config.put("value_borders", "()");
-    init(config);
+    FilterKeyValueDescriptorImpl filterKeyValueDescriptor = new FilterKeyValueDescriptorImpl();
+    filterKeyValueDescriptor.setSourceField("keyValueField");
+    filterKeyValueDescriptor.setFieldSplit("&");
+    filterKeyValueDescriptor.setValueBorders("()");
+    init(filterKeyValueDescriptor);
 
     mockOutputManager.write(EasyMock.capture(capture), EasyMock.anyObject(InputMarker.class));
     EasyMock.expectLastCall();
@@ -105,10 +105,9 @@ public class FilterKeyValueTest {
   public void testFilterKeyValue_missingSourceField() throws Exception {
     LOG.info("testFilterKeyValue_missingSourceField()");
 
-    Map<String, Object> config = new HashMap<String, Object>();
-    config.put("field_split", "&");
-    // using default value split: =
-    init(config);
+    FilterKeyValueDescriptorImpl filterKeyValueDescriptor = new FilterKeyValueDescriptorImpl();
+    filterKeyValueDescriptor.setFieldSplit("&");
+    init(filterKeyValueDescriptor);
 
     mockOutputManager.write(EasyMock.capture(capture), EasyMock.anyObject(InputMarker.class));
     EasyMock.expectLastCall().anyTimes();
@@ -124,10 +123,10 @@ public class FilterKeyValueTest {
   public void testFilterKeyValue_noSourceFieldPresent() throws Exception {
     LOG.info("testFilterKeyValue_noSourceFieldPresent()");
 
-    Map<String, Object> config = new HashMap<String, Object>();
-    config.put("source_field", "keyValueField");
-    config.put("field_split", "&");
-    init(config);
+    FilterKeyValueDescriptorImpl filterKeyValueDescriptor = new FilterKeyValueDescriptorImpl();
+    filterKeyValueDescriptor.setSourceField("keyValueField");
+    filterKeyValueDescriptor.setFieldSplit("&");
+    init(filterKeyValueDescriptor);
 
     // using default value split: =
     mockOutputManager.write(EasyMock.capture(capture), EasyMock.anyObject(InputMarker.class));

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/input/InputFileTest.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/input/InputFileTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/input/InputFileTest.java
index 522f6bb..3a5f31e 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/input/InputFileTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/input/InputFileTest.java
@@ -22,12 +22,11 @@ import java.io.File;
 import java.io.IOException;
 import java.nio.charset.Charset;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
 
 import org.apache.ambari.logfeeder.filter.Filter;
 import org.apache.ambari.logfeeder.input.InputMarker;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputFileDescriptorImpl;
 import org.apache.commons.io.FileUtils;
 import org.apache.log4j.Logger;
 import org.easymock.EasyMock;
@@ -78,15 +77,14 @@ public class InputFileTest {
   }
 
   public void init(String path) throws Exception {
-    Map<String, Object> config = new HashMap<String, Object>();
-    config.put("source", "file");
-    config.put("tail", "true");
-    config.put("gen_event_md5", "true");
-    config.put("start_position", "beginning");
-
-    config.put("type", "hdfs_datanode");
-    config.put("rowtype", "service");
-    config.put("path", path);
+    InputFileDescriptorImpl inputFileDescriptor = new InputFileDescriptorImpl();
+    inputFileDescriptor.setSource("file");
+    inputFileDescriptor.setTail(true);
+    inputFileDescriptor.setGenEventMd5(true);
+    inputFileDescriptor.setStartPosition("beginning");
+    inputFileDescriptor.setType("hdfs_datanode");
+    inputFileDescriptor.setRowtype("service");
+    inputFileDescriptor.setPath(path);
 
     Filter capture = new Filter() {
       @Override
@@ -104,7 +102,7 @@ public class InputFileTest {
     };
 
     inputFile = new InputFile();
-    inputFile.loadConfig(config);
+    inputFile.loadConfig(inputFileDescriptor);
     inputFile.addFilter(capture);
     inputFile.init();
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/logconfig/LogConfigHandlerTest.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/logconfig/LogConfigHandlerTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/logconfig/LogConfigHandlerTest.java
index 44314c6..4123dad 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/logconfig/LogConfigHandlerTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/logconfig/LogConfigHandlerTest.java
@@ -21,13 +21,10 @@ package org.apache.ambari.logfeeder.logconfig;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Date;
-import java.util.HashMap;
-import java.util.Map;
 
 import static org.easymock.EasyMock.*;
 import static org.junit.Assert.*;
 
-import org.apache.ambari.logfeeder.common.LogFeederConstants;
 import org.apache.ambari.logfeeder.input.Input;
 import org.apache.ambari.logfeeder.input.InputMarker;
 import org.apache.ambari.logfeeder.loglevelfilter.FilterLogData;
@@ -36,6 +33,7 @@ import org.apache.ambari.logfeeder.util.LogFeederUtil;
 import org.apache.ambari.logsearch.config.api.LogSearchConfig;
 import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
 import org.apache.commons.lang.time.DateUtils;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputDescriptorImpl;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -44,16 +42,18 @@ public class LogConfigHandlerTest {
   private static InputMarker inputMarkerAudit;
   private static InputMarker inputMarkerService;
   static {
-    Map<String, Object> auditMap = new HashMap<String, Object>();
-    auditMap.put(LogFeederConstants.ROW_TYPE, "audit");
+    InputDescriptorImpl auditInputDescriptor = new InputDescriptorImpl() {};
+    auditInputDescriptor.setRowtype("audit");
+    
     Input auditInput = strictMock(Input.class);
-    expect(auditInput.getConfigs()).andReturn(auditMap).anyTimes();
+    expect(auditInput.getInputDescriptor()).andReturn(auditInputDescriptor).anyTimes();
     inputMarkerAudit = new InputMarker(auditInput, null, 0);
     
-    Map<String, Object> serviceMap = new HashMap<String, Object>();
-    serviceMap.put(LogFeederConstants.ROW_TYPE, "service");
+    InputDescriptorImpl serviceInputDescriptor = new InputDescriptorImpl() {};
+    serviceInputDescriptor.setRowtype("service");
+    
     Input serviceInput = strictMock(Input.class);
-    expect(serviceInput.getConfigs()).andReturn(serviceMap).anyTimes();
+    expect(serviceInput.getInputDescriptor()).andReturn(serviceInputDescriptor).anyTimes();
     inputMarkerService = new InputMarker(serviceInput, null, 0);
     
     replay(auditInput, serviceInput);

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperDateTest.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperDateTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperDateTest.java
index 8beecda..0a0a9fd 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperDateTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperDateTest.java
@@ -25,6 +25,7 @@ import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.ambari.logfeeder.common.LogFeederConstants;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.MapDateDescriptorImpl;
 import org.apache.commons.lang3.time.DateUtils;
 import org.apache.log4j.Logger;
 import org.junit.Test;
@@ -40,11 +41,11 @@ public class MapperDateTest {
   public void testMapperDate_epoch() {
     LOG.info("testMapperDate_epoch()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
-    mapConfigs.put("target_date_pattern", "epoch");
+    MapDateDescriptorImpl mapDateDescriptor = new MapDateDescriptorImpl();
+    mapDateDescriptor.setTargetDatePattern("epoch");
 
     MapperDate mapperDate = new MapperDate();
-    assertTrue("Could not initialize!", mapperDate.init(null, "someField", null, mapConfigs));
+    assertTrue("Could not initialize!", mapperDate.init(null, "someField", null, mapDateDescriptor));
 
     Map<String, Object> jsonObj = new HashMap<>();
 
@@ -61,11 +62,11 @@ public class MapperDateTest {
   public void testMapperDate_pattern() throws Exception {
     LOG.info("testMapperDate_pattern()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
-    mapConfigs.put("target_date_pattern", "yyyy-MM-dd HH:mm:ss.SSS");
+    MapDateDescriptorImpl mapDateDescriptor = new MapDateDescriptorImpl();
+    mapDateDescriptor.setTargetDatePattern("yyyy-MM-dd HH:mm:ss.SSS");
 
     MapperDate mapperDate = new MapperDate();
-    assertTrue("Could not initialize!", mapperDate.init(null, "someField", null, mapConfigs));
+    assertTrue("Could not initialize!", mapperDate.init(null, "someField", null, mapDateDescriptor));
 
     Map<String, Object> jsonObj = new HashMap<>();
     String dateString = "2016-04-08 15:55:23.548";
@@ -80,44 +81,35 @@ public class MapperDateTest {
   }
 
   @Test
-  public void testMapperDate_configNotMap() {
-    LOG.info("testMapperDate_configNotMap()");
-
-    MapperDate mapperDate = new MapperDate();
-    assertFalse("Was able to initialize!", mapperDate.init(null, "someField", null, ""));
-  }
-
-  @Test
   public void testMapperDate_noDatePattern() {
     LOG.info("testMapperDate_noDatePattern()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
-    mapConfigs.put("some_param", "some_value");
+    MapDateDescriptorImpl mapDateDescriptor = new MapDateDescriptorImpl();
 
     MapperDate mapperDate = new MapperDate();
-    assertFalse("Was able to initialize!", mapperDate.init(null, "someField", null, mapConfigs));
+    assertFalse("Was not able to initialize!", mapperDate.init(null, "someField", null, mapDateDescriptor));
   }
 
   @Test
   public void testMapperDate_notParsableDatePattern() {
     LOG.info("testMapperDate_notParsableDatePattern()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
-    mapConfigs.put("target_date_pattern", "not_parsable_content");
+    MapDateDescriptorImpl mapDateDescriptor = new MapDateDescriptorImpl();
+    mapDateDescriptor.setTargetDatePattern("not_parsable_content");
 
     MapperDate mapperDate = new MapperDate();
-    assertFalse("Was able to initialize!", mapperDate.init(null, "someField", null, mapConfigs));
+    assertFalse("Was not able to initialize!", mapperDate.init(null, "someField", null, mapDateDescriptor));
   }
 
   @Test
   public void testMapperDate_invalidEpochValue() {
     LOG.info("testMapperDate_invalidEpochValue()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
-    mapConfigs.put("target_date_pattern", "epoch");
+    MapDateDescriptorImpl mapDateDescriptor = new MapDateDescriptorImpl();
+    mapDateDescriptor.setTargetDatePattern("epoch");
 
     MapperDate mapperDate = new MapperDate();
-    assertTrue("Could not initialize!", mapperDate.init(null, "someField", null, mapConfigs));
+    assertTrue("Could not initialize!", mapperDate.init(null, "someField", null, mapDateDescriptor));
 
     Map<String, Object> jsonObj = new HashMap<>();
     String invalidValue = "abc";
@@ -131,11 +123,11 @@ public class MapperDateTest {
   public void testMapperDate_invalidDateStringValue() {
     LOG.info("testMapperDate_invalidDateStringValue()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
-    mapConfigs.put("target_date_pattern", "yyyy-MM-dd HH:mm:ss.SSS");
+    MapDateDescriptorImpl mapDateDescriptor = new MapDateDescriptorImpl();
+    mapDateDescriptor.setTargetDatePattern("yyyy-MM-dd HH:mm:ss.SSS");
 
     MapperDate mapperDate = new MapperDate();
-    assertTrue("Could not initialize!", mapperDate.init(null, "someField", null, mapConfigs));
+    assertTrue("Could not initialize!", mapperDate.init(null, "someField", null, mapDateDescriptor));
 
     Map<String, Object> jsonObj = new HashMap<>();
     String invalidValue = "abc";

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldCopyTest.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldCopyTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldCopyTest.java
index 108c96e..4899dfc 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldCopyTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldCopyTest.java
@@ -21,6 +21,7 @@ package org.apache.ambari.logfeeder.mapper;
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.MapFieldCopyDescriptorImpl;
 import org.apache.log4j.Logger;
 import org.junit.Test;
 
@@ -35,11 +36,11 @@ public class MapperFieldCopyTest {
   public void testMapperFieldCopy_copyField() {
     LOG.info("testMapperFieldCopy_copyField()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
-    mapConfigs.put("copy_name", "someOtherField");
+    MapFieldCopyDescriptorImpl mapFieldCopyDescriptor = new MapFieldCopyDescriptorImpl();
+    mapFieldCopyDescriptor.setCopyName("someOtherField");
 
     MapperFieldCopy mapperFieldCopy = new MapperFieldCopy();
-    assertTrue("Could not initialize!", mapperFieldCopy.init(null, "someField", null, mapConfigs));
+    assertTrue("Could not initialize!", mapperFieldCopy.init(null, "someField", null, mapFieldCopyDescriptor));
 
     Map<String, Object> jsonObj = new HashMap<>();
     jsonObj.put("someField", "someValue");
@@ -52,20 +53,12 @@ public class MapperFieldCopyTest {
   }
 
   @Test
-  public void testMapperFielCopy_configNotMap() {
-    LOG.info("testMapperFieldCopy_configNotMap()");
-
-    MapperFieldCopy mapperFieldCopy = new MapperFieldCopy();
-    assertFalse("Was able to initialize!", mapperFieldCopy.init(null, "someField", null, ""));
-  }
-
-  @Test
   public void testMapperFieldCopy_noNewFieldName() {
     LOG.info("testMapperFieldCopy_noNewFieldName()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
+    MapFieldCopyDescriptorImpl mapFieldCopyDescriptor = new MapFieldCopyDescriptorImpl();
 
     MapperFieldCopy mapperFieldCopy = new MapperFieldCopy();
-    assertFalse("Was able to initialize!", mapperFieldCopy.init(null, "someField", null, mapConfigs));
+    assertFalse("Was not able to initialize!", mapperFieldCopy.init(null, "someField", null, mapFieldCopyDescriptor));
   }
 }

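Note that the testMapper*_configNotMap tests are deleted outright rather than ported: they asserted that init(...) rejected a config object that was not a Map, and with a typed descriptor parameter that call no longer compiles, so the guarded failure mode cannot be reached at runtime. A hypothetical illustration (the commented call is not in the patch; it is what javac would now reject):

    import org.apache.ambari.logfeeder.mapper.MapperFieldCopy;

    public class TypedConfigSketch {
      public static void main(String[] args) {
        MapperFieldCopy mapperFieldCopy = new MapperFieldCopy();
        // The removed test passed "" and asserted a false return; under the new
        // signature the equivalent call is a compile error, not a runtime branch:
        // mapperFieldCopy.init(null, "someField", null, "");  // incompatible types
      }
    }
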
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldNameTest.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldNameTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldNameTest.java
index 8ecaad1..74b88fc 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldNameTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldNameTest.java
@@ -21,6 +21,7 @@ package org.apache.ambari.logfeeder.mapper;
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.MapFieldNameDescriptorImpl;
 import org.apache.log4j.Logger;
 import org.junit.Test;
 
@@ -35,11 +36,11 @@ public class MapperFieldNameTest {
   public void testMapperFieldName_replaceField() {
     LOG.info("testMapperFieldName_replaceField()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
-    mapConfigs.put("new_fieldname", "someOtherField");
+    MapFieldNameDescriptorImpl mapFieldNameDescriptor = new MapFieldNameDescriptorImpl();
+    mapFieldNameDescriptor.setNewFieldName("someOtherField");
 
     MapperFieldName mapperFieldName = new MapperFieldName();
-    assertTrue("Could not initialize!", mapperFieldName.init(null, "someField", null, mapConfigs));
+    assertTrue("Could not initialize!", mapperFieldName.init(null, "someField", null, mapFieldNameDescriptor));
 
     Map<String, Object> jsonObj = new HashMap<>();
     jsonObj.put("someField", "someValue");
@@ -52,20 +53,12 @@ public class MapperFieldNameTest {
   }
 
   @Test
-  public void testMapperFieldName_configNotMap() {
-    LOG.info("testMapperFieldName_configNotMap()");
-
-    MapperFieldName mapperFieldName = new MapperFieldName();
-    assertFalse("Was able to initialize!", mapperFieldName.init(null, "someField", null, ""));
-  }
-
-  @Test
   public void testMapperFieldName_noNewFieldName() {
     LOG.info("testMapperFieldName_noNewFieldName()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
+    MapFieldNameDescriptorImpl mapFieldNameDescriptor = new MapFieldNameDescriptorImpl();
 
     MapperFieldName mapperFieldName = new MapperFieldName();
-    assertFalse("Was able to initialize!", mapperFieldName.init(null, "someField", null, mapConfigs));
+    assertFalse("Was able to initialize!", mapperFieldName.init(null, "someField", null, mapFieldNameDescriptor));
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldValueTest.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldValueTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldValueTest.java
index fce4308..1a33740 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldValueTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldValueTest.java
@@ -21,6 +21,7 @@ package org.apache.ambari.logfeeder.mapper;
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.MapFieldValueDescriptorImpl;
 import org.apache.log4j.Logger;
 import org.junit.Test;
 
@@ -35,12 +36,12 @@ public class MapperFieldValueTest {
   public void testMapperFieldValue_replaceValue() {
     LOG.info("testMapperFieldValue_replaceValue()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
-    mapConfigs.put("pre_value", "someValue");
-    mapConfigs.put("post_value", "someOtherValue");
+    MapFieldValueDescriptorImpl mapFieldValueDescriptor = new MapFieldValueDescriptorImpl();
+    mapFieldValueDescriptor.setPreValue("someValue");
+    mapFieldValueDescriptor.setPostValue("someOtherValue");
 
     MapperFieldValue mapperFieldValue = new MapperFieldValue();
-    assertTrue("Could not initialize!", mapperFieldValue.init(null, "someField", null, mapConfigs));
+    assertTrue("Could not initialize!", mapperFieldValue.init(null, "someField", null, mapFieldValueDescriptor));
 
     Map<String, Object> jsonObj = new HashMap<>();
 
@@ -52,33 +53,25 @@ public class MapperFieldValueTest {
   }
 
   @Test
-  public void testMapperFieldValue_configNotMap() {
-    LOG.info("testMapperFieldValue_configNotMap()");
-
-    MapperFieldValue mapperFieldValue = new MapperFieldValue();
-    assertFalse("Was able to initialize!", mapperFieldValue.init(null, "someField", null, ""));
-  }
-
-  @Test
   public void testMapperFieldValue_noPostValue() {
     LOG.info("testMapperFieldValue_noPostValue()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
+    MapFieldValueDescriptorImpl mapFieldValueDescriptor = new MapFieldValueDescriptorImpl();
 
     MapperFieldValue mapperFieldValue = new MapperFieldValue();
-    assertFalse("Was able to initialize!", mapperFieldValue.init(null, "someField", null, mapConfigs));
+    assertFalse("Was not able to initialize!", mapperFieldValue.init(null, "someField", null, mapFieldValueDescriptor));
   }
 
   @Test
   public void testMapperFieldValue_noPreValueFound() {
     LOG.info("testMapperFieldValue_noPreValueFound()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
-    mapConfigs.put("pre_value", "someValue");
-    mapConfigs.put("post_value", "someOtherValue");
+    MapFieldValueDescriptorImpl mapFieldValueDescriptor = new MapFieldValueDescriptorImpl();
+    mapFieldValueDescriptor.setPreValue("someValue");
+    mapFieldValueDescriptor.setPostValue("someOtherValue");
 
     MapperFieldValue mapperFieldValue = new MapperFieldValue();
-    assertTrue("Could not initialize!", mapperFieldValue.init(null, "someField", null, mapConfigs));
+    assertTrue("Could not initialize!", mapperFieldValue.init(null, "someField", null, mapFieldValueDescriptor));
 
     Map<String, Object> jsonObj = new HashMap<>();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputLineFilterTest.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputLineFilterTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputLineFilterTest.java
index 1ccc319..6e108ab 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputLineFilterTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputLineFilterTest.java
@@ -21,6 +21,8 @@ package org.apache.ambari.logfeeder.output;
 import org.apache.ambari.logfeeder.common.LogFeederConstants;
 import org.apache.ambari.logfeeder.input.Input;
 import org.apache.ambari.logfeeder.input.cache.LRUCache;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputDescriptorImpl;
 import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
@@ -49,7 +51,7 @@ public class OutputLineFilterTest {
   public void testApplyWithFilterOutByDedupInterval() {
     // GIVEN
     EasyMock.expect(inputMock.getCache()).andReturn(createLruCache(DEFAULT_DUMMY_MESSAGE, 100L, false));
-    EasyMock.expect(inputMock.getConfigs()).andReturn(generateInputConfigs());
+    EasyMock.expect(inputMock.getInputDescriptor()).andReturn(generateInputDescriptor());
     EasyMock.expect(inputMock.getCacheKeyField()).andReturn(CACHE_KEY_FIELD);
     EasyMock.replay(inputMock);
     // WHEN
@@ -63,7 +65,7 @@ public class OutputLineFilterTest {
   public void testApplyDoNotFilterOutDataByDedupInterval() {
     // GIVEN
     EasyMock.expect(inputMock.getCache()).andReturn(createLruCache(DEFAULT_DUMMY_MESSAGE, 10L, false));
-    EasyMock.expect(inputMock.getConfigs()).andReturn(generateInputConfigs());
+    EasyMock.expect(inputMock.getInputDescriptor()).andReturn(generateInputDescriptor());
     EasyMock.expect(inputMock.getCacheKeyField()).andReturn(CACHE_KEY_FIELD);
     EasyMock.replay(inputMock);
     // WHEN
@@ -77,7 +79,7 @@ public class OutputLineFilterTest {
   public void testApplyWithFilterOutByDedupLast() {
     // GIVEN
     EasyMock.expect(inputMock.getCache()).andReturn(createLruCache(DEFAULT_DUMMY_MESSAGE, 10L, true));
-    EasyMock.expect(inputMock.getConfigs()).andReturn(generateInputConfigs());
+    EasyMock.expect(inputMock.getInputDescriptor()).andReturn(generateInputDescriptor());
     EasyMock.expect(inputMock.getCacheKeyField()).andReturn(CACHE_KEY_FIELD);
     EasyMock.replay(inputMock);
     // WHEN
@@ -91,7 +93,7 @@ public class OutputLineFilterTest {
   public void testApplyDoNotFilterOutDataByDedupLast() {
     // GIVEN
     EasyMock.expect(inputMock.getCache()).andReturn(createLruCache("myMessage2", 10L, true));
-    EasyMock.expect(inputMock.getConfigs()).andReturn(generateInputConfigs());
+    EasyMock.expect(inputMock.getInputDescriptor()).andReturn(generateInputDescriptor());
     EasyMock.expect(inputMock.getCacheKeyField()).andReturn(CACHE_KEY_FIELD);
     EasyMock.replay(inputMock);
     // WHEN
@@ -117,7 +119,7 @@ public class OutputLineFilterTest {
   public void testApplyWithoutInMemoryTimestamp() {
     // GIVEN
     EasyMock.expect(inputMock.getCache()).andReturn(createLruCache(DEFAULT_DUMMY_MESSAGE, 100L, true));
-    EasyMock.expect(inputMock.getConfigs()).andReturn(generateInputConfigs());
+    EasyMock.expect(inputMock.getInputDescriptor()).andReturn(generateInputDescriptor());
     EasyMock.expect(inputMock.getCacheKeyField()).andReturn(CACHE_KEY_FIELD);
     EasyMock.replay(inputMock);
     Map<String, Object> lineMap = generateLineMap();
@@ -133,7 +135,7 @@ public class OutputLineFilterTest {
   public void testApplyWithoutLogMessage() {
     // GIVEN
     EasyMock.expect(inputMock.getCache()).andReturn(createLruCache(DEFAULT_DUMMY_MESSAGE, 100L, true));
-    EasyMock.expect(inputMock.getConfigs()).andReturn(generateInputConfigs());
+    EasyMock.expect(inputMock.getInputDescriptor()).andReturn(generateInputDescriptor());
     EasyMock.expect(inputMock.getCacheKeyField()).andReturn(CACHE_KEY_FIELD);
     EasyMock.replay(inputMock);
     Map<String, Object> lineMap = generateLineMap();
@@ -152,10 +154,10 @@ public class OutputLineFilterTest {
     return lineMap;
   }
 
-  private Map<String, Object> generateInputConfigs() {
-    Map<String, Object> inputConfigs = new HashMap<>();
-    inputConfigs.put(LogFeederConstants.ROW_TYPE, "service");
-    return inputConfigs;
+  private InputDescriptor generateInputDescriptor() {
+    InputDescriptorImpl inputDescriptor = new InputDescriptorImpl() {};
+    inputDescriptor.setRowtype("service");
+    return inputDescriptor;
   }
 
   private LRUCache createLruCache(String defaultKey, long defaultValue, boolean lastDedupEanabled) {

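One detail in generateInputDescriptor() above: InputDescriptorImpl is instantiated as new InputDescriptorImpl() {}, i.e. through an anonymous subclass, which suggests the class is abstract in this patch. A minimal sketch of building such a test double, limited to setters and getters that appear elsewhere in this commit:

    import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputDescriptorImpl;

    public class InputDescriptorSketch {
      public static void main(String[] args) {
        // The trailing {} creates an anonymous concrete subclass, which is
        // enough to stand in for a real input descriptor in a unit test.
        InputDescriptorImpl inputDescriptor = new InputDescriptorImpl() {};
        inputDescriptor.setRowtype("service");
        inputDescriptor.setType("hdfs-namenode");
        System.out.println(inputDescriptor.getRowtype() + " / " + inputDescriptor.getType());
      }
    }
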
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputManagerTest.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputManagerTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputManagerTest.java
index cf1d25a..5abb720 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputManagerTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputManagerTest.java
@@ -32,6 +32,7 @@ import java.util.Map;
 import org.apache.ambari.logfeeder.input.Input;
 import org.apache.ambari.logfeeder.input.InputMarker;
 import org.apache.ambari.logfeeder.metrics.MetricData;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputDescriptorImpl;
 import org.junit.Test;
 
 public class OutputManagerTest {
@@ -91,15 +92,17 @@ public class OutputManagerTest {
     
     Input mockInput = strictMock(Input.class);
     InputMarker inputMarker = new InputMarker(mockInput, null, 0);
+    InputDescriptorImpl inputDescriptor = new InputDescriptorImpl() {};
+    inputDescriptor.setAddFields(Collections.<String, String> emptyMap());
     
     Output output1 = strictMock(Output.class);
     Output output2 = strictMock(Output.class);
     Output output3 = strictMock(Output.class);
     
-    expect(mockInput.getContextFields()).andReturn(Collections.<String, String> emptyMap());
+    expect(mockInput.getInputDescriptor()).andReturn(inputDescriptor);
     expect(mockInput.isUseEventMD5()).andReturn(false);
     expect(mockInput.isGenEventMD5()).andReturn(false);
-    expect(mockInput.getConfigs()).andReturn(Collections.<String, Object> emptyMap());
+    expect(mockInput.getInputDescriptor()).andReturn(inputDescriptor);
     expect(mockInput.getCache()).andReturn(null);
     expect(mockInput.getOutputList()).andReturn(Arrays.asList(output1, output2, output3));
 
@@ -125,12 +128,13 @@ public class OutputManagerTest {
     
     Input mockInput = strictMock(Input.class);
     InputMarker inputMarker = new InputMarker(mockInput, null, 0);
+    InputDescriptorImpl inputDescriptor = new InputDescriptorImpl() {};
     
     Output output1 = strictMock(Output.class);
     Output output2 = strictMock(Output.class);
     Output output3 = strictMock(Output.class);
     
-    expect(mockInput.getConfigs()).andReturn(Collections.<String, Object> emptyMap());
+    expect(mockInput.getInputDescriptor()).andReturn(inputDescriptor);
     expect(mockInput.getOutputList()).andReturn(Arrays.asList(output1, output2, output3));
     
     output1.write(jsonString, inputMarker); expectLastCall();

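Because mockInput is a strictMock, the two former expectations getContextFields() and getConfigs() become two separate getInputDescriptor() expectations that are consumed in recording order; they cannot be merged with .times(2) since other calls are expected in between. A self-contained sketch of that EasyMock behaviour with a hypothetical Svc interface (not an Ambari type):

    import static org.easymock.EasyMock.createStrictMock;
    import static org.easymock.EasyMock.expect;
    import static org.easymock.EasyMock.replay;
    import static org.easymock.EasyMock.verify;

    public class StrictMockOrderSketch {
      interface Svc {
        String call();
      }

      public static void main(String[] args) {
        Svc svc = createStrictMock(Svc.class);
        // Two expectations on the same method: replayed in the order recorded.
        expect(svc.call()).andReturn("first");
        expect(svc.call()).andReturn("second");
        replay(svc);
        System.out.println(svc.call());  // first
        System.out.println(svc.call());  // second
        verify(svc);
      }
    }
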
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputS3FileTest.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputS3FileTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputS3FileTest.java
index 1872135..7c6aca2 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputS3FileTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputS3FileTest.java
@@ -22,6 +22,7 @@ import org.apache.ambari.logfeeder.input.Input;
 import org.apache.ambari.logfeeder.input.InputMarker;
 import org.apache.ambari.logfeeder.output.spool.LogSpooler;
 import org.apache.ambari.logfeeder.output.spool.LogSpoolerContext;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputDescriptorImpl;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -33,7 +34,6 @@ import static org.easymock.EasyMock.*;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
-
 public class OutputS3FileTest {
 
   private Map<String, Object> configMap;
@@ -71,8 +71,11 @@ public class OutputS3FileTest {
 
     Input input = mock(Input.class);
     InputMarker inputMarker = new InputMarker(input, null, 0);
+    InputDescriptorImpl inputDescriptor = new InputDescriptorImpl() {};
+    inputDescriptor.setType("hdfs-namenode");
+    
     expect(input.getFilePath()).andReturn("/var/log/hdfs-namenode.log");
-    expect(input.getStringValue(OutputS3File.INPUT_ATTRIBUTE_TYPE)).andReturn("hdfs-namenode");
+    expect(input.getInputDescriptor()).andReturn(inputDescriptor);
     final LogSpooler spooler = mock(LogSpooler.class);
     spooler.add("log event block");
     final S3Uploader s3Uploader = mock(S3Uploader.class);
@@ -99,8 +102,11 @@ public class OutputS3FileTest {
   public void shouldReuseSpoolerForSamePath() throws Exception {
     Input input = mock(Input.class);
     InputMarker inputMarker = new InputMarker(input, null, 0);
+    InputDescriptorImpl inputDescriptor = new InputDescriptorImpl() {};
+    inputDescriptor.setType("hdfs-namenode");
+    
     expect(input.getFilePath()).andReturn("/var/log/hdfs-namenode.log");
-    expect(input.getStringValue(OutputS3File.INPUT_ATTRIBUTE_TYPE)).andReturn("hdfs-namenode");
+    expect(input.getInputDescriptor()).andReturn(inputDescriptor);
     final LogSpooler spooler = mock(LogSpooler.class);
     spooler.add("log event block1");
     spooler.add("log event block2");
@@ -169,8 +175,11 @@ public class OutputS3FileTest {
   public void shouldUploadFileOnRollover() throws Exception {
     Input input = mock(Input.class);
     InputMarker inputMarker = new InputMarker(input, null, 0);
+    InputDescriptorImpl inputDescriptor = new InputDescriptorImpl() {};
+    inputDescriptor.setType("hdfs-namenode");
+    
     expect(input.getFilePath()).andReturn("/var/log/hdfs-namenode.log");
-    expect(input.getStringValue(OutputS3File.INPUT_ATTRIBUTE_TYPE)).andReturn("hdfs-namenode");
+    expect(input.getInputDescriptor()).andReturn(inputDescriptor);
     final LogSpooler spooler = mock(LogSpooler.class);
     spooler.add("log event block1");
     final S3Uploader s3Uploader = mock(S3Uploader.class);

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ShipperConfigManager.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ShipperConfigManager.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ShipperConfigManager.java
index 1118233..44d91a9 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ShipperConfigManager.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ShipperConfigManager.java
@@ -21,7 +21,9 @@ package org.apache.ambari.logsearch.manager;
 
 import java.util.List;
 
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;
 import org.apache.ambari.logsearch.configurer.LogSearchConfigConfigurer;
+import org.apache.ambari.logsearch.model.common.LSServerInputConfig;
 import org.apache.ambari.logsearch.model.common.LSServerLogLevelFilterMap;
 import org.apache.log4j.Logger;
 
@@ -50,8 +52,9 @@ public class ShipperConfigManager extends JsonManagerBase {
     return LogSearchConfigConfigurer.getConfig().getServices(clusterName);
   }
 
-  public String getInputConfig(String clusterName, String serviceName) {
-    return LogSearchConfigConfigurer.getConfig().getInputConfig(clusterName, serviceName);
+  public LSServerInputConfig getInputConfig(String clusterName, String serviceName) {
+    InputConfig inputConfig = LogSearchConfigConfigurer.getConfig().getInputConfig(clusterName, serviceName);
+    return new LSServerInputConfig(inputConfig);
   }
 
   public Response createInputConfig(String clusterName, String serviceName, String inputConfig) {

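With the return type changed from String to LSServerInputConfig, the endpoint now serializes a normalized view of the stored configuration instead of echoing the raw ZooKeeper payload, and the response shape is documented by the POJO's annotations. A compile-time sketch of the conversion step, with a hypothetical helper name (render is not in the patch):

    import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;
    import org.apache.ambari.logsearch.model.common.LSServerInputConfig;

    import com.fasterxml.jackson.databind.ObjectMapper;

    public class ShipperConfigRenderSketch {
      // Mirrors ShipperConfigManager.getInputConfig(...): wrap the typed config,
      // then let Jackson produce the JSON that the REST layer returns.
      static String render(InputConfig inputConfig) throws Exception {
        LSServerInputConfig view = new LSServerInputConfig(inputConfig);
        return new ObjectMapper().writeValueAsString(view);
      }
    }
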
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerConditions.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerConditions.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerConditions.java
new file mode 100644
index 0000000..9cd9710
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerConditions.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.Conditions;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+public class LSServerConditions {
+  private LSServerFields fields;
+  
+  public LSServerConditions(Conditions conditions) {
+    this.fields = new LSServerFields(conditions.getFields());
+  }
+
+  public LSServerFields getFields() {
+    return fields;
+  }
+
+  public void setFields(LSServerFields fields) {
+    this.fields = fields;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFields.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFields.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFields.java
new file mode 100644
index 0000000..5f570da
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFields.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import java.util.Set;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.Fields;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+public class LSServerFields {
+  private Set<String> type;
+  
+  public LSServerFields(Fields fields) {
+    this.type = fields.getType();
+  }
+
+  public Set<String> getType() {
+    return type;
+  }
+
+  public void setType(Set<String> type) {
+    this.type = type;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilter.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilter.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilter.java
new file mode 100644
index 0000000..0190c01
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilter.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.PostMapValues;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonInclude.Include;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+@JsonInclude(Include.NON_NULL)
+public abstract class LSServerFilter {
+  private String filter;
+  
+  private LSServerConditions conditions;
+  
+  @JsonProperty("sort_order")
+  private Integer sortOrder;
+  
+  private String sourceField;
+  
+  @JsonProperty("remove_source_field")
+  private Boolean removeSourceField;
+  
+  private Map<String, List<LSServerPostMapValues>> postMapValues;
+  
+  @JsonProperty("is_enabled")
+  private Boolean isEnabled;
+
+  public LSServerFilter(FilterDescriptor filterDescriptor) {
+    this.filter = filterDescriptor.getFilter();
+    this.conditions = new LSServerConditions(filterDescriptor.getConditions());
+    this.sortOrder = filterDescriptor.getSortOrder();
+    this.sourceField = filterDescriptor.getSourceField();
+    this.removeSourceField = filterDescriptor.isRemoveSourceField();
+    
+    postMapValues = new HashMap<String, List<LSServerPostMapValues>>();
+    for (Map.Entry<String, ? extends List<? extends PostMapValues>> e : filterDescriptor.getPostMapValues().entrySet()) {
+      List<LSServerPostMapValues> lsServerPostMapValues = new ArrayList<>();
+      for (PostMapValues pmv : e.getValue()) {
+        lsServerPostMapValues.add(new LSServerPostMapValues(pmv));
+      }
+      postMapValues.put(e.getKey(), lsServerPostMapValues);
+    }
+    
+    this.isEnabled = filterDescriptor.isEnabled();
+  }
+
+  public String getFilter() {
+    return filter;
+  }
+
+  public void setFilter(String filter) {
+    this.filter = filter;
+  }
+
+  public LSServerConditions getConditions() {
+    return conditions;
+  }
+
+  public void setConditions(LSServerConditions conditions) {
+    this.conditions = conditions;
+  }
+
+  public Integer getSortOrder() {
+    return sortOrder;
+  }
+
+  public void setSortOrder(Integer sortOrder) {
+    this.sortOrder = sortOrder;
+  }
+
+  public String getSourceField() {
+    return sourceField;
+  }
+
+  public void setSourceField(String sourceField) {
+    this.sourceField = sourceField;
+  }
+
+  public Boolean getRemoveSourceField() {
+    return removeSourceField;
+  }
+
+  public void setRemoveSourceField(Boolean removeSourceField) {
+    this.removeSourceField = removeSourceField;
+  }
+
+  public Map<String, List<LSServerPostMapValues>> getPostMapValues() {
+    return postMapValues;
+  }
+
+  public void setPostMapValues(Map<String, List<LSServerPostMapValues>> postMapValues) {
+    this.postMapValues = postMapValues;
+  }
+
+  public Boolean getIsEnabled() {
+    return isEnabled;
+  }
+
+  public void setIsEnabled(Boolean isEnabled) {
+    this.isEnabled = isEnabled;
+  }
+}

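The @JsonProperty annotations above pin the serialized names to the snake_case keys of the shipper config format, and @JsonInclude(Include.NON_NULL) drops fields that were never set (as written here, sourceField and postMapValues carry no annotation and so serialize under their camelCase names). A self-contained Jackson sketch of the two annotations on a hypothetical Demo class, not part of the patch:

    import com.fasterxml.jackson.annotation.JsonInclude;
    import com.fasterxml.jackson.annotation.JsonInclude.Include;
    import com.fasterxml.jackson.annotation.JsonProperty;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class JsonNamingSketch {
      @JsonInclude(Include.NON_NULL)
      static class Demo {
        @JsonProperty("sort_order")
        public Integer sortOrder = 1;

        @JsonProperty("is_enabled")
        public Boolean isEnabled;  // left null, so NON_NULL omits it
      }

      public static void main(String[] args) throws Exception {
        // Prints: {"sort_order":1}
        System.out.println(new ObjectMapper().writeValueAsString(new Demo()));
      }
    }
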
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilterGrok.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilterGrok.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilterGrok.java
new file mode 100644
index 0000000..a8c4a7a
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilterGrok.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterGrokDescriptor;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+public class LSServerFilterGrok extends LSServerFilter {
+  @JsonProperty("log4j_format")
+  private String log4jFormat;
+
+  @JsonProperty("multiline_pattern")
+  private String multilinePattern;
+
+  @JsonProperty("message_pattern")
+  private String messagePattern;
+
+  public LSServerFilterGrok(FilterDescriptor filterDescriptor) {
+    super(filterDescriptor);
+    if (filterDescriptor instanceof FilterGrokDescriptor) {
+      FilterGrokDescriptor filterGrokDescriptor = (FilterGrokDescriptor)filterDescriptor;
+      this.log4jFormat = filterGrokDescriptor.getLog4jFormat();
+      this.multilinePattern = filterGrokDescriptor.getMultilinePattern();
+      this.messagePattern = filterGrokDescriptor.getMessagePattern();
+    }
+  }
+
+  public String getLog4jFormat() {
+    return log4jFormat;
+  }
+
+  public void setLog4jFormat(String log4jFormat) {
+    this.log4jFormat = log4jFormat;
+  }
+
+  public String getMultilinePattern() {
+    return multilinePattern;
+  }
+
+  public void setMultilinePattern(String multilinePattern) {
+    this.multilinePattern = multilinePattern;
+  }
+
+  public String getMessagePattern() {
+    return messagePattern;
+  }
+
+  public void setMessagePattern(String messagePattern) {
+    this.messagePattern = messagePattern;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilterJson.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilterJson.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilterJson.java
new file mode 100644
index 0000000..3c0ed17
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilterJson.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+public class LSServerFilterJson extends LSServerFilter {
+  public LSServerFilterJson(FilterDescriptor filterDescriptor) {
+    super(filterDescriptor);
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilterKeyValue.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilterKeyValue.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilterKeyValue.java
new file mode 100644
index 0000000..dcee25d
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilterKeyValue.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterKeyValueDescriptor;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+public class LSServerFilterKeyValue extends LSServerFilter {
+  @JsonProperty("field_split")
+  private String fieldSplit;
+
+  @JsonProperty("value_split")
+  private String valueSplit;
+
+  @JsonProperty("value_borders")
+  private String valueBorders;
+
+  public LSServerFilterKeyValue(FilterDescriptor filterDescriptor) {
+    super(filterDescriptor);
+    FilterKeyValueDescriptor filterKeyValueDescriptor = (FilterKeyValueDescriptor)filterDescriptor;
+    this.fieldSplit = filterKeyValueDescriptor.getFieldSplit();
+    this.valueSplit = filterKeyValueDescriptor.getValueSplit();
+    this.valueBorders = filterKeyValueDescriptor.getValueBorders();
+  }
+
+  public String getFieldSplit() {
+    return fieldSplit;
+  }
+
+  public void setFieldSplit(String fieldSplit) {
+    this.fieldSplit = fieldSplit;
+  }
+
+  public String getValueSplit() {
+    return valueSplit;
+  }
+
+  public void setValueSplit(String valueSplit) {
+    this.valueSplit = valueSplit;
+  }
+
+  public String getValueBorders() {
+    return valueBorders;
+  }
+
+  public void setValueBorders(String valueBorders) {
+    this.valueBorders = valueBorders;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInput.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInput.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInput.java
new file mode 100644
index 0000000..fe83fe4
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInput.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import java.util.Map;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonInclude.Include;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+@JsonInclude(Include.NON_NULL)
+public abstract class LSServerInput {
+  private final String type;
+  private final String rowtype;
+  private final String path;
+  
+  @JsonProperty("add_fields")
+  private final Map<String, String> addFields;
+  
+  private final String source;
+  private final Boolean tail;
+  
+  @JsonProperty("gen_event_md5")
+  private final Boolean genEventMd5;
+  
+  @JsonProperty("use_event_md5_as_id")
+  private final Boolean useEventMd5AsId;
+  
+  @JsonProperty("start_position")
+  private final String startPosition;
+  
+  @JsonProperty("cache_enabled")
+  private final Boolean cacheEnabled;
+  
+  @JsonProperty("cache_key_field")
+  private final String cacheKeyField;
+  
+  @JsonProperty("cache_last_dedup_enabled")
+  private final Boolean cacheLastDedupEnabled;
+  
+  @JsonProperty("cache_size")
+  private final Integer cacheSize;
+  
+  @JsonProperty("cache_dedup_interval")
+  private final Long cacheDedupInterval;
+  
+  @JsonProperty("is_enabled")
+  private final Boolean isEnabled;
+  
+  public LSServerInput(InputDescriptor inputDescriptor) {
+    this.type = inputDescriptor.getType();
+    this.rowtype = inputDescriptor.getRowtype();
+    this.path = inputDescriptor.getPath();
+    this.addFields = inputDescriptor.getAddFields();
+    this.source = inputDescriptor.getSource();
+    this.tail = inputDescriptor.isTail();
+    this.genEventMd5 = inputDescriptor.isGenEventMd5();
+    this.useEventMd5AsId = inputDescriptor.isUseEventMd5AsId();
+    this.startPosition = inputDescriptor.getStartPosition();
+    this.cacheEnabled = inputDescriptor.isCacheEnabled();
+    this.cacheKeyField = inputDescriptor.getCacheKeyField();
+    this.cacheLastDedupEnabled = inputDescriptor.getCacheLastDedupEnabled();
+    this.cacheSize = inputDescriptor.getCacheSize();
+    this.cacheDedupInterval = inputDescriptor.getCacheDedupInterval();
+    this.isEnabled = inputDescriptor.isEnabled();
+  }
+
+  public String getType() {
+    return type;
+  }
+
+  public String getRowtype() {
+    return rowtype;
+  }
+
+  public String getPath() {
+    return path;
+  }
+
+  public Map<String, String> getAddFields() {
+    return addFields;
+  }
+
+  public String getSource() {
+    return source;
+  }
+
+  public Boolean getTail() {
+    return tail;
+  }
+
+  public Boolean getGenEventMd5() {
+    return genEventMd5;
+  }
+
+  public Boolean getUseEventMd5AsId() {
+    return useEventMd5AsId;
+  }
+
+  public String getStartPosition() {
+    return startPosition;
+  }
+
+  public Boolean getCacheEnabled() {
+    return cacheEnabled;
+  }
+
+  public String getCacheKeyField() {
+    return cacheKeyField;
+  }
+
+  public Boolean getCacheLastDedupEnabled() {
+    return cacheLastDedupEnabled;
+  }
+
+  public Integer getCacheSize() {
+    return cacheSize;
+  }
+
+  public Long getCacheDedupInterval() {
+    return cacheDedupInterval;
+  }
+
+  public Boolean getIsEnabled() {
+    return isEnabled;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputConfig.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputConfig.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputConfig.java
new file mode 100644
index 0000000..e3dc0d1
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputConfig.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterGrokDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterJsonDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterKeyValueDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputFileBaseDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputS3FileDescriptor;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+@ApiModel
+public class LSServerInputConfig {
+  @ApiModelProperty
+  private List<LSServerInput> input;
+  
+  @ApiModelProperty
+  private List<LSServerFilter> filter;
+  
+  public LSServerInputConfig(InputConfig inputConfig) {
+    input = new ArrayList<>();
+    for (InputDescriptor inputDescriptor : inputConfig.getInput()) {
+      if (inputDescriptor instanceof InputS3FileDescriptor) {
+        LSServerInput inputItem = new LSServerInputS3File(inputDescriptor);
+        input.add(inputItem);
+      } else if (inputDescriptor instanceof InputFileBaseDescriptor) {
+        LSServerInput inputItem = new LSServerInputFile(inputDescriptor);
+        input.add(inputItem);
+      }
+    }
+    
+    filter = new ArrayList<>();
+    for (FilterDescriptor filterDescriptor : inputConfig.getFilter()) {
+      if (filterDescriptor instanceof FilterGrokDescriptor) {
+        LSServerFilter filterItem = new LSServerFilterGrok(filterDescriptor);
+        filter.add(filterItem);
+      } else if (filterDescriptor instanceof FilterKeyValueDescriptor) {
+        LSServerFilter filterItem = new LSServerFilterKeyValue(filterDescriptor);
+        filter.add(filterItem);
+      } else if (filterDescriptor instanceof FilterJsonDescriptor) {
+        LSServerFilter filterItem = new LSServerFilterJson(filterDescriptor);
+        filter.add(filterItem);
+      }
+    }
+  }
+
+  public List<LSServerInput> getInput() {
+    return input;
+  }
+
+  public void setInput(List<LSServerInput> input) {
+    this.input = input;
+  }
+
+  public List<LSServerFilter> getFilter() {
+    return filter;
+  }
+
+  public void setFilter(List<LSServerFilter> filter) {
+    this.filter = filter;
+  }
+}

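A note on the input dispatch in LSServerInputConfig above: instanceof also matches supertypes, and since LSServerInputS3File extends LSServerInputFileBase, InputS3FileDescriptor is presumably the more specific of the two descriptor interfaces, so it has to be tested before InputFileBaseDescriptor or its branch would be unreachable. A self-contained sketch of the pitfall with hypothetical stand-in interfaces (not Ambari types):

    // Hypothetical stand-ins to show why the more specific check must come first.
    interface FileDesc {}
    interface S3FileDesc extends FileDesc {}

    public class DispatchOrderSketch {
      static String classify(FileDesc d) {
        if (d instanceof S3FileDesc) {        // specific subtype first
          return "s3";
        } else if (d instanceof FileDesc) {   // general fallback
          return "file";
        }
        return "unknown";
      }

      public static void main(String[] args) {
        System.out.println(classify(new S3FileDesc() {}));  // s3
        System.out.println(classify(new FileDesc() {}));    // file
      }
    }
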
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputFile.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputFile.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputFile.java
new file mode 100644
index 0000000..5c547ad
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputFile.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+public class LSServerInputFile extends LSServerInputFileBase {
+  public LSServerInputFile(InputDescriptor inputDescriptor) {
+    super(inputDescriptor);
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputFileBase.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputFileBase.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputFileBase.java
new file mode 100644
index 0000000..df21d0d
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputFileBase.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputFileBaseDescriptor;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+public abstract class LSServerInputFileBase extends LSServerInput {
+  @JsonProperty("checkpoint_interval_ms")
+  private Integer checkpointIntervalMs;
+
+  @JsonProperty("process_file")
+  private Boolean processFile;
+
+  @JsonProperty("copy_file")
+  private Boolean copyFile;
+  
+  public LSServerInputFileBase(InputDescriptor inputDescriptor) {
+    super(inputDescriptor);
+    
+    InputFileBaseDescriptor inputFileBaseDescriptor = (InputFileBaseDescriptor)inputDescriptor;
+    this.checkpointIntervalMs = inputFileBaseDescriptor.getCheckpointIntervalMs();
+    this.processFile = inputFileBaseDescriptor.getProcessFile();
+    this.copyFile = inputFileBaseDescriptor.getCopyFile();
+  }
+
+  public Integer getCheckpointIntervalMs() {
+    return checkpointIntervalMs;
+  }
+
+  public void setCheckpointIntervalMs(Integer checkpointIntervalMs) {
+    this.checkpointIntervalMs = checkpointIntervalMs;
+  }
+
+  public Boolean getProcessFile() {
+    return processFile;
+  }
+
+  public void setProcessFile(Boolean processFile) {
+    this.processFile = processFile;
+  }
+
+  public Boolean getCopyFile() {
+    return copyFile;
+  }
+
+  public void setCopyFile(Boolean copyFile) {
+    this.copyFile = copyFile;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputS3File.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputS3File.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputS3File.java
new file mode 100644
index 0000000..8e9acf0
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputS3File.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputS3FileDescriptor;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+public class LSServerInputS3File extends LSServerInputFileBase {
+  @JsonProperty("s3_access_key")
+  private String s3AccessKey;
+  
+  @JsonProperty("s3_secret_key")
+  private String s3SecretKey;
+  
+  public LSServerInputS3File(InputDescriptor inputDescriptor) {
+    super(inputDescriptor);
+    InputS3FileDescriptor inputS3FileDescriptor = (InputS3FileDescriptor)inputDescriptor;
+    this.s3AccessKey = inputS3FileDescriptor.getS3AccessKey();
+    this.s3SecretKey = inputS3FileDescriptor.getS3SecretKey();
+  }
+
+  public String getS3AccessKey() {
+    return s3AccessKey;
+  }
+
+  public void setS3AccessKey(String s3AccessKey) {
+    this.s3AccessKey = s3AccessKey;
+  }
+
+  public String getS3SecretKey() {
+    return s3SecretKey;
+  }
+
+  public void setS3SecretKey(String s3SecretKey) {
+    this.s3SecretKey = s3SecretKey;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapDate.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapDate.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapDate.java
new file mode 100644
index 0000000..dcacceb
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapDate.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapDateDescriptor;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonInclude.Include;
+
+@JsonInclude(Include.NON_NULL)
+public class LSServerMapDate extends LSServerMapField {
+  @Override
+  public String getName() {
+    return "map_date";
+  }
+
+  @JsonProperty("source_date_pattern")
+  private String sourceDatePattern;
+
+  @JsonProperty("target_date_pattern")
+  private String targetDatePattern;
+
+  public LSServerMapDate(MapDateDescriptor mapDateDescriptor) {
+    this.sourceDatePattern = mapDateDescriptor.getSourceDatePattern();
+    this.targetDatePattern = mapDateDescriptor.getTargetDatePattern();
+  }
+
+  public String getSourceDatePattern() {
+    return sourceDatePattern;
+  }
+
+  public void setSourceDatePattern(String sourceDatePattern) {
+    this.sourceDatePattern = sourceDatePattern;
+  }
+
+  public String getTargetDatePattern() {
+    return targetDatePattern;
+  }
+
+  public void setTargetDatePattern(String targetDatePattern) {
+    this.targetDatePattern = targetDatePattern;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapField.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapField.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapField.java
new file mode 100644
index 0000000..b18439c
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapField.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+@JsonIgnoreProperties(value = { "name" })
+public abstract class LSServerMapField {
+  public abstract String getName();
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapFieldCopy.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapFieldCopy.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapFieldCopy.java
new file mode 100644
index 0000000..b0bea83
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapFieldCopy.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldCopyDescriptor;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+public class LSServerMapFieldCopy extends LSServerMapField {
+  @Override
+  public String getName() {
+    return "map_fieldcopy";
+  }
+
+  @JsonProperty("copy_name")
+  private String copyName;
+
+  public LSServerMapFieldCopy(MapFieldCopyDescriptor mapFieldCopyDescriptor) {
+    this.copyName = mapFieldCopyDescriptor.getCopyName();
+  }
+
+  public String getCopyName() {
+    return copyName;
+  }
+
+  public void setCopyName(String copyName) {
+    this.copyName = copyName;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapFieldName.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapFieldName.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapFieldName.java
new file mode 100644
index 0000000..000b29d
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapFieldName.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldNameDescriptor;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+public class LSServerMapFieldName extends LSServerMapField {
+  @Override
+  public String getName() {
+    return "map_fieldname";
+  }
+
+  @JsonProperty("new_field_name")
+  private String newFieldName;
+
+  public LSServerMapFieldName(MapFieldNameDescriptor mapFieldNameDescriptor) {
+    this.newFieldName = mapFieldNameDescriptor.getNewFieldName();
+  }
+
+  public String getNewFieldName() {
+    return newFieldName;
+  }
+
+  public void setNewFieldName(String newFieldName) {
+    this.newFieldName = newFieldName;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapFieldValue.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapFieldValue.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapFieldValue.java
new file mode 100644
index 0000000..6152de5
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapFieldValue.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldValueDescriptor;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+public class LSServerMapFieldValue extends LSServerMapField {
+  @Override
+  public String getName() {
+    return "map_fieldvalue";
+  }
+
+  @JsonProperty("pre_value")
+  private String preValue;
+
+  @JsonProperty("post_value")
+  private String postValue;
+
+  public LSServerMapFieldValue(MapFieldValueDescriptor mapFieldValueDescriptor) {
+    this.preValue = mapFieldValueDescriptor.getPreValue();
+    this.postValue = mapFieldValueDescriptor.getPostValue();
+  }
+
+  public String getPreValue() {
+    return preValue;
+  }
+
+  public void setPreValue(String preValue) {
+    this.preValue = preValue;
+  }
+
+  public String getPostValue() {
+    return postValue;
+  }
+
+  public void setPostValue(String postValue) {
+    this.postValue = postValue;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerPostMapValues.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerPostMapValues.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerPostMapValues.java
new file mode 100644
index 0000000..5f361c9
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerPostMapValues.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapDateDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldCopyDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldNameDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldValueDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.PostMapValues;
+
+import com.fasterxml.jackson.databind.annotation.JsonSerialize;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+@JsonSerialize(using = LSServerPostMapValuesSerializer.class)
+public class LSServerPostMapValues {
+  private List<LSServerMapField> mappers;
+  
+  public LSServerPostMapValues(PostMapValues pmv) {
+    mappers = new ArrayList<>();
+    for (MapFieldDescriptor mapFieldDescriptor : pmv.getMappers()) {
+      if (mapFieldDescriptor instanceof MapDateDescriptor) {
+        mappers.add(new LSServerMapDate((MapDateDescriptor)mapFieldDescriptor));
+      } else if (mapFieldDescriptor instanceof MapFieldCopyDescriptor) {
+        mappers.add(new LSServerMapFieldCopy((MapFieldCopyDescriptor)mapFieldDescriptor));
+      } else if (mapFieldDescriptor instanceof MapFieldNameDescriptor) {
+        mappers.add(new LSServerMapFieldName((MapFieldNameDescriptor)mapFieldDescriptor));
+      } else if (mapFieldDescriptor instanceof MapFieldValueDescriptor) {
+        mappers.add(new LSServerMapFieldValue((MapFieldValueDescriptor)mapFieldDescriptor));
+      }
+    }
+  }
+
+  public List<LSServerMapField> getMappers() {
+    return mappers;
+  }
+
+  public void setMappers(List<LSServerMapField> mappers) {
+    this.mappers = mappers;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerPostMapValuesSerializer.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerPostMapValuesSerializer.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerPostMapValuesSerializer.java
new file mode 100644
index 0000000..7543677
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerPostMapValuesSerializer.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import java.io.IOException;
+
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.JsonSerializer;
+import com.fasterxml.jackson.databind.SerializerProvider;
+
+public class LSServerPostMapValuesSerializer extends JsonSerializer<LSServerPostMapValues> {
+  @Override
+  public void serialize(LSServerPostMapValues value, JsonGenerator jgen, SerializerProvider provider)
+      throws IOException, JsonProcessingException {
+    jgen.writeStartObject();
+    for (LSServerMapField mapField : value.getMappers()) {
+      jgen.writeObjectField(mapField.getName(), mapField);
+    }
+    jgen.writeEndObject();
+  }
+}

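Taken together, LSServerPostMapValues and the serializer above render each mapper as a JSON object keyed by its getName() value rather than as a plain array. A minimal sketch of the resulting shape for a single map_date mapper (field names follow the @JsonProperty annotations above; the pattern values are placeholders):

    {
      "map_date": {
        "source_date_pattern": "yyyy-MM-dd HH:mm:ss,SSS",
        "target_date_pattern": "yyyy-MM-dd'T'HH:mm:ss.SSSZ"
      }
    }
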
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/ShipperConfigResource.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/ShipperConfigResource.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/ShipperConfigResource.java
index 342d1cf..a7d99c9 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/ShipperConfigResource.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/ShipperConfigResource.java
@@ -33,6 +33,7 @@ import io.swagger.annotations.Api;
 import io.swagger.annotations.ApiOperation;
 
 import org.apache.ambari.logsearch.manager.ShipperConfigManager;
+import org.apache.ambari.logsearch.model.common.LSServerInputConfig;
 import org.apache.ambari.logsearch.model.common.LSServerLogLevelFilterMap;
 import org.springframework.context.annotation.Scope;
 
@@ -65,7 +66,8 @@ public class ShipperConfigResource {
   @Path("/input/{clusterName}/services/{serviceName}")
   @Produces({"application/json"})
   @ApiOperation(GET_SHIPPER_CONFIG_OD)
-  public String getShipperConfig(@PathParam("clusterName") String clusterName, @PathParam("serviceName") String serviceName) {
+  public LSServerInputConfig getShipperConfig(@PathParam("clusterName") String clusterName, @PathParam("serviceName")
+    String serviceName) {
     return shipperConfigManager.getInputConfig(clusterName, serviceName);
   }
 
@@ -99,7 +101,7 @@ public class ShipperConfigResource {
   @Path("/filters/{clusterName}/level")
   @Produces({"application/json"})
   @ApiOperation(UPDATE_LOG_LEVEL_FILTER_OD)
-  public Response setogLevelFilter(LSServerLogLevelFilterMap request, @PathParam("clusterName") String clusterName) {
+  public Response setLogLevelFilter(LSServerLogLevelFilterMap request, @PathParam("clusterName") String clusterName) {
     return shipperConfigManager.setLogLevelFilters(clusterName, request);
   }
 

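With the first hunk above, the GET endpoint now returns the typed LSServerInputConfig model (presumably rendered via the Jackson annotations on the model classes) instead of a raw string. A hypothetical request, assuming the resource is mounted under an /api/v1/shipper prefix; host, port and credentials are placeholders not shown in this diff:

    curl -u admin:admin 'http://localhost:61888/api/v1/shipper/input/cl1/services/hdfs'
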
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/docker/test-config/logfeeder/logfeeder.properties
----------------------------------------------------------------------
diff --git a/ambari-logsearch/docker/test-config/logfeeder/logfeeder.properties b/ambari-logsearch/docker/test-config/logfeeder/logfeeder.properties
index d171803..fb7ddf2 100644
--- a/ambari-logsearch/docker/test-config/logfeeder/logfeeder.properties
+++ b/ambari-logsearch/docker/test-config/logfeeder/logfeeder.properties
@@ -29,3 +29,4 @@ logfeeder.cache.key.field=log_message
 logfeeder.cache.dedup.interval=1000
 logfeeder.cache.last.dedup.enabled=true
 logsearch.config.zk_connect_string=localhost:9983
+logfeeder.include.default.level=FATAL,ERROR,WARN,INFO,DEBUG,TRACE,UNKNOWN

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/docker/test-config/logsearch/logsearch.properties
----------------------------------------------------------------------
diff --git a/ambari-logsearch/docker/test-config/logsearch/logsearch.properties b/ambari-logsearch/docker/test-config/logsearch/logsearch.properties
index 684d1dc..5bde17c 100644
--- a/ambari-logsearch/docker/test-config/logsearch/logsearch.properties
+++ b/ambari-logsearch/docker/test-config/logsearch/logsearch.properties
@@ -43,10 +43,6 @@ logsearch.collection.history.replication.factor=1
 logsearch.solr.metrics.collector.hosts=
 logsearch.solr.jmx.port=18886
 
-# Logfeeder Settings
-
-logsearch.logfeeder.include.default.level=FATAL,ERROR,WARN,INFO,DEBUG,TRACE,UNKNOWN
-
 # logsearch-admin.json
 logsearch.auth.file.enable=true
 logsearch.login.credentials.file=user_pass.json

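The two property-file hunks belong together: the default include-level list moves from the Log Search server's logsearch.logfeeder.include.default.level setting to the Log Feeder itself, which now reads it from its own logfeeder.properties:

    logfeeder.include.default.level=FATAL,ERROR,WARN,INFO,DEBUG,TRACE,UNKNOWN
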

[07/50] [abbrv] ambari git commit: AMBARI-21031. Add docker support for infra manager (oleewere)

Posted by ad...@apache.org.
AMBARI-21031. Add docker support for infra manager (oleewere)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8bf136a4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8bf136a4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8bf136a4

Branch: refs/heads/ambari-rest-api-explorer
Commit: 8bf136a4b78b495de849565400f24a7cc2cbd834
Parents: 1568f80
Author: oleewere <ol...@gmail.com>
Authored: Tue May 16 13:57:12 2017 +0200
Committer: oleewere <ol...@gmail.com>
Committed: Wed May 17 19:35:46 2017 +0200

----------------------------------------------------------------------
 ambari-infra/ambari-infra-assembly/pom.xml      |  6 +-
 ambari-infra/ambari-infra-manager/README.md     |  6 ++
 ambari-infra/ambari-infra-manager/build.xml     |  1 +
 .../ambari-infra-manager/docker/Dockerfile      | 52 ++++++++++++
 .../ambari-infra-manager/docker/bin/start.sh    | 21 +++++
 .../docker/infra-manager-docker.sh              | 85 ++++++++++++++++++++
 .../src/main/resources/infra-manager-env.sh     | 18 +++++
 .../src/main/resources/infraManager.sh          |  2 +-
 8 files changed, 188 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8bf136a4/ambari-infra/ambari-infra-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-assembly/pom.xml b/ambari-infra/ambari-infra-assembly/pom.xml
index 550d97c..fafef7e 100644
--- a/ambari-infra/ambari-infra-assembly/pom.xml
+++ b/ambari-infra/ambari-infra-assembly/pom.xml
@@ -140,6 +140,7 @@
                           <excludes>
                             <exclude>log4j.xml</exclude>
                             <exclude>infra-manager.properties</exclude>
+                            <exclude>infra-manager-env.sh</exclude>
                           </excludes>
                         </source>
                       </sources>
@@ -152,6 +153,7 @@
                           <includes>
                             <include>log4j.xml</include>
                             <include>infra-manager.properties</include>
+                            <include>infra-manager-env.sh</include>
                           </includes>
                         </source>
                       </sources>
@@ -341,7 +343,7 @@
                         <prefix>${infra-manager.mapping.path}</prefix>
                       </mapper>
                       <excludes>
-                        log4j.xml,infra-manager.properties
+                        log4j.xml,infra-manager.properties,infra-manager-env.sh
                       </excludes>
                     </data>
                     <data>
@@ -355,7 +357,7 @@
                         <filemode>644</filemode>
                       </mapper>
                       <includes>
-                        log4j.xml,infra-manager.properties
+                        log4j.xml,infra-manager.properties,infra-manager-env.sh
                       </includes>
                     </data>
                   </dataSet>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8bf136a4/ambari-infra/ambari-infra-manager/README.md
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/README.md b/ambari-infra/ambari-infra-manager/README.md
index 033bbb2..d3527c4 100644
--- a/ambari-infra/ambari-infra-manager/README.md
+++ b/ambari-infra/ambari-infra-manager/README.md
@@ -22,4 +22,10 @@ TODO
 ## Build & Run Application
 ```bash
 mvn clean package exec:java
+```
+
+## Build & Run Application in a Docker Container
+```bash
+cd docker
+./infra-manager-docker.sh
 ```
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/8bf136a4/ambari-infra/ambari-infra-manager/build.xml
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/build.xml b/ambari-infra/ambari-infra-manager/build.xml
index c7954d9..3d0f4da 100644
--- a/ambari-infra/ambari-infra-manager/build.xml
+++ b/ambari-infra/ambari-infra-manager/build.xml
@@ -35,6 +35,7 @@
     </copy>
     <copy todir="target/package" includeEmptyDirs="no">
       <fileset file="src/main/resources/infraManager.sh"/>
+      <fileset file="src/main/resources/infra-manager-env.sh"/>
       <fileset file="target/classes/infra-manager.properties"/>
       <fileset file="target/classes/log4j.xml"/>
     </copy>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8bf136a4/ambari-infra/ambari-infra-manager/docker/Dockerfile
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/docker/Dockerfile b/ambari-infra/ambari-infra-manager/docker/Dockerfile
new file mode 100644
index 0000000..adb584a
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/docker/Dockerfile
@@ -0,0 +1,52 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+FROM centos:centos6
+
+RUN echo root:changeme | chpasswd
+
+RUN yum clean all -y && yum update -y
+RUN yum -y install vim wget rpm-build sudo which telnet tar openssh-server openssh-clients ntp git httpd lsof
+RUN rpm -e --nodeps --justdb glibc-common
+RUN yum -y install glibc-common
+
+ENV HOME /root
+
+#Install JAVA
+ENV JAVA_VERSION 8u31
+ENV BUILD_VERSION b13
+RUN wget --no-cookies --no-check-certificate --header "Cookie: oraclelicense=accept-securebackup-cookie" "http://download.oracle.com/otn-pub/java/jdk/$JAVA_VERSION-$BUILD_VERSION/jdk-$JAVA_VERSION-linux-x64.rpm" -O jdk-8-linux-x64.rpm
+RUN rpm -ivh jdk-8-linux-x64.rpm
+ENV JAVA_HOME /usr/java/default/
+
+#Install Maven
+RUN mkdir -p /opt/maven
+WORKDIR /opt/maven
+RUN wget http://archive.apache.org/dist/maven/maven-3/3.3.1/binaries/apache-maven-3.3.1-bin.tar.gz
+RUN tar -xvzf /opt/maven/apache-maven-3.3.1-bin.tar.gz
+RUN rm -rf /opt/maven/apache-maven-3.3.1-bin.tar.gz
+
+ENV M2_HOME /opt/maven/apache-maven-3.3.1
+ENV MAVEN_OPTS -Xmx2048m
+ENV PATH $PATH:$JAVA_HOME/bin:$M2_HOME/bin
+
+# SSH key
+RUN ssh-keygen -f /root/.ssh/id_rsa -t rsa -N ''
+RUN cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys
+RUN chmod 600 /root/.ssh/authorized_keys
+RUN sed -ri 's/UsePAM yes/UsePAM no/g' /etc/ssh/sshd_config
+
+ADD bin/start.sh /root/start.sh
+RUN chmod +x /root/start.sh
+
+WORKDIR /root
+CMD /root/start.sh
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/8bf136a4/ambari-infra/ambari-infra-manager/docker/bin/start.sh
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/docker/bin/start.sh b/ambari-infra/ambari-infra-manager/docker/bin/start.sh
new file mode 100755
index 0000000..076c06f
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/docker/bin/start.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+export INFRA_MANAGER_OPTS="-Xdebug -Xrunjdwp:transport=dt_socket,address=5007,server=y,suspend=n"
+touch /root/infra-manager.log
+/root/ambari-infra-manager/infraManager.sh --port 61890 > /root/infra-manager.log
+tail -f /root/infra-manager.log
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/8bf136a4/ambari-infra/ambari-infra-manager/docker/infra-manager-docker.sh
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/docker/infra-manager-docker.sh b/ambari-infra/ambari-infra-manager/docker/infra-manager-docker.sh
new file mode 100755
index 0000000..87d6b8a
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/docker/infra-manager-docker.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+sdir="`dirname \"$0\"`"
+: ${1:?"argument is missing: (start|stop|build-and-run|build|build-docker-and-run|build-mvn-and-run|build-docker-only|build-mvn-only)"}
+command="$1"
+
+function build_infra_manager_container() {
+  pushd $sdir
+  docker build -t ambari-infra-manager:v1.0 .
+  popd
+}
+
+function build_infra_manager_project() {
+  pushd $sdir/../
+  mvn clean package -DskipTests
+  popd
+}
+
+function kill_infra_manager_container() {
+  echo "Try to remove infra manager container if exists ..."
+  docker rm -f infra-manager
+}
+
+function start_infra_manager_container() {
+ echo "Start infra manager container ..."
+ pushd $sdir/../
+ local AMBARI_INFRA_MANAGER_LOCATION=$(pwd)
+ popd
+ kill_infra_manager_container
+ docker run -d --name infra-manager --hostname infra-manager.apache.org \
+   -v $AMBARI_INFRA_MANAGER_LOCATION/target/package:/root/ambari-infra-manager -p 61890:61890 -p 5007:5007 \
+   ambari-infra-manager:v1.0
+  ip_address=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' infra-manager)
+  echo "Ambari Infra Manager container started on $ip_address (for Mac OSX route to boot2docker/docker-machine VM address, e.g.: 'sudo route add -net 172.17.0.0/16 192.168.59.103')"
+  echo "You can follow Log Search logs with 'docker logs -f infra-manager' command"
+}
+
+case $command in
+  "build-and-run")
+     build_infra_manager_project
+     build_infra_manager_container
+     start_infra_manager_container
+     ;;
+  "build")
+     build_infra_manager_project
+     build_infra_manager_container
+     ;;
+  "build-docker-and-run")
+     build_infra_manager_container
+     start_infra_manager_container
+     ;;
+  "build-mvn-and-run")
+     build_infra_manager_project
+     start_infra_manager_container
+     ;;
+  "build-docker-only")
+     build_infra_manager_container
+     ;;
+  "build-mvn-only")
+     build_infra_manager_project
+     ;;
+  "start")
+     start_infra_manager_container
+     ;;
+  "stop")
+     kill_infra_manager_container
+     ;;
+   *)
+   echo "Available commands: (start|stop|build-and-run|build|build-docker-and-run|build-mvn-and-run|build-docker-only|build-mvn-only)"
+   ;;
+esac
\ No newline at end of file

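A typical session with the helper script above, using the commands handled in its case block (run from the ambari-infra-manager module, as in the README change earlier in this commit):

    cd docker
    ./infra-manager-docker.sh build-and-run   # mvn package, docker build, docker run
    docker logs -f infra-manager              # follow the container log
    ./infra-manager-docker.sh stop            # remove the container
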
http://git-wip-us.apache.org/repos/asf/ambari/blob/8bf136a4/ambari-infra/ambari-infra-manager/src/main/resources/infra-manager-env.sh
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/resources/infra-manager-env.sh b/ambari-infra/ambari-infra-manager/src/main/resources/infra-manager-env.sh
new file mode 100644
index 0000000..c7e11c3
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/resources/infra-manager-env.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Extend with java options or system properties. e.g.: INFRA_MANAGER_OPTS="-Xdebug -Xrunjdwp:transport=dt_socket,address=5007,server=y,suspend=n"
+export INFRA_MANAGER_OPTS=""
\ No newline at end of file

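A minimal sketch of customizing the new env file, e.g. to enable the same remote-debug flags used by the Docker start.sh above (how the file gets sourced at service start is packaging-specific and not shown in this diff):

    export INFRA_MANAGER_OPTS="-Xdebug -Xrunjdwp:transport=dt_socket,address=5007,server=y,suspend=n"
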
http://git-wip-us.apache.org/repos/asf/ambari/blob/8bf136a4/ambari-infra/ambari-infra-manager/src/main/resources/infraManager.sh
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/resources/infraManager.sh b/ambari-infra/ambari-infra-manager/src/main/resources/infraManager.sh
index 9f40d5c..65287b2 100644
--- a/ambari-infra/ambari-infra-manager/src/main/resources/infraManager.sh
+++ b/ambari-infra/ambari-infra-manager/src/main/resources/infraManager.sh
@@ -17,4 +17,4 @@
 JVM="java"
 sdir="`dirname \"$0\"`"
 
-PATH=$JAVA_HOME/bin:$PATH $JVM -classpath "/etc/ambari-infra-manager/conf:$sdir:$sdir/libs/*" org.apache.ambari.infra.InfraManager ${1+"$@"}
\ No newline at end of file
+PATH=$JAVA_HOME/bin:$PATH nohup $JVM -classpath "/etc/ambari-infra-manager/conf:$sdir:$sdir/libs/*" $INFRA_MANAGER_OPTS org.apache.ambari.infra.InfraManager ${1+"$@"} &
\ No newline at end of file


[12/50] [abbrv] ambari git commit: AMBARI-21039. Atlas web UI inaccessible after adding Atlas service on upgraded cluster with Hive because /etc/atlas/conf symlink was created ahead of time (alejandro)

Posted by ad...@apache.org.
AMBARI-21039. Atlas web UI inaccessible after adding Atlas service on upgraded cluster with Hive because /etc/atlas/conf symlink was created ahead of time (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9cb87011
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9cb87011
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9cb87011

Branch: refs/heads/ambari-rest-api-explorer
Commit: 9cb87011c716e6fc8eeec0b2ca57a75fa9c7d2d9
Parents: 2e27f66
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Thu May 18 12:16:33 2017 -0400
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Thu May 18 12:16:33 2017 -0400

----------------------------------------------------------------------
 .../libraries/functions/conf_select.py                 | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9cb87011/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
index ce00f0c..facf186 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
@@ -356,11 +356,16 @@ def select(stack_name, package, version, try_create=True, ignore_errors=False):
               then the Atlas RPM will not be able to copy its artifacts into /etc/atlas/conf directory and therefore
              prevent Ambari from copying those unmanaged contents into /etc/atlas/$version/0
               '''
-              parent_dir = os.path.dirname(current_dir)
-              if os.path.exists(parent_dir):
-                Link(conf_dir, to=current_dir)
+              component_list = default("/localComponents", [])
+              if "ATLAS_SERVER" in component_list or "ATLAS_CLIENT" in component_list:
+                Logger.info("Atlas is installed on this host.")
+                parent_dir = os.path.dirname(current_dir)
+                if os.path.exists(parent_dir):
+                  Link(conf_dir, to=current_dir)
+                else:
+                  Logger.info("Will not create symlink from {0} to {1} because the destination's parent dir does not exist.".format(conf_dir, current_dir))
               else:
-                Logger.info("Will not create symlink from {0} to {1} because the destination's parent dir does not exist.".format(conf_dir, current_dir))
+                Logger.info("Will not create symlink from {0} to {1} because Atlas is not installed on this host.".format(conf_dir, current_dir))
             else:
               # Normal path for other packages
               Link(conf_dir, to=current_dir)

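The essence of the fix, as a condensed sketch: the symlink branch is now guarded by a host-level component check (default() is the resource_management helper the patch itself calls; the component names are the ones it tests for; the helper name below is a hypothetical stand-in):

    component_list = default("/localComponents", [])
    if "ATLAS_SERVER" in component_list or "ATLAS_CLIENT" in component_list:
        # only hosts that actually run Atlas get the /etc/atlas/conf symlink
        create_symlink_if_parent_exists()  # stand-in for the Link(...) logic above
    else:
        Logger.info("Atlas is not installed on this host; skipping the conf symlink.")
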

[43/50] [abbrv] ambari git commit: AMBARI-21081 : Upgrade to 2.5.1 from 2.5.0 adds cgroups related configs back to YARN and indicates restart required. (smohanty via avijayan)

Posted by ad...@apache.org.
AMBARI-21081 : Upgrade to 2.5.1 from 2.5.0 adds cgroups related configs back to YARN and indicates restart required. (smohanty via avijayan)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6c683214
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6c683214
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6c683214

Branch: refs/heads/ambari-rest-api-explorer
Commit: 6c68321448099167748efc8981103934a617dc11
Parents: 5ea441a
Author: Aravindan Vijayan <av...@hortonworks.com>
Authored: Mon May 22 10:31:50 2017 -0700
Committer: Aravindan Vijayan <av...@hortonworks.com>
Committed: Mon May 22 10:31:50 2017 -0700

----------------------------------------------------------------------
 .../stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6c683214/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml
index 0eb3366..d0b4bb1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml
@@ -193,7 +193,7 @@
         <name>yarn_cgroups_enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
@@ -205,7 +205,7 @@
         <name>yarn_cgroups_enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
@@ -217,7 +217,7 @@
         <name>yarn_cgroups_enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.mount-path</name>
@@ -229,7 +229,7 @@
         <name>yarn_cgroups_enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage</name>


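For context: the on-ambari-upgrade attribute in a stack's configuration XML governs whether Ambari adds the property to existing clusters when Ambari itself is upgraded. Flipping the four cgroups properties to

    <on-ambari-upgrade add="false"/>

means they are no longer re-added on upgrade, which is what previously triggered the spurious restart-required indicator described in the commit message.
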
[02/50] [abbrv] ambari git commit: AMBARI-20532 Ambari-server CLI to setup Database Options Broken (dsen)

Posted by ad...@apache.org.
AMBARI-20532 Ambari-server CLI to setup Database Options Broken (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/772be786
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/772be786
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/772be786

Branch: refs/heads/ambari-rest-api-explorer
Commit: 772be786d930322e4a95e4755c36ffece24d30e4
Parents: 735c413
Author: Dmytro Sen <ds...@apache.org>
Authored: Wed May 17 19:07:44 2017 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Wed May 17 19:07:44 2017 +0300

----------------------------------------------------------------------
 ambari-server/src/main/python/ambari-server.py  | 299 ++++++++------
 .../main/python/ambari_server/setupMpacks.py    |   7 +-
 .../src/test/python/TestAmbariServer.py         | 409 ++++++++++---------
 3 files changed, 389 insertions(+), 326 deletions(-)
----------------------------------------------------------------------

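The refactor below replaces a single flat option list with per-action initializers built on optparse option groups. A minimal standalone sketch of that pattern (option names are illustrative, not the full set from the patch):

    import optparse

    parser = optparse.OptionParser()
    db_group = optparse.OptionGroup(parser, 'Database options')
    db_group.add_option('--database', dest='dbms', default=None,
                        help='Database to use embedded|oracle|mysql|mssql|postgres|sqlanywhere')
    parser.add_option_group(db_group)
    (options, args) = parser.parse_args()
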

http://git-wip-us.apache.org/repos/asf/ambari/blob/772be786/ambari-server/src/main/python/ambari-server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari-server.py b/ambari-server/src/main/python/ambari-server.py
index 737be6a..4f680cb 100755
--- a/ambari-server/src/main/python/ambari-server.py
+++ b/ambari-server/src/main/python/ambari-server.py
@@ -367,7 +367,7 @@ def print_action_arguments_help(action):
             ";".join([print_opt for print_opt, _ in optional_options]))
 
 @OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
-def init_parser_options(parser):
+def init_action_parser(action, parser):
   parser.add_option('-k', '--service-user-name', dest="svc_user",
                     default=None,
                     help="User account under which the Ambari Server service will run")
@@ -455,31 +455,58 @@ def init_parser_options(parser):
   # -h reserved for help
 
 @OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
-def init_parser_options(parser):
-  parser.add_option('-f', '--init-script-file', default=None,
-                    help="File with setup script")
-  parser.add_option('-r', '--drop-script-file', default=None,
-                    help="File with drop script")
-  parser.add_option('-u', '--upgrade-script-file', default=AmbariPath.get("/var/lib/"
-                                                           "ambari-server/resources/upgrade/ddl/"
-                                                           "Ambari-DDL-Postgres-UPGRADE-1.3.0.sql"),
-                    help="File with upgrade script")
-  parser.add_option('-t', '--upgrade-stack-script-file', default=AmbariPath.get("/var/lib/"
-                                                                 "ambari-server/resources/upgrade/dml/"
-                                                                 "Ambari-DML-Postgres-UPGRADE_STACK.sql"),
-                    help="File with stack upgrade script")
-  parser.add_option('-j', '--java-home', default=None,
-                    help="Use specified java_home.  Must be valid on all hosts")
-  parser.add_option("-v", "--verbose",
-                    action="store_true", dest="verbose", default=False,
-                    help="Print verbose status messages")
-  parser.add_option("-s", "--silent",
-                    action="store_true", dest="silent", default=False,
-                    help="Silently accepts default prompt values. For db-cleanup command, silent mode will stop ambari server.")
+def init_setup_parser_options(parser):
+  database_group = optparse.OptionGroup(parser, 'Database options (command needs to include all options)')
+  database_group.add_option('--database', default=None, help="Database to use embedded|oracle|mysql|mssql|postgres|sqlanywhere", dest="dbms")
+  database_group.add_option('--databasehost', default=None, help="Hostname of database server", dest="database_host")
+  database_group.add_option('--databaseport', default=None, help="Database port", dest="database_port")
+  database_group.add_option('--databasename', default=None, help="Database/Service name or ServiceID",
+                            dest="database_name")
+  database_group.add_option('--databaseusername', default=None, help="Database user login", dest="database_username")
+  database_group.add_option('--databasepassword', default=None, help="Database user password", dest="database_password")
+  parser.add_option_group(database_group)
+
+  jdbc_group = optparse.OptionGroup(parser, 'JDBC options (command needs to include all options)')
+  jdbc_group.add_option('--jdbc-driver', default=None, help="Specifies the path to the JDBC driver JAR file or archive " \
+                                                            "with all required files (jdbc jar, libraries, etc.) for the " \
+                                                            "database type specified with the --jdbc-db option. " \
+                                                            "Used only with the --jdbc-db option. Archives are supported only" \
+                                                            " for the sqlanywhere database.",
+                        dest="jdbc_driver")
+  jdbc_group.add_option('--jdbc-db', default=None, help="Specifies the database type [postgres|mysql|mssql|oracle|hsqldb|sqlanywhere] for the " \
+                                                        "JDBC driver specified with the --jdbc-driver option. Used only with --jdbc-driver option.",
+                        dest="jdbc_db")
+  parser.add_option_group(jdbc_group)
+
+  other_group = optparse.OptionGroup(parser, 'Other options')
+
+  other_group.add_option('-j', '--java-home', default=None,
+                         help="Use specified java_home.  Must be valid on all hosts")
+  other_group.add_option('--skip-view-extraction', action="store_true", default=False, help="Skip extraction of system views", dest="skip_view_extraction")
+  other_group.add_option('--postgresschema', default=None, help="Postgres database schema name",
+                         dest="postgres_schema")
+  other_group.add_option('--sqla-server-name', default=None, help="SQL Anywhere server name", dest="sqla_server_name")
+  other_group.add_option('--sidorsname', default="sname", help="Oracle database identifier type, Service ID/Service "
+                                                               "Name sid|sname", dest="sid_or_sname")
+
+  parser.add_option_group(other_group)
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_start_parser_options(parser):
   parser.add_option('-g', '--debug', action="store_true", dest='debug', default=False,
                     help="Start ambari-server in debug mode")
   parser.add_option('-y', '--suspend-start', action="store_true", dest='suspend_start', default=False,
                     help="Freeze ambari-server Java process at startup in debug mode")
+  parser.add_option('--skip-properties-validation', action="store_true", default=False, help="Skip properties file validation", dest="skip_properties_validation")
+  parser.add_option('--skip-database-check', action="store_true", default=False, help="Skip database consistency check", dest="skip_database_check")
+  parser.add_option('--auto-fix-database', action="store_true", default=False, help="Automatically fix database consistency issues", dest="fix_database_consistency")
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_empty_parser_options(parser):
+  pass
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_ldap_sync_parser_options(parser):
   parser.add_option('--all', action="store_true", default=False, help="LDAP sync all option.  Synchronize all LDAP users and groups.",
                     dest="ldap_sync_all")
   parser.add_option('--existing', action="store_true", default=False,
@@ -488,79 +515,11 @@ def init_parser_options(parser):
                     dest="ldap_sync_users")
   parser.add_option('--groups', default=None, help="LDAP sync groups option.  Specifies the path to a CSV file of group names to be synchronized.",
                     dest="ldap_sync_groups")
-  parser.add_option('--database', default=None, help="Database to use embedded|oracle|mysql|mssql|postgres|sqlanywhere", dest="dbms")
-  parser.add_option('--databasehost', default=None, help="Hostname of database server", dest="database_host")
-  parser.add_option('--databaseport', default=None, help="Database port", dest="database_port")
-  parser.add_option('--databasename', default=None, help="Database/Service name or ServiceID",
-                    dest="database_name")
-  parser.add_option('--postgresschema', default=None, help="Postgres database schema name",
-                    dest="postgres_schema")
-  parser.add_option('--databaseusername', default=None, help="Database user login", dest="database_username")
-  parser.add_option('--databasepassword', default=None, help="Database user password", dest="database_password")
-  parser.add_option('--sidorsname', default="sname", help="Oracle database identifier type, Service ID/Service "
-                                                          "Name sid|sname", dest="sid_or_sname")
-  parser.add_option('--sqla-server-name', default=None, help="SQL Anywhere server name", dest="sqla_server_name")
-  parser.add_option('--jdbc-driver', default=None, help="Specifies the path to the JDBC driver JAR file or archive " \
-                                                        "with all required files(jdbc jar, libraries and etc), for the " \
-                                                        "database type specified with the --jdbc-db option. " \
-                                                        "Used only with --jdbc-db option. Archive is supported only for" \
-                                                        " sqlanywhere database." ,
-                    dest="jdbc_driver")
-  parser.add_option('--jdbc-db', default=None, help="Specifies the database type [postgres|mysql|mssql|oracle|hsqldb|sqlanywhere] for the " \
-                                                    "JDBC driver specified with the --jdbc-driver option. Used only with --jdbc-driver option.",
-                    dest="jdbc_db")
-  parser.add_option('--cluster-name', default=None, help="Cluster name", dest="cluster_name")
-  parser.add_option('--version-display-name', default=None, help="Display name of desired repo version", dest="desired_repo_version")
-  parser.add_option('--skip-properties-validation', action="store_true", default=False, help="Skip properties file validation", dest="skip_properties_validation")
-  parser.add_option('--skip-database-check', action="store_true", default=False, help="Skip database consistency check", dest="skip_database_check")
-  parser.add_option('--skip-view-extraction', action="store_true", default=False, help="Skip extraction of system views", dest="skip_view_extraction")
-  parser.add_option('--auto-fix-database', action="store_true", default=False, help="Automatically fix database consistency issues", dest="fix_database_consistency")
-  parser.add_option('--force-version', action="store_true", default=False, help="Force version to current", dest="force_repo_version")
-  parser.add_option('--version', dest="stack_versions", default=None, action="append", type="string",
-                    help="Specify stack version that needs to be enabled. All other stacks versions will be disabled")
-  parser.add_option('--stack', dest="stack_name", default=None, type="string",
-                    help="Specify stack name for the stack versions that needs to be enabled")
-  parser.add_option("-d", "--from-date", dest="cleanup_from_date", default=None, type="string", help="Specify date for the cleanup process in 'yyyy-MM-dd' format")
-  add_parser_options('--mpack',
-      default=None,
-      help="Specify the path for management pack to be installed/upgraded",
-      dest="mpack_path",
-      parser=parser,
-      required_for_actions=[INSTALL_MPACK_ACTION, UPGRADE_MPACK_ACTION]
-  )
-  add_parser_options('--mpack-name',
-      default=None,
-      help="Specify the management pack name to be uninstalled",
-      dest="mpack_name",
-      parser=parser,
-      required_for_actions=[UNINSTALL_MPACK_ACTION]
-  )
-  add_parser_options('--purge',
-      action="store_true",
-      default=False,
-      help="Purge existing resources specified in purge-list",
-      dest="purge",
-      parser=parser,
-      optional_for_actions=[INSTALL_MPACK_ACTION]
-  )
-  purge_resources = ",".join([STACK_DEFINITIONS_RESOURCE_NAME, SERVICE_DEFINITIONS_RESOURCE_NAME, MPACKS_RESOURCE_NAME])
-  default_purge_resources = ",".join([STACK_DEFINITIONS_RESOURCE_NAME, MPACKS_RESOURCE_NAME])
-  add_parser_options('--purge-list',
-      default=default_purge_resources,
-      help="Comma separated list of resources to purge ({0}). By default ({1}) will be purged.".format(purge_resources, default_purge_resources),
-      dest="purge_list",
-      parser=parser,
-      optional_for_actions=[INSTALL_MPACK_ACTION]
-  )
-  add_parser_options('--force',
-      action="store_true",
-      default=False,
-      help="Force install management pack",
-      dest="force",
-      parser=parser,
-      optional_for_actions=[INSTALL_MPACK_ACTION]
-  )
+  parser.add_option('--ldap-sync-admin-name', default=None, help="Username for LDAP sync", dest="ldap_sync_admin_name")
+  parser.add_option('--ldap-sync-admin-password', default=None, help="Password for LDAP sync", dest="ldap_sync_admin_password")
 
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_ldap_setup_parser_options(parser):
   parser.add_option('--ldap-url', default=None, help="Primary url for LDAP", dest="ldap_url")
   parser.add_option('--ldap-secondary-url', default=None, help="Secondary url for LDAP", dest="ldap_secondary_url")
   parser.add_option('--ldap-ssl', default=None, help="Use SSL [true/false] for LDAP", dest="ldap_ssl")
@@ -576,29 +535,83 @@ def init_parser_options(parser):
   parser.add_option('--ldap-save-settings', action="store_true", default=None, help="Save without review for LDAP", dest="ldap_save_settings")
   parser.add_option('--ldap-referral', default=None, help="Referral method [follow/ignore] for LDAP", dest="ldap_referral")
   parser.add_option('--ldap-bind-anonym', default=None, help="Bind anonymously [true/false] for LDAP", dest="ldap_bind_anonym")
-  parser.add_option('--ldap-sync-admin-name', default=None, help="Username for LDAP sync", dest="ldap_sync_admin_name")
-  parser.add_option('--ldap-sync-admin-password', default=None, help="Password for LDAP sync", dest="ldap_sync_admin_password")
   parser.add_option('--ldap-sync-username-collisions-behavior', default=None, help="Handling behavior for username collisions [convert/skip] for LDAP sync", dest="ldap_sync_username_collisions_behavior")
 
-  parser.add_option('--truststore-type', default=None, help="Type of TrustStore (jks|jceks|pkcs12)", dest="trust_store_type")
-  parser.add_option('--truststore-path', default=None, help="Path of TrustStore", dest="trust_store_path")
-  parser.add_option('--truststore-password', default=None, help="Password for TrustStore", dest="trust_store_password")
-  parser.add_option('--truststore-reconfigure', action="store_true", default=None, help="Force to reconfigure TrustStore if exits", dest="trust_store_reconfigure")
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_set_current_parser_options(parser):
+  parser.add_option('--cluster-name', default=None, help="Cluster name", dest="cluster_name")
+  parser.add_option('--version-display-name', default=None, help="Display name of desired repo version", dest="desired_repo_version")
+  parser.add_option('--force-version', action="store_true", default=False, help="Force version to current", dest="force_repo_version")
 
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_setup_security_parser_options(parser):
   parser.add_option('--security-option', default=None,
                     help="Setup security option (setup-https|encrypt-password|setup-kerberos-jaas|setup-truststore|import-certificate)",
                     dest="security_option")
-  parser.add_option('--api-ssl', default=None, help="Enable SSL for Ambari API [true/false]", dest="api_ssl")
-  parser.add_option('--api-ssl-port', default=None, help="Client API SSL port", dest="api_ssl_port")
-  parser.add_option('--import-cert-path', default=None, help="Path to Certificate (import)", dest="import_cert_path")
-  parser.add_option('--import-cert-alias', default=None, help="Alias for the imported certificate", dest="import_cert_alias")
-  parser.add_option('--import-key-path', default=None, help="Path to Private Key (import)", dest="import_key_path")
-  parser.add_option('--pem-password', default=None, help="Password for Private Key", dest="pem_password")
-  parser.add_option('--master-key', default=None, help="Master key for encrypting passwords", dest="master_key")
-  parser.add_option('--master-key-persist', default=None, help="Persist master key [true/false]", dest="master_key_persist")
-  parser.add_option('--jaas-principal', default=None, help="Kerberos principal for ambari server", dest="jaas_principal")
-  parser.add_option('--jaas-keytab', default=None, help="Keytab path for Kerberos principal", dest="jaas_keytab")
 
+  https_group = optparse.OptionGroup(parser, "setup-https options")
+  https_group.add_option('--api-ssl', default=None, help="Enable SSL for Ambari API [true/false]", dest="api_ssl")
+  https_group.add_option('--api-ssl-port', default=None, help="Client API SSL port", dest="api_ssl_port")
+  https_group.add_option('--import-key-path', default=None, help="Path to Private Key (import)", dest="import_key_path")
+  https_group.add_option('--pem-password', default=None, help="Password for Private Key", dest="pem_password")
+  parser.add_option_group(https_group)
+
+  encrypt_passwords_group = optparse.OptionGroup(parser, "encrypt-passwords options")
+  encrypt_passwords_group.add_option('--master-key', default=None, help="Master key for encrypting passwords", dest="master_key")
+  encrypt_passwords_group.add_option('--master-key-persist', default=None, help="Persist master key [true/false]", dest="master_key_persist")
+  parser.add_option_group(encrypt_passwords_group)
+
+  setup_kerberos_jaas_group = optparse.OptionGroup(parser, "setup-kerberos-jaas options")
+  setup_kerberos_jaas_group.add_option('--jaas-principal', default=None, help="Kerberos principal for ambari server", dest="jaas_principal")
+  setup_kerberos_jaas_group.add_option('--jaas-keytab', default=None, help="Keytab path for Kerberos principal", dest="jaas_keytab")
+  parser.add_option_group(setup_kerberos_jaas_group)
+
+  setup_truststore_group = optparse.OptionGroup(parser, "setup-truststore options, uses encrypt-passwords options if configured")
+  setup_truststore_group.add_option('--truststore-type', default=None, help="Type of TrustStore (jks|jceks|pkcs12)", dest="trust_store_type")
+  setup_truststore_group.add_option('--truststore-path', default=None, help="Path of TrustStore", dest="trust_store_path")
+  setup_truststore_group.add_option('--truststore-password', default=None, help="Password for TrustStore", dest="trust_store_password")
+  setup_truststore_group.add_option('--truststore-reconfigure', action="store_true", default=None, help="Force reconfiguration of the TrustStore if it already exists", dest="trust_store_reconfigure")
+  parser.add_option_group(setup_truststore_group)
+
+  import_certificate_group = optparse.OptionGroup(parser, "import-certificate options, uses --truststore-path option")
+  import_certificate_group.add_option('--import-cert-path', default=None, help="Path to Certificate (import)", dest="import_cert_path")
+  import_certificate_group.add_option('--import-cert-alias', default=None, help="Alias for the imported certificate", dest="import_cert_alias")
+  parser.add_option_group(import_certificate_group)
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_enable_stack_parser_options(parser):
+  parser.add_option('--version', dest="stack_versions", default=None, action="append", type="string",
+                    help="Specify a stack version to enable. All other stack versions will be disabled")
+  parser.add_option('--stack', dest="stack_name", default=None, type="string",
+                    help="Specify the stack name for the stack versions that need to be enabled")
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_db_cleanup_parser_options(parser):
+  parser.add_option('--cluster-name', default=None, help="Cluster name", dest="cluster_name")
+  parser.add_option("-d", "--from-date", dest="cleanup_from_date", default=None, type="string", help="Specify date for the cleanup process in 'yyyy-MM-dd' format")
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_install_mpack_parser_options(parser):
+  parser.add_option('--mpack', default=None, help="Specify the path of the management pack to be installed", dest="mpack_path")
+  parser.add_option('--purge', action="store_true", default=False, help="Purge existing resources specified in purge-list", dest="purge")
+  purge_resources = ",".join([STACK_DEFINITIONS_RESOURCE_NAME, SERVICE_DEFINITIONS_RESOURCE_NAME, MPACKS_RESOURCE_NAME])
+  default_purge_resources = ",".join([STACK_DEFINITIONS_RESOURCE_NAME, MPACKS_RESOURCE_NAME])
+
+  parser.add_option('--purge-list', default=default_purge_resources,
+                    help="Comma separated list of resources to purge ({0}). By default ({1}) will be purged.".format(purge_resources, default_purge_resources),
+                    dest="purge_list")
+  parser.add_option('--force', action="store_true", default=False, help="Force install management pack", dest="force")
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_uninstall_mpack_parser_options(parser):
+  parser.add_option('--mpack-name', default=None, help="Specify the management pack name to be uninstalled", dest="mpack_name")
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_upgrade_mpack_parser_options(parser):
+  parser.add_option('--mpack', default=None, help="Specify the path of the management pack to be upgraded", dest="mpack_path")
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_kerberos_setup_parser_options(parser):
   parser.add_option('--kerberos-setup', default=None, help="Setup Kerberos Authentication", dest="kerberos_setup")
   parser.add_option('--kerberos-enabled', default=False, help="Kerberos enabled", dest="kerberos_enabled")
   parser.add_option('--kerberos-spnego-principal', default="HTTP/_HOST", help="Kerberos SPNEGO principal", dest="kerberos_spnego_principal")
@@ -774,6 +787,46 @@ def create_user_action_map(args, options):
       }
   return action_map
 
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_action_parser(action, parser):
+  action_parser_map = {
+    SETUP_ACTION: init_setup_parser_options,
+    SETUP_JCE_ACTION: init_empty_parser_options,
+    START_ACTION: init_start_parser_options,
+    STOP_ACTION: init_empty_parser_options,
+    RESTART_ACTION: init_start_parser_options,
+    RESET_ACTION: init_empty_parser_options,
+    STATUS_ACTION: init_empty_parser_options,
+    UPGRADE_ACTION: init_empty_parser_options,
+    UPGRADE_STACK_ACTION: init_empty_parser_options,
+    LDAP_SETUP_ACTION: init_ldap_setup_parser_options,
+    LDAP_SYNC_ACTION: init_ldap_sync_parser_options,
+    SET_CURRENT_ACTION: init_set_current_parser_options,
+    SETUP_SECURITY_ACTION: init_setup_security_parser_options,
+    REFRESH_STACK_HASH_ACTION: init_empty_parser_options,
+    BACKUP_ACTION: init_empty_parser_options,
+    RESTORE_ACTION: init_empty_parser_options,
+    UPDATE_HOST_NAMES_ACTION: init_empty_parser_options,
+    CHECK_DATABASE_ACTION: init_empty_parser_options,
+    ENABLE_STACK_ACTION: init_enable_stack_parser_options,
+    SETUP_SSO_ACTION: init_empty_parser_options,
+    DB_CLEANUP_ACTION: init_db_cleanup_parser_options,
+    INSTALL_MPACK_ACTION: init_install_mpack_parser_options,
+    UNINSTALL_MPACK_ACTION: init_uninstall_mpack_parser_options,
+    UPGRADE_MPACK_ACTION: init_upgrade_mpack_parser_options,
+    PAM_SETUP_ACTION: init_empty_parser_options,
+    KERBEROS_SETUP_ACTION: init_kerberos_setup_parser_options,
+  }
+  parser.add_option("-v", "--verbose",
+                    action="store_true", dest="verbose", default=False,
+                    help="Print verbose status messages")
+  parser.add_option("-s", "--silent",
+                    action="store_true", dest="silent", default=False,
+                    help="Silently accepts default prompt values. For db-cleanup command, silent mode will stop ambari server.")
+  try:
+    action_parser_map[action](parser)
+  except KeyError:
+    parser.error("Invalid action: " + action)
 
 def setup_logging(logger, filename, logging_level):
   formatter = logging.Formatter(formatstr)
@@ -825,16 +878,6 @@ def main(options, args, parser):
 
   options.warnings = []
 
-  if are_cmd_line_db_args_blank(options):
-    options.must_set_database_options = True
-  elif not are_cmd_line_db_args_valid(options):
-    parser.error('All database options should be set. Please see help for the options.')
-  else:
-    options.must_set_database_options = False
-
-  #correct database
-  fix_database_options(options, parser)
-
   if len(args) == 0:
     print parser.print_help()
     parser.error("No action entered")
@@ -848,6 +891,17 @@ def main(options, args, parser):
   except KeyError:
     parser.error("Invalid action: " + action)
 
+  if action == SETUP_ACTION:
+    if are_cmd_line_db_args_blank(options):
+      options.must_set_database_options = True
+    elif not are_cmd_line_db_args_valid(options):
+      parser.error('All database options should be set. Please see help for the options.')
+    else:
+      options.must_set_database_options = False
+
+    #correct database
+    fix_database_options(options, parser)
+
   matches = 0
   for args_number_required in action_obj.possible_args_numbers:
     matches += int(len(args) == args_number_required)
@@ -900,8 +954,9 @@ def main(options, args, parser):
     sys.exit(options.exit_code)
 
 def mainBody():
-  parser = optparse.OptionParser(usage="usage: %prog [options] action [stack_id os]",)
-  init_parser_options(parser)
+  parser = optparse.OptionParser(usage="usage: %prog action [options]",)
+  action = sys.argv[1]
+  init_action_parser(action, parser)
   (options, args) = parser.parse_args()
 
   # check if only silent key set

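The refactoring above replaces the single monolithic option parser with per-action initializers: each ambari-server action registers only the options it understands, shared flags such as -v/--verbose are added once, and an unknown action fails fast through parser.error(). A minimal, self-contained sketch of that pattern (the action names and options here are illustrative, not the full Ambari set):

import optparse
import sys

def init_start_options(parser):
  # options that only the "start" action understands
  parser.add_option('--debug', action="store_true", default=False,
                    help="Start in debug mode", dest="debug")

def init_empty_options(parser):
  # actions that take no extra options
  pass

ACTION_PARSER_MAP = {
  'start': init_start_options,
  'stop': init_empty_options,
}

def init_action_parser(action, parser):
  # flags shared by every action are registered unconditionally
  parser.add_option('-v', '--verbose', action="store_true", default=False,
                    help="Print verbose status messages", dest="verbose")
  try:
    ACTION_PARSER_MAP[action](parser)
  except KeyError:
    parser.error("Invalid action: " + action)

if __name__ == '__main__':
  parser = optparse.OptionParser(usage="usage: %prog action [options]")
  action = sys.argv[1] if len(sys.argv) > 1 else ''
  init_action_parser(action, parser)
  (options, args) = parser.parse_args()

One consequence of parsing per action, visible in the main() hunk above, is that setup-only validation such as the database-option checks now runs only when the action is SETUP_ACTION instead of on every invocation.
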
http://git-wip-us.apache.org/repos/asf/ambari/blob/772be786/ambari-server/src/main/python/ambari_server/setupMpacks.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/setupMpacks.py b/ambari-server/src/main/python/ambari_server/setupMpacks.py
index aaf9c10..625e428 100755
--- a/ambari-server/src/main/python/ambari_server/setupMpacks.py
+++ b/ambari-server/src/main/python/ambari_server/setupMpacks.py
@@ -714,7 +714,7 @@ def _install_mpack(options, replay_mode=False, is_upgrade=False):
     _execute_hook(mpack_metadata, BEFORE_INSTALL_HOOK_NAME, tmp_root_dir)
 
   # Purge previously installed stacks and management packs
-  if options.purge and options.purge_list:
+  if not is_upgrade and options.purge and options.purge_list:
     purge_resources = options.purge_list.split(",")
     validate_purge(options, purge_resources, tmp_root_dir, mpack_metadata, replay_mode)
     purge_stacks_and_mpacks(purge_resources, replay_mode)
@@ -934,9 +934,6 @@ def upgrade_mpack(options, replay_mode=False):
   """
   logger.info("Upgrade mpack.")
   mpack_path = options.mpack_path
-  if options.purge:
-    print_error_msg("Purge is not supported with upgrade_mpack action!")
-    raise FatalException(-1, "Purge is not supported with upgrade_mpack action!")
 
   if not mpack_path:
     print_error_msg("Management pack not specified!")
@@ -962,7 +959,7 @@ def upgrade_mpack(options, replay_mode=False):
 
   print_info_msg("Management pack {0}-{1} successfully upgraded!".format(mpack_name, mpack_version))
   if not replay_mode:
-    add_replay_log(UPGRADE_MPACK_ACTION, mpack_archive_path, options.purge, options.purge_list, options.force, options.verbose)
+    add_replay_log(UPGRADE_MPACK_ACTION, mpack_archive_path, False, [], options.force, options.verbose)
 
 def replay_mpack_logs():
   """

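The setupMpacks.py change above moves the purge decision into the shared install path: purging is skipped whenever is_upgrade is set, so upgrade_mpack no longer needs to reject --purge explicitly, and its replay log entry records purge as disabled. A small sketch of the guard in isolation, with hypothetical names:

def maybe_purge(is_upgrade, purge, purge_list, purge_fn):
  # Purging existing stacks/mpacks only makes sense on a fresh install;
  # an upgrade must keep the resources the old mpack registered.
  if not is_upgrade and purge and purge_list:
    purge_fn(purge_list.split(","))
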
http://git-wip-us.apache.org/repos/asf/ambari/blob/772be786/ambari-server/src/test/python/TestAmbariServer.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestAmbariServer.py b/ambari-server/src/test/python/TestAmbariServer.py
index 9579c22..66b5ac5 100644
--- a/ambari-server/src/test/python/TestAmbariServer.py
+++ b/ambari-server/src/test/python/TestAmbariServer.py
@@ -85,7 +85,7 @@ with patch.object(platform, "linux_distribution", return_value = MagicMock(retur
                   print_info_msg, print_warning_msg, print_error_msg
                 from ambari_commons.os_utils import run_os_command, search_file, set_file_permissions, remove_file, copy_file, \
                   is_valid_filepath
-                from ambari_server.dbConfiguration import DBMSConfigFactory, check_jdbc_drivers
+                from ambari_server.dbConfiguration import DBMSConfigFactory, check_jdbc_drivers, DBMSConfig
                 from ambari_server.dbConfiguration_linux import PGConfig, LinuxDBMSConfig, OracleConfig
                 from ambari_server.properties import Properties
                 from ambari_server.resourceFilesKeeper import ResourceFilesKeeper, KeeperException
@@ -302,30 +302,27 @@ class TestAmbariServer(TestCase):
 
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
   @patch.object(_ambari_server_, "setup_security")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
   def test_main_test_setup_security(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock,
-                                    logger_mock, OptionParserMock,
-                                    setup_security_method):
-    opm = OptionParserMock.return_value
-    options = MagicMock()
-    args = ["setup-security"]
-    opm.parse_args.return_value = (options, args)
-    options.dbms = None
-    options.security_option = "setup-security"
-    options.sid_or_sname = "sid"
-    setup_security_method.return_value = None
+                                    logger_mock, setup_security_method):
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ['ambari-server', 'setup-security', '--security-option=setup-security']
+      setup_security_method.return_value = None
 
-    _ambari_server_.mainBody()
+      _ambari_server_.mainBody()
 
-    _ambari_server_.mainBody()
-    self.assertTrue(setup_security_method.called)
-    self.assertFalse(False, get_verbose())
-    self.assertFalse(False, get_silent())
-    pass
+      _ambari_server_.mainBody()
+      self.assertTrue(setup_security_method.called)
+      self.assertFalse(get_verbose())
+      self.assertFalse(get_silent())
+    finally:
+      sys.argv = tmp_argv
+      pass
 
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
   @patch.object(_ambari_server_, "setup_ambari_krb5_jaas")
@@ -404,148 +401,147 @@ class TestAmbariServer(TestCase):
   @patch.object(_ambari_server_, "start")
   @patch.object(_ambari_server_, "stop")
   @patch.object(_ambari_server_, "reset")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
   def test_main_test_setup(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock,
-                           logger_mock, OptionParserMock, reset_method, stop_method,
+                           logger_mock, reset_method, stop_method,
                            start_method, setup_method, exit_mock):
-    opm = OptionParserMock.return_value
-    options = self._create_empty_options_mock()
-    args = ["setup"]
-    opm.parse_args.return_value = (options, args)
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ["ambari-server", "setup"]
 
-    options.dbms = None
-    options.sid_or_sname = "sid"
-    _ambari_server_.mainBody()
+      _ambari_server_.mainBody()
 
-    self.assertTrue(setup_method.called)
-    self.assertFalse(start_method.called)
-    self.assertFalse(stop_method.called)
-    self.assertFalse(reset_method.called)
+      self.assertTrue(setup_method.called)
+      self.assertFalse(start_method.called)
+      self.assertFalse(stop_method.called)
+      self.assertFalse(reset_method.called)
 
-    self.assertFalse(False, get_verbose())
-    self.assertFalse(False, get_silent())
+      self.assertFalse(get_verbose())
+      self.assertFalse(get_silent())
 
-    setup_method.reset_mock()
-    start_method.reset_mock()
-    stop_method.reset_mock()
-    reset_method.reset_mock()
-    exit_mock.reset_mock()
-    args = ["setup", "-v"]
-    options = self._create_empty_options_mock()
-    opm.parse_args.return_value = (options, args)
-    options.dbms = None
-    options.sid_or_sname = "sid"
-    setup_method.side_effect = Exception("Unexpected error")
-    try:
+      setup_method.reset_mock()
+      start_method.reset_mock()
+      stop_method.reset_mock()
+      reset_method.reset_mock()
+      exit_mock.reset_mock()
+      sys.argv = ["ambari-server", "setup", "-v"]
+      setup_method.side_effect = Exception("Unexpected error")
+      try:
+        _ambari_server_.mainBody()
+      except Exception:
+        self.assertTrue(True)
+      self.assertTrue(setup_method.called)
+      self.assertFalse(start_method.called)
+      self.assertFalse(stop_method.called)
+      self.assertFalse(reset_method.called)
+      self.assertTrue(get_verbose())
+
+      setup_method.reset_mock()
+      start_method.reset_mock()
+      stop_method.reset_mock()
+      reset_method.reset_mock()
+      exit_mock.reset_mock()
+      sys.argv = ["ambari-server", "setup"]
+      setup_method.side_effect = Exception("Unexpected error")
       _ambari_server_.mainBody()
-    except Exception:
-      self.assertTrue(True)
-    self.assertTrue(setup_method.called)
-    self.assertFalse(start_method.called)
-    self.assertFalse(stop_method.called)
-    self.assertFalse(reset_method.called)
-    self.assertTrue(get_verbose())
-
-    setup_method.reset_mock()
-    start_method.reset_mock()
-    stop_method.reset_mock()
-    reset_method.reset_mock()
-    exit_mock.reset_mock()
-    args = ["setup"]
-    options = self._create_empty_options_mock()
-    opm.parse_args.return_value = (options, args)
-    options.dbms = None
-    options.sid_or_sname = "sid"
-    options.verbose = False
-    setup_method.side_effect = Exception("Unexpected error")
-    _ambari_server_.mainBody()
-    self.assertTrue(exit_mock.called)
-    self.assertTrue(setup_method.called)
-    self.assertFalse(start_method.called)
-    self.assertFalse(stop_method.called)
-    self.assertFalse(reset_method.called)
-    self.assertFalse(get_verbose())
+      self.assertTrue(exit_mock.called)
+      self.assertTrue(setup_method.called)
+      self.assertFalse(start_method.called)
+      self.assertFalse(stop_method.called)
+      self.assertFalse(reset_method.called)
+      self.assertFalse(get_verbose())
 
-    pass
+      pass
+    finally:
+      sys.argv = tmp_argv
 
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
-  @patch.object(_ambari_server_, "setup")
-  @patch("optparse.OptionParser")
+  @patch.object(PGConfig, "_setup_local_server")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
-  def test_main_with_preset_dbms(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock,
-                                 logger_mock, optionParserMock, setup_method):
-    opm = optionParserMock.return_value
-    options = self._create_empty_options_mock()
-    args = ["setup"]
-    opm.parse_args.return_value = (options, args)
+  @patch("ambari_server.serverSetup.check_ambari_user")
+  @patch('ambari_server.serverSetup.download_and_install_jdk')
+  @patch("ambari_server.serverSetup.configure_os_settings")
+  @patch.object(DBMSConfig, "setup_database")
+  @patch("ambari_server.serverSetup.check_jdbc_drivers")
+  @patch("ambari_server.serverSetup.extract_views")
+  @patch("ambari_server.serverSetup.adjust_directory_permissions")
+  @patch("ambari_server.serverSetup.service_setup")
+  def test_main_with_preset_dbms(self, service_setup_mock, adjust_directory_permissions_mock, extract_views_mock, check_jdbc_drivers_mock, setup_database_mock, configure_os_settings_mock, download_and_install_jdk_mock, check_ambari_user_mock, init_logging_mock, setup_logging_mock, get_ambari_properties_mock,
+                                 logger_mock, setup_local_db_method):
+    extract_views_mock.return_value = 0
+    check_ambari_user_mock.return_value = (0, False, 'user', None)
+    configure_os_settings_mock.return_value = 0
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ["ambari-server", "setup", "-s"]
 
-    options.dbms = "sqlanywhere"
-    options.sid_or_sname = "sname"
-    _ambari_server_.mainBody()
+      _ambari_server_.mainBody()
 
-    self.assertTrue(setup_method.called)
-    self.assertEquals(options.database_index, 5)
-    pass
+      self.assertTrue(setup_local_db_method.called)
+      pass
+    finally:
+      sys.argv = tmp_argv
 
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
   @patch.object(_ambari_server_, "setup")
   @patch.object(_ambari_server_, "fix_database_options")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
-  def test_fix_database_options_called(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock, logger_mock, optionParserMock,
+  def test_fix_database_options_called(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock, logger_mock,
                                        fixDBOptionsMock, setup_method):
-    opm = optionParserMock.return_value
-    options = self._create_empty_options_mock()
-    args = ["setup"]
-    opm.parse_args.return_value = (options, args)
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ['ambari-server', 'setup']
 
-    _ambari_server_.mainBody()
+      _ambari_server_.mainBody()
 
-    self.assertTrue(setup_method.called)
-    self.assertTrue(fixDBOptionsMock.called)
-    set_silent(False)
-    pass
+      self.assertTrue(setup_method.called)
+      self.assertTrue(fixDBOptionsMock.called)
+      set_silent(False)
+      pass
+    finally:
+      sys.argv = tmp_argv
 
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
   @patch.object(_ambari_server_, "setup")
   @patch.object(_ambari_server_, "start")
   @patch.object(_ambari_server_, "stop")
   @patch.object(_ambari_server_, "reset")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
   def test_main_test_start(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock, logger_mock,
-                           optionParserMock, reset_method, stop_method,
+                           reset_method, stop_method,
                            start_method, setup_method):
-    opm = optionParserMock.return_value
-    options = self._create_empty_options_mock()
-    args = ["setup"]
-    opm.parse_args.return_value = (options, args)
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ['ambari-server', "setup"]
 
-    options.dbms = None
-    options.sid_or_sname = "sname"
-    _ambari_server_.mainBody()
+      _ambari_server_.mainBody()
 
-    self.assertTrue(setup_method.called)
-    self.assertFalse(start_method.called)
-    self.assertFalse(stop_method.called)
-    self.assertFalse(reset_method.called)
+      self.assertTrue(setup_method.called)
+      self.assertFalse(start_method.called)
+      self.assertFalse(stop_method.called)
+      self.assertFalse(reset_method.called)
 
-    self.assertFalse(False, get_verbose())
-    self.assertFalse(False, get_silent())
-    pass
+      self.assertFalse(get_verbose())
+      self.assertFalse(get_silent())
+      pass
+    finally:
+      sys.argv = tmp_argv
 
   @not_for_platform(PLATFORM_WINDOWS)
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@@ -656,33 +652,32 @@ class TestAmbariServer(TestCase):
   @patch.object(_ambari_server_, "reset")
   @patch.object(_ambari_server_, "backup")
   @patch.object(_ambari_server_, "restore")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
   def test_main_test_backup(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock, logger_mock,
-                            optionParserMock, restore_mock, backup_mock, reset_method, stop_method,
+                            restore_mock, backup_mock, reset_method, stop_method,
                            start_method, setup_method):
-    opm = optionParserMock.return_value
-    options = self._create_empty_options_mock()
-    args = ["backup"]
-    opm.parse_args.return_value = (options, args)
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ['ambari-server', "backup"]
 
-    options.dbms = None
-    options.sid_or_sname = "sname"
-    _ambari_server_.mainBody()
+      _ambari_server_.mainBody()
 
-    self.assertTrue(backup_mock.called)
-    self.assertFalse(restore_mock.called)
-    self.assertFalse(setup_method.called)
-    self.assertFalse(start_method.called)
-    self.assertFalse(stop_method.called)
-    self.assertFalse(reset_method.called)
+      self.assertTrue(backup_mock.called)
+      self.assertFalse(restore_mock.called)
+      self.assertFalse(setup_method.called)
+      self.assertFalse(start_method.called)
+      self.assertFalse(stop_method.called)
+      self.assertFalse(reset_method.called)
 
-    self.assertFalse(False, get_verbose())
-    self.assertFalse(False, get_silent())
-    pass
+      self.assertFalse(get_verbose())
+      self.assertFalse(get_silent())
+      pass
+    finally:
+      sys.argv = tmp_argv
 
   #Restore is not yet supported on Windows
   @not_for_platform(PLATFORM_WINDOWS)
@@ -693,33 +688,31 @@ class TestAmbariServer(TestCase):
   @patch.object(_ambari_server_, "reset")
   @patch.object(_ambari_server_, "backup")
   @patch.object(_ambari_server_, "restore")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
   def test_main_test_restore(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock, logger_mock,
-                             optionParserMock, restore_mock, backup_mock, reset_method, stop_method,
+                             restore_mock, backup_mock, reset_method, stop_method,
                             start_method, setup_method):
-    opm = optionParserMock.return_value
-    options = self._create_empty_options_mock()
-    args = ["restore"]
-    opm.parse_args.return_value = (options, args)
-
-    options.dbms = None
-    options.sid_or_sname = "sname"
-    _ambari_server_.mainBody()
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ['ambari-server', "restore"]
+      _ambari_server_.mainBody()
 
-    self.assertTrue(restore_mock.called)
-    self.assertFalse(backup_mock.called)
-    self.assertFalse(setup_method.called)
-    self.assertFalse(start_method.called)
-    self.assertFalse(stop_method.called)
-    self.assertFalse(reset_method.called)
+      self.assertTrue(restore_mock.called)
+      self.assertFalse(backup_mock.called)
+      self.assertFalse(setup_method.called)
+      self.assertFalse(start_method.called)
+      self.assertFalse(stop_method.called)
+      self.assertFalse(reset_method.called)
 
-    self.assertFalse(False, get_verbose())
-    self.assertFalse(False, get_silent())
-    pass
+      self.assertFalse(get_verbose())
+      self.assertFalse(get_silent())
+      pass
+    finally:
+      sys.argv = tmp_argv
 
   @not_for_platform(PLATFORM_WINDOWS)
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@@ -791,32 +784,30 @@ class TestAmbariServer(TestCase):
   @patch.object(_ambari_server_, "start")
   @patch.object(_ambari_server_, "stop")
   @patch.object(_ambari_server_, "reset")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
   def test_main_test_reset(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock,
-                           logger_mock, optionParserMock, reset_method, stop_method,
+                           logger_mock, reset_method, stop_method,
                            start_method, setup_method):
-    opm = optionParserMock.return_value
-
-    options = self._create_empty_options_mock()
-    args = ["reset"]
-    opm.parse_args.return_value = (options, args)
-    options.dbms = None
-    options.sid_or_sname = "sid"
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ['ambari-server', "reset"]
 
-    _ambari_server_.mainBody()
+      _ambari_server_.mainBody()
 
-    self.assertFalse(setup_method.called)
-    self.assertFalse(start_method.called)
-    self.assertFalse(stop_method.called)
-    self.assertTrue(reset_method.called)
+      self.assertFalse(setup_method.called)
+      self.assertFalse(start_method.called)
+      self.assertFalse(stop_method.called)
+      self.assertTrue(reset_method.called)
 
-    self.assertFalse(False, get_verbose())
-    self.assertFalse(False, get_silent())
-    pass
+      self.assertFalse(get_verbose())
+      self.assertFalse(get_silent())
+      pass
+    finally:
+      sys.argv = tmp_argv
 
 
   @not_for_platform(PLATFORM_WINDOWS)
@@ -8497,64 +8488,84 @@ class TestAmbariServer(TestCase):
   @not_for_platform(PLATFORM_WINDOWS)
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
   @patch.object(_ambari_server_, "is_server_runing")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
   def test_main_test_status_running(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock,
-                                    logger_mock,  optionParserMock, is_server_runing_method):
-    opm = optionParserMock.return_value
-    options = self._create_empty_options_mock()
-    del options.exit_message
+                                    logger_mock, is_server_runing_method):
 
-    args = ["status"]
-    opm.parse_args.return_value = (options, args)
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ['ambari-server', "status"]
 
-    is_server_runing_method.return_value = (True, 100)
+      is_server_runing_method.return_value = (True, 100)
 
-    options.dbms = None
-    options.sid_or_sname = "sid"
 
-    try:
-      _ambari_server_.mainBody()
-    except SystemExit as e:
-      self.assertTrue(e.code == 0)
+      try:
+        _ambari_server_.mainBody()
+      except SystemExit as e:
+        self.assertTrue(e.code == 0)
 
-    self.assertTrue(is_server_runing_method.called)
-    pass
+      self.assertTrue(is_server_runing_method.called)
+      pass
+    finally:
+      sys.argv = tmp_argv
 
 
   @not_for_platform(PLATFORM_WINDOWS)
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
   @patch.object(_ambari_server_, "is_server_runing")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
   def test_main_test_status_not_running(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock,
-                                        logger_mock, optionParserMock, is_server_runing_method):
-    opm = optionParserMock.return_value
-    options = self._create_empty_options_mock()
-    del options.exit_message
+                                        logger_mock, is_server_runing_method):
 
-    args = ["status"]
-    opm.parse_args.return_value = (options, args)
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ['ambari-server', "status"]
 
-    is_server_runing_method.return_value = (False, None)
+      is_server_runing_method.return_value = (False, None)
 
-    options.dbms = None
-    options.sid_or_sname = "sid"
+      try:
+        _ambari_server_.mainBody()
+      except SystemExit as e:
+        self.assertTrue(e.code == 3)
 
+      self.assertTrue(is_server_runing_method.called)
+      pass
+    finally:
+      sys.argv = tmp_argv
+
+  @not_for_platform(PLATFORM_WINDOWS)
+  @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
+  @patch.object(_ambari_server_, "logger")
+  @patch("ambari_server.serverConfiguration.get_ambari_properties")
+  @patch.object(_ambari_server_, "setup_logging")
+  @patch.object(_ambari_server_, "init_logging")
+  def test_status_extra_option(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock,
+                                        logger_mock):
+
+    import sys
+    tmp_argv = sys.argv
     try:
-      _ambari_server_.mainBody()
-    except SystemExit as e:
-      self.assertTrue(e.code == 3)
+      sys.argv = ['ambari-server', "status", '--skip-database-check']
+      flag = False
+      try:
+        _ambari_server_.mainBody()
+      except SystemExit as e:
+        self.assertEquals(e.code, 2)
+        flag = True
 
-    self.assertTrue(is_server_runing_method.called)
-    pass
+      self.assertTrue(flag)
 
+      pass
+    finally:
+      sys.argv = tmp_argv
 
   def test_web_server_startup_timeout(self):
     from ambari_server.serverConfiguration import get_web_server_startup_timeout

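The test changes above retire the optparse.OptionParser mock: instead of faking parse_args() return values, each test now builds a real command line in sys.argv and lets mainBody() parse it, restoring the original argv in a finally block so later tests are unaffected. A minimal sketch of that save/swap/restore pattern (main() here is a stand-in for _ambari_server_.mainBody()):

import sys
import unittest

def main():
  # stand-in for mainBody(); reads the action from the real sys.argv
  return sys.argv[1]

class ArgvSwapTest(unittest.TestCase):
  def test_action_comes_from_real_argv(self):
    tmp_argv = sys.argv
    try:
      sys.argv = ['ambari-server', 'status']
      self.assertEqual(main(), 'status')
    finally:
      # always restore, otherwise the fake argv leaks into other tests
      sys.argv = tmp_argv

Because a real parser now runs, tests such as test_status_extra_option can assert that passing an option belonging to a different action (here --skip-database-check with status) makes the parser exit with code 2.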

[16/50] [abbrv] ambari git commit: AMBARI-21048. HDP 3.0 TP - create service definition for Storm with configs, kerberos, widgets, etc.(vbrodetsky)

Posted by ad...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-worker-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-worker-log4j.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-worker-log4j.xml
new file mode 100644
index 0000000..46291f7
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-worker-log4j.xml
@@ -0,0 +1,189 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="false">
+  <property>
+    <name>storm_wrkr_a1_maxfilesize</name>
+    <value>100</value>
+    <description>The maximum size of the log file before it is rotated</description>
+    <display-name>Storm Worker Log: backup file size</display-name>
+    <value-attributes>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>storm_wrkr_a1_maxbackupindex</name>
+    <value>9</value>
+    <description>The number of backup files</description>
+    <display-name>Storm Worker Log: # of backup files</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>storm_wrkr_out_maxfilesize</name>
+    <value>100</value>
+    <description>The maximum size of the log file before it is rotated</description>
+    <display-name>Storm Worker Standard out Log: backup file size</display-name>
+    <value-attributes>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>storm_wrkr_out_maxbackupindex</name>
+    <value>4</value>
+    <description>The number of backup files</description>
+    <display-name>Storm Worker Standard out Log: # of backup files</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>storm_wrkr_err_maxfilesize</name>
+    <value>100</value>
+    <description>The maximum size of the log file before it is rotated</description>
+    <display-name>Storm Worker Standard Error Log: backup file size</display-name>
+    <value-attributes>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>storm_wrkr_err_maxbackupindex</name>
+    <value>4</value>
+    <description>The number of backup files</description>
+    <display-name>Storm Worker Standard Error Log: # of backup files</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>storm-worker-log4j template</display-name>
+    <description>Custom worker.xml</description>
+    <value><![CDATA[
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<configuration monitorInterval="60">
+<properties>
+    <property name="pattern">%d{yyyy-MM-dd HH:mm:ss.SSS} %c{1.} %t [%p] %msg%n</property>
+    <property name="patternNoTime">%msg%n</property>
+    <property name="patternMetrics">%d %-8r %m%n</property>
+</properties>
+<appenders>
+    <RollingFile name="A1"
+		fileName="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}"
+		filePattern="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.%i.gz">
+        <PatternLayout>
+            <pattern>${pattern}</pattern>
+        </PatternLayout>
+        <Policies>
+            <SizeBasedTriggeringPolicy size="{{storm_wrkr_a1_maxfilesize}} MB"/> <!-- Or every 100 MB -->
+        </Policies>
+        <DefaultRolloverStrategy max="{{storm_wrkr_a1_maxbackupindex}}"/>
+    </RollingFile>
+    <RollingFile name="STDOUT"
+		fileName="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.out"
+		filePattern="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.out.%i.gz">
+        <PatternLayout>
+            <pattern>${patternNoTime}</pattern>
+        </PatternLayout>
+        <Policies>
+            <SizeBasedTriggeringPolicy size="{{storm_wrkr_out_maxfilesize}} MB"/> <!-- Or every 100 MB -->
+        </Policies>
+        <DefaultRolloverStrategy max="{{storm_wrkr_out_maxbackupindex}}"/>
+    </RollingFile>
+    <RollingFile name="STDERR"
+		fileName="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.err"
+		filePattern="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.err.%i.gz">
+        <PatternLayout>
+            <pattern>${patternNoTime}</pattern>
+        </PatternLayout>
+        <Policies>
+            <SizeBasedTriggeringPolicy size="{{storm_wrkr_err_maxfilesize}} MB"/> <!-- Or every 100 MB -->
+        </Policies>
+        <DefaultRolloverStrategy max="{{storm_wrkr_err_maxbackupindex}}"/>
+    </RollingFile>
+    <RollingFile name="METRICS"
+		fileName="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.metrics"
+		filePattern="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.metrics.%i.gz">
+        <PatternLayout>
+            <pattern>${patternMetrics}</pattern>
+        </PatternLayout>
+        <Policies>
+            <SizeBasedTriggeringPolicy size="2 MB"/>
+        </Policies>
+        <DefaultRolloverStrategy max="9"/>
+    </RollingFile>
+    <Syslog name="syslog" format="RFC5424" charset="UTF-8" host="localhost" port="514"
+        protocol="UDP" appName="[${sys:storm.id}:${sys:worker.port}]" mdcId="mdc" includeMDC="true"
+        facility="LOCAL5" enterpriseNumber="18060" newLine="true" exceptionPattern="%rEx{full}"
+        messageId="[${sys:user.name}:${sys:logging.sensitivity}]" id="storm" immediateFail="true" immediateFlush="true"/>
+</appenders>
+<loggers>
+    <root level="info"> <!-- We log everything -->
+        <appender-ref ref="A1"/>
+        <appender-ref ref="syslog"/>
+    </root>
+    <Logger name="org.apache.storm.metric.LoggingMetricsConsumer" level="info" additivity="false">
+        <appender-ref ref="METRICS"/>
+    </Logger>
+    <Logger name="STDERR" level="INFO">
+        <appender-ref ref="STDERR"/>
+        <appender-ref ref="syslog"/>
+    </Logger>
+    <Logger name="STDOUT" level="INFO">
+        <appender-ref ref="STDOUT"/>
+        <appender-ref ref="syslog"/>
+    </Logger>
+</loggers>
+</configuration>
+    ]]></value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

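In the worker.xml template above, tokens such as {{storm_wrkr_a1_maxfilesize}} are placeholders that get filled from the properties defined in the same config type when the file is materialized on a host. A rough sketch of that substitution, assuming a simple Jinja2-style {{name}} replacement (the actual rendering is handled by Ambari's template machinery):

import re

def render(template, params):
  # replace {{name}} tokens with configured values; unknown names are kept as-is
  return re.sub(r'\{\{(\w+)\}\}',
                lambda m: str(params.get(m.group(1), m.group(0))),
                template)

params = {
  'storm_wrkr_a1_maxfilesize': 100,  # MB, from this config type
  'storm_wrkr_a1_maxbackupindex': 9,
}
snippet = '<SizeBasedTriggeringPolicy size="{{storm_wrkr_a1_maxfilesize}} MB"/>'
print(render(snippet, params))  # <SizeBasedTriggeringPolicy size="100 MB"/>
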
http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/kerberos.json b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/kerberos.json
new file mode 100644
index 0000000..a034411
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/kerberos.json
@@ -0,0 +1,134 @@
+{
+  "services": [
+    {
+      "name": "STORM",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        },
+        {
+          "name": "storm_components",
+          "principal": {
+            "value": "${storm-env/storm_user}${principal_suffix}@${realm}",
+            "type": "user",
+            "configuration": "storm-env/storm_principal_name"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/storm.headless.keytab",
+            "owner": {
+              "name": "${storm-env/storm_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": ""
+            },
+            "configuration": "storm-env/storm_keytab"
+          }
+        },
+        {
+          "name": "/STORM/storm_components",
+          "principal": {
+            "configuration": "storm-atlas-application.properties/atlas.jaas.KafkaClient.option.principal"
+          },
+          "keytab": {
+            "configuration": "storm-atlas-application.properties/atlas.jaas.KafkaClient.option.keyTab"
+          }
+        }
+      ],
+      "configurations": [
+        {
+          "storm-site": {
+            "nimbus.authorizer": "org.apache.storm.security.auth.authorizer.SimpleACLAuthorizer",
+            "drpc.authorizer": "org.apache.storm.security.auth.authorizer.DRPCSimpleACLAuthorizer",
+            "ui.filter": "org.apache.hadoop.security.authentication.server.AuthenticationFilter",
+            "storm.principal.tolocal": "org.apache.storm.security.auth.KerberosPrincipalToLocal",
+            "supervisor.enable": "true",
+            "storm.zookeeper.superACL": "sasl:{{storm_bare_jaas_principal}}",
+            "java.security.auth.login.config": "{{conf_dir}}/storm_jaas.conf",
+            "nimbus.impersonation.authorizer": "org.apache.storm.security.auth.authorizer.ImpersonationAuthorizer",
+            "nimbus.impersonation.acl": "{ {{storm_bare_jaas_principal}} : {hosts: ['*'], groups: ['*']}}",
+            "nimbus.admins": "['{{storm_bare_jaas_principal}}', '{{ambari_bare_jaas_principal}}']",
+            "nimbus.supervisor.users": "['{{storm_bare_jaas_principal}}']",
+            "ui.filter.params": "{'type': 'kerberos', 'kerberos.principal': '{{storm_ui_jaas_principal}}', 'kerberos.keytab': '{{storm_ui_keytab_path}}', 'kerberos.name.rules': 'DEFAULT'}"
+          }
+        },
+        {
+          "ranger-storm-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "STORM_UI_SERVER",
+          "identities": [
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "storm-env/storm_ui_principal_name"
+              },
+              "keytab": {
+                "configuration": "storm-env/storm_ui_keytab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "NIMBUS",
+          "identities": [
+            {
+              "name": "nimbus_server",
+              "principal": {
+                "value": "nimbus/_HOST@${realm}",
+                "type": "service",
+                "configuration": "storm-env/nimbus_principal_name"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nimbus.service.keytab",
+                "owner": {
+                  "name": "${storm-env/storm_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "storm-env/nimbus_keytab"
+              }
+            },
+            {
+              "name": "/STORM/storm_components",
+              "principal": {
+                "configuration": "ranger-storm-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-storm-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "DRPC_SERVER",
+          "identities": [
+            {
+              "name": "drpc_server",
+              "reference": "/STORM/NIMBUS/nimbus_server"
+            }
+          ]
+        },
+        {
+          "name" : "SUPERVISOR"
+        }
+      ]
+    }
+  ]
+}

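Values in the kerberos descriptor above reference other configuration through ${config-type/property} and descriptor variables such as ${realm} and ${principal_suffix}. A simplified sketch of how a value like ${storm-env/storm_user}${principal_suffix}@${realm} could resolve, with illustrative data:

import re

def resolve(value, configs, variables):
  def lookup(match):
    ref = match.group(1)
    if '/' in ref:  # ${config-type/property}
      config_type, prop = ref.split('/', 1)
      return configs[config_type][prop]
    return variables.get(ref, '')  # ${realm}, ${principal_suffix}, ...
  return re.sub(r'\$\{([^}]+)\}', lookup, value)

configs = {'storm-env': {'storm_user': 'storm'}}
variables = {'principal_suffix': '-mycluster', 'realm': 'EXAMPLE.COM'}
print(resolve('${storm-env/storm_user}${principal_suffix}@${realm}',
              configs, variables))  # storm-mycluster@EXAMPLE.COM
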
http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/metainfo.xml
new file mode 100644
index 0000000..1bc23e4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/metainfo.xml
@@ -0,0 +1,179 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>STORM</name>
+      <displayName>Storm</displayName>
+      <comment>Apache Hadoop Stream processing framework</comment>
+      <version>1.0.1.3.0</version>
+      <components>
+
+        <component>
+          <name>NIMBUS</name>
+          <displayName>Nimbus</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/nimbus.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>storm_nimbus</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+
+        <component>
+          <name>SUPERVISOR</name>
+          <displayName>Supervisor</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/supervisor.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <bulkCommands>
+            <displayName>Supervisors</displayName>
+            <masterComponent>SUPERVISOR</masterComponent>
+          </bulkCommands>
+          <logs>
+            <log>
+              <logId>storm_supervisor</logId>
+              <primary>true</primary>
+            </log>
+            <log>
+              <logId>storm_worker</logId>
+            </log>
+            <log>
+              <logId>storm_logviewer</logId>
+            </log>
+          </logs>
+        </component>
+
+        <component>
+          <name>STORM_UI_SERVER</name>
+          <displayName>Storm UI Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/ui_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>storm_ui</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+
+        <component>
+          <name>DRPC_SERVER</name>
+          <displayName>DRPC Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/drpc_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>storm_drpc</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat7,amazon2015,redhat6,suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>storm_${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>storm-${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>storm-site</config-type>
+        <config-type>storm-env</config-type>
+        <config-type>ranger-storm-plugin-properties</config-type>
+        <config-type>ranger-storm-audit</config-type>
+        <config-type>ranger-storm-policymgr-ssl</config-type>
+        <config-type>ranger-storm-security</config-type>
+        <config-type>admin-properties</config-type>
+        <config-type>ranger-ugsync-site</config-type>
+        <config-type>ranger-admin-site</config-type>
+        <config-type>zookeeper-env</config-type>
+        <config-type>zoo.cfg</config-type>
+        <config-type>application.properties</config-type>
+        <config-type>storm-atlas-application.properties</config-type>
+      </configuration-dependencies>
+
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+    </service>
+  </services>
+</metainfo>

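The metainfo.xml above is the service definition Ambari reads to learn each Storm component's category, cardinality, and command script. A small sketch of extracting that component list from such a file with the standard library (the file path is illustrative):

import xml.etree.ElementTree as ET

tree = ET.parse('metainfo.xml')  # e.g. the STORM 1.0.1.3.0 definition above
for component in tree.getroot().iter('component'):
  name = component.findtext('name')
  category = component.findtext('category')        # MASTER or SLAVE
  cardinality = component.findtext('cardinality')  # e.g. "1" or "1+"
  script = component.findtext('commandScript/script')
  print('%s %s %s %s' % (name, category, cardinality, script))
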
http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/metrics.json b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/metrics.json
new file mode 100644
index 0000000..2c27d58
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/metrics.json
@@ -0,0 +1,1202 @@
+{
+  "STORM_UI_SERVER": {
+    "Component": [
+      {
+        "type": "org.apache.ambari.server.controller.metrics.RestMetricsPropertyProvider",
+        "properties": {
+          "default_port": "8744",
+          "port_config_type": "storm-site",
+          "port_property_name": "ui.port",
+          "protocol": "http",
+          "https_port_property_name" : "ui.https.port",
+          "https_property_name" : "ui.https.keystore.type"
+        },
+        "metrics": {
+          "default": {
+            "metrics/api/v1/cluster/summary/tasksTotal": {
+              "metric": "/api/v1/cluster/summary##tasksTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/topology/summary": {
+              "metric": "/api/v1/topology/summary?field=topologies##topologies",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/cluster/summary/slotsTotal": {
+              "metric": "/api/v1/cluster/summary##slotsTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/cluster/summary/slotsFree": {
+              "metric": "/api/v1/cluster/summary##slotsFree",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/cluster/summary/supervisors": {
+              "metric": "/api/v1/cluster/summary##supervisors",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/cluster/summary/executorsTotal": {
+              "metric": "/api/v1/cluster/summary##executorsTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/cluster/summary/slotsUsed": {
+              "metric": "/api/v1/cluster/summary##slotsUsed",
+              "pointInTime": true,
+              "temporal": false
+            },
+              "metrics/api/v1/nimbus/summary": {
+              "metric": "/api/v1/nimbus/summary?field=nimbuses##nimbuses",
+              "pointInTime": true,
+              "temporal": false
+          }
+
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "org.apache.ambari.server.controller.metrics.RestMetricsPropertyProvider",
+        "properties": {
+          "default_port": "8744",
+          "port_config_type": "storm-site",
+          "port_property_name": "ui.port",
+          "protocol": "http",
+          "https_port_property_name" : "ui.https.port",
+          "https_property_name" : "ui.https.keystore.type"
+        },
+        "metrics": {
+          "default": {
+            "metrics/api/v1/cluster/summary/tasksTotal": {
+              "metric": "/api/v1/cluster/summary##tasksTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/topology/summary": {
+              "metric": "/api/v1/topology/summary?field=topologies##topologies",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/cluster/summary/slotsTotal": {
+              "metric": "/api/v1/cluster/summary##slotsTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/cluster/summary/slotsFree": {
+              "metric": "/api/v1/cluster/summary##slotsFree",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/cluster/summary/supervisors": {
+              "metric": "/api/v1/cluster/summary##supervisors",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/cluster/summary/executorsTotal": {
+              "metric": "/api/v1/cluster/summary##executorsTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/cluster/summary/slotsUsed": {
+              "metric": "/api/v1/cluster/summary##slotsUsed",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/nimbus/summary": {
+              "metric": "/api/v1/nimbus/summary?field=nimbuses##nimbuses",
+              "pointInTime": true,
+              "temporal": false
+            }
+          }
+        }
+      }
+    ]
+  },
+  "NIMBUS": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_in": {
+              "metric": "bytes_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_out": {
+              "metric": "bytes_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_aidle": {
+              "metric": "cpu_aidle",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_idle": {
+              "metric": "cpu_idle",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_nice": {
+              "metric": "cpu_nice",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_num": {
+              "metric": "cpu_num",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_speed": {
+              "metric": "cpu_speed",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_system": {
+              "metric": "cpu_system",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_user": {
+              "metric": "cpu_user",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_wio": {
+              "metric": "cpu_wio",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_free": {
+              "metric": "disk_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_total": {
+              "metric": "disk_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/load_fifteen": {
+              "metric": "load_fifteen",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/load_five": {
+              "metric": "load_five",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/load_one": {
+              "metric": "load_one",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_buffers": {
+              "metric": "mem_buffers",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_cached": {
+              "metric": "mem_cached",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_free": {
+              "metric": "mem_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_shared": {
+              "metric": "mem_shared",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_total": {
+              "metric": "mem_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_in": {
+              "metric": "pkts_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_out": {
+              "metric": "pkts_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_run": {
+              "metric": "proc_run",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_total": {
+              "metric": "proc_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_free": {
+              "metric": "swap_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_total": {
+              "metric": "swap_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/storm/nimbus/jvm/memory/heap/committed": {
+              "metric": "Nimbus.JVM.Memory.Heap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/heap/init": {
+              "metric": "Nimbus.JVM.Memory.Heap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/heap/max": {
+              "metric": "Nimbus.JVM.Memory.Heap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/heap/used": {
+              "metric": "Nimbus.JVM.Memory.Heap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/nonheap/committed": {
+              "metric": "Nimbus.JVM.Memory.NonHeap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/nonheap/init": {
+              "metric": "Nimbus.JVM.Memory.NonHeap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/nonheap/max": {
+              "metric": "Nimbus.JVM.Memory.NonHeap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/nonheap/used": {
+              "metric": "Nimbus.JVM.Memory.NonHeap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/os/processcputime": {
+              "metric": "Nimbus.JVM.OS.ProcessCpuTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/threading/daemonthreadcount": {
+              "metric": "Nimbus.JVM.Threading.DaemonThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/threading/threadcount": {
+              "metric": "Nimbus.JVM.Threading.ThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+
+            "metrics/storm/nimbus/freeslots": {
+              "metric": "Free Slots",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/supervisors": {
+              "metric": "Supervisors",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/topologies": {
+              "metric": "Topologies",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/totalexecutors": {
+              "metric": "Total Executors",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/totalslots": {
+              "metric": "Total Slots",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/totaltasks": {
+              "metric": "Total Tasks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/usedslots": {
+              "metric": "Used Slots",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_in": {
+              "metric": "bytes_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_out": {
+              "metric": "bytes_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_aidle": {
+              "metric": "cpu_aidle",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_idle": {
+              "metric": "cpu_idle",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_nice": {
+              "metric": "cpu_nice",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_num": {
+              "metric": "cpu_num",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_speed": {
+              "metric": "cpu_speed",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_system": {
+              "metric": "cpu_system",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_user": {
+              "metric": "cpu_user",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_wio": {
+              "metric": "cpu_wio",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_free": {
+              "metric": "disk_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_total": {
+              "metric": "disk_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/load_fifteen": {
+              "metric": "load_fifteen",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/load_five": {
+              "metric": "load_five",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/load_one": {
+              "metric": "load_one",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_buffers": {
+              "metric": "mem_buffers",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_cached": {
+              "metric": "mem_cached",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_free": {
+              "metric": "mem_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_shared": {
+              "metric": "mem_shared",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_total": {
+              "metric": "mem_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_in": {
+              "metric": "pkts_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_out": {
+              "metric": "pkts_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_run": {
+              "metric": "proc_run",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/process/proc_total": {
+              "metric": "proc_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_free": {
+              "metric": "swap_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_total": {
+              "metric": "swap_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/storm/nimbus/jvm/memory/heap/committed": {
+              "metric": "Nimbus.JVM.Memory.Heap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/heap/init": {
+              "metric": "Nimbus.JVM.Memory.Heap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/heap/max": {
+              "metric": "Nimbus.JVM.Memory.Heap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/heap/used": {
+              "metric": "Nimbus.JVM.Memory.Heap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/nonheap/committed": {
+              "metric": "Nimbus.JVM.Memory.NonHeap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/nonheap/init": {
+              "metric": "Nimbus.JVM.Memory.NonHeap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/nonheap/max": {
+              "metric": "Nimbus.JVM.Memory.NonHeap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/nonheap/used": {
+              "metric": "Nimbus.JVM.Memory.NonHeap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/os/processcputime": {
+              "metric": "Nimbus.JVM.OS.ProcessCpuTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/threading/daemonthreadcount": {
+              "metric": "Nimbus.JVM.Threading.DaemonThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/threading/threadcount": {
+              "metric": "Nimbus.JVM.Threading.ThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      }
+    ]
+  },
+  "SUPERVISOR": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_in": {
+              "metric": "bytes_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_out": {
+              "metric": "bytes_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_aidle": {
+              "metric": "cpu_aidle",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_idle": {
+              "metric": "cpu_idle",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_nice": {
+              "metric": "cpu_nice",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_num": {
+              "metric": "cpu_num",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_speed": {
+              "metric": "cpu_speed",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_system": {
+              "metric": "cpu_system",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_user": {
+              "metric": "cpu_user",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_wio": {
+              "metric": "cpu_wio",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_free": {
+              "metric": "disk_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_total": {
+              "metric": "disk_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/load_fifteen": {
+              "metric": "load_fifteen",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/load_five": {
+              "metric": "load_five",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/load_one": {
+              "metric": "load_one",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_buffers": {
+              "metric": "mem_buffers",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_cached": {
+              "metric": "mem_cached",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_free": {
+              "metric": "mem_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_shared": {
+              "metric": "mem_shared",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_total": {
+              "metric": "mem_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_in": {
+              "metric": "pkts_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_out": {
+              "metric": "pkts_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_run": {
+              "metric": "proc_run",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_total": {
+              "metric": "proc_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_free": {
+              "metric": "swap_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_total": {
+              "metric": "swap_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/storm/supervisor/jvm/memory/heap/committed": {
+              "metric": "Supervisor.JVM.Memory.Heap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/heap/init": {
+              "metric": "Supervisor.JVM.Memory.Heap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/heap/max": {
+              "metric": "Supervisor.JVM.Memory.Heap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/heap/used": {
+              "metric": "Supervisor.JVM.Memory.Heap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/nonheap/committed": {
+              "metric": "Supervisor.JVM.Memory.NonHeap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/nonheap/init": {
+              "metric": "Supervisor.JVM.Memory.NonHeap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/nonheap/max": {
+              "metric": "Supervisor.JVM.Memory.NonHeap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/nonheap/used": {
+              "metric": "Supervisor.JVM.Memory.NonHeap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/os/processcputime": {
+              "metric": "Supervisor.JVM.OS.ProcessCpuTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/threading/daemonthreadcount": {
+              "metric": "Supervisor.JVM.Threading.DaemonThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/threading/threadcount": {
+              "metric": "Supervisor.JVM.Threading.ThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/heap/committed": {
+              "metric": "Worker.(.+).JVM.Memory.Heap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/heap/init": {
+              "metric": "Worker.(.+).JVM.Memory.Heap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/heap/max": {
+              "metric": "Worker.(.+).JVM.Memory.Heap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/heap/used": {
+              "metric": "Worker.(.+).JVM.Memory.Heap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/nonheap/committed": {
+              "metric": "Worker.(.+).JVM.Memory.NonHeap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/nonheap/init": {
+              "metric": "Worker.(.+).JVM.Memory.NonHeap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/nonheap/max": {
+              "metric": "Worker.(.+).JVM.Memory.NonHeap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/nonheap/used": {
+              "metric": "Worker.(.+).JVM.Memory.NonHeap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/os/processcputime": {
+              "metric": "Worker.(.+).JVM.OS.ProcessCpuTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/threading/daemonthreadcount": {
+              "metric": "Worker.(.+).JVM.Threading.DaemonThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/threading/threadcount": {
+              "metric": "Worker.(.+).JVM.Threading.ThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_in": {
+              "metric": "bytes_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_out": {
+              "metric": "bytes_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_aidle": {
+              "metric": "cpu_aidle",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_idle": {
+              "metric": "cpu_idle",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_nice": {
+              "metric": "cpu_nice",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_num": {
+              "metric": "cpu_num",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_speed": {
+              "metric": "cpu_speed",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_system": {
+              "metric": "cpu_system",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_user": {
+              "metric": "cpu_user",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_wio": {
+              "metric": "cpu_wio",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_free": {
+              "metric": "disk_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_total": {
+              "metric": "disk_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/load_fifteen": {
+              "metric": "load_fifteen",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/load_five": {
+              "metric": "load_five",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/load_one": {
+              "metric": "load_one",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_buffers": {
+              "metric": "mem_buffers",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_cached": {
+              "metric": "mem_cached",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_free": {
+              "metric": "mem_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_shared": {
+              "metric": "mem_shared",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_total": {
+              "metric": "mem_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_in": {
+              "metric": "pkts_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_out": {
+              "metric": "pkts_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_run": {
+              "metric": "proc_run",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/process/proc_total": {
+              "metric": "proc_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_free": {
+              "metric": "swap_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_total": {
+              "metric": "swap_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/storm/supervisor/jvm/memory/heap/committed": {
+              "metric": "Supervisor.JVM.Memory.Heap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/heap/init": {
+              "metric": "Supervisor.JVM.Memory.Heap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/heap/max": {
+              "metric": "Supervisor.JVM.Memory.Heap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/heap/used": {
+              "metric": "Supervisor.JVM.Memory.Heap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/nonheap/committed": {
+              "metric": "Supervisor.JVM.Memory.NonHeap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/nonheap/init": {
+              "metric": "Supervisor.JVM.Memory.NonHeap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/nonheap/max": {
+              "metric": "Supervisor.JVM.Memory.NonHeap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/nonheap/used": {
+              "metric": "Supervisor.JVM.Memory.NonHeap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/os/processcputime": {
+              "metric": "Supervisor.JVM.OS.ProcessCpuTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/threading/daemonthreadcount": {
+              "metric": "Supervisor.JVM.Threading.DaemonThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/threading/threadcount": {
+              "metric": "Supervisor.JVM.Threading.ThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/heap/committed": {
+              "metric": "Worker.(.+).JVM.Memory.Heap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/heap/init": {
+              "metric": "Worker.(.+).JVM.Memory.Heap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/heap/max": {
+              "metric": "Worker.(.+).JVM.Memory.Heap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/heap/used": {
+              "metric": "Worker.(.+).JVM.Memory.Heap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/nonheap/committed": {
+              "metric": "Worker.(.+).JVM.Memory.NonHeap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/nonheap/init": {
+              "metric": "Worker.(.+).JVM.Memory.NonHeap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/nonheap/max": {
+              "metric": "Worker.(.+).JVM.Memory.NonHeap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/nonheap/used": {
+              "metric": "Worker.(.+).JVM.Memory.NonHeap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/os/processcputime": {
+              "metric": "Worker.(.+).JVM.OS.ProcessCpuTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/threading/daemonthreadcount": {
+              "metric": "Worker.(.+).JVM.Threading.DaemonThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/threading/threadcount": {
+              "metric": "Worker.(.+).JVM.Threading.ThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      }
+    ]
+  }
+}
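
In the metric specs above, the part before the double hash is the path requested from the Storm UI server (on ui.port, default 8744), and the part after it is the field extracted from the JSON response; RestMetricsPropertyProvider performs that resolution on the Ambari server. A rough, hand-runnable sketch of the same lookup (host and port are placeholders, not from the patch):

    import json
    import urllib2  # these stack scripts target Python 2

    def fetch_rest_metric(host, port, spec):
        # spec looks like '/api/v1/cluster/summary##slotsFree'
        path, field = spec.split('##', 1)
        response = urllib2.urlopen('http://%s:%s%s' % (host, port, path))
        return json.load(response).get(field)

    # fetch_rest_metric('storm-ui.example.com', 8744,
    #                   '/api/v1/cluster/summary##slotsFree')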

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/alerts/check_supervisor_process_win.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/alerts/check_supervisor_process_win.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/alerts/check_supervisor_process_win.py
new file mode 100644
index 0000000..a698415
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/alerts/check_supervisor_process_win.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management.libraries.functions import check_windows_service_status
+
+
+RESULT_CODE_OK = 'OK'
+RESULT_CODE_CRITICAL = 'CRITICAL'
+RESULT_CODE_UNKNOWN = 'UNKNOWN'
+
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return ()
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  try:
+    check_windows_service_status("supervisor")
+    return (RESULT_CODE_OK, ["Supervisor is running"])
+  except Exception:
+    return (RESULT_CODE_CRITICAL, ["Supervisor is stopped"])
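
The script above follows the Ambari alert contract: get_tokens() declares which {{site/property}} values the server should resolve, and execute() returns a (result_code, [label]) tuple. A hedged illustration of invoking it by hand on a Windows host (the module name is assumed to be importable from the alerts directory):

    from check_supervisor_process_win import execute

    code, labels = execute(configurations={}, parameters={})
    print code, labels[0]  # e.g. "OK Supervisor is running"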

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/files/wordCount.jar
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/files/wordCount.jar b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/files/wordCount.jar
new file mode 100644
index 0000000..aed64be
Binary files /dev/null and b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/files/wordCount.jar differ

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/drpc_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/drpc_server.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/drpc_server.py
new file mode 100644
index 0000000..f991e71
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/drpc_server.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.functions import check_process_status
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+from storm import storm
+from service import service
+from service_check import ServiceCheck
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_JAAS_CONF
+
+class DrpcServer(Script):
+
+  def get_component_name(self):
+    return "storm-client"
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    storm()
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "storm", params.version)
+      stack_select.select("storm-client", params.version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+    service("drpc", action="start")
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    service("drpc", action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_drpc)
+
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+
+  def get_user(self):
+    import params
+    return params.storm_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.pid_drpc]
+
+if __name__ == "__main__":
+  DrpcServer().execute()
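
status() above delegates to check_process_status, which raises ComponentIsNotRunning when the pid file is missing or the recorded process is gone. Conceptually (a simplified sketch, not the library code) the check amounts to:

    import os

    def process_is_running(pid_file):
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
            os.kill(pid, 0)  # signal 0 probes for existence without killing
            return True
        except (IOError, ValueError, OSError):
            return False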

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/nimbus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/nimbus.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/nimbus.py
new file mode 100644
index 0000000..360af5d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/nimbus.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.functions import check_process_status
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+from storm import storm
+from service import service
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_JAAS_CONF
+from setup_ranger_storm import setup_ranger_storm
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from resource_management.core.resources.service import Service
+
+class Nimbus(Script):
+  def get_component_name(self):
+    return "storm-nimbus"
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    storm("nimbus")
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class NimbusDefault(Nimbus):
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "storm", params.version)
+      stack_select.select("storm-client", params.version)
+      stack_select.select("storm-nimbus", params.version)
+
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    setup_ranger_storm(upgrade_type=upgrade_type)
+    service("nimbus", action="start")
+
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    service("nimbus", action="stop")
+
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_nimbus)
+
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+
+  def get_user(self):
+    import params
+    return params.storm_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.pid_nimbus]
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class NimbusWindows(Nimbus):
+  def start(self, env):
+    import status_params
+    env.set_params(status_params)
+    Service(status_params.nimbus_win_service_name, action="start")
+
+  def stop(self, env):
+    import status_params
+    env.set_params(status_params)
+    Service(status_params.nimbus_win_service_name, action="stop")
+
+  def status(self, env):
+    import status_params
+    from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
+    env.set_params(status_params)
+    check_windows_service_status(status_params.nimbus_win_service_name)
+
+if __name__ == "__main__":
+  Nimbus().execute()
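
Nimbus is split into NimbusDefault and NimbusWindows via the OsFamilyImpl decorator, which registers one implementation per OS family and lets Script.execute() pick the right one at runtime. A toy sketch of that dispatch pattern (names simplified; the real decorator keys off the agent's detected OS family):

    import platform

    _impls = {}

    def os_impl(family):
        def register(cls):
            _impls[family] = cls
            return cls
        return register

    def resolve(base):
        family = 'winsrv' if platform.system() == 'Windows' else 'default'
        return _impls.get(family, base)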

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/nimbus_prod.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/nimbus_prod.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/nimbus_prod.py
new file mode 100644
index 0000000..39bda4d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/nimbus_prod.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.script import Script
+from storm import storm
+from supervisord_service import supervisord_service, supervisord_check_status
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+
+class Nimbus(Script):
+
+  def get_component_name(self):
+    return "storm-nimbus"
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    storm()
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "storm", params.version)
+      stack_select.select("storm-client", params.version)
+      stack_select.select("storm-nimbus", params.version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+    supervisord_service("nimbus", action="start")
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    supervisord_service("nimbus", action="stop")
+
+  def status(self, env):
+    supervisord_check_status("nimbus")
+
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+
+  def get_user(self):
+    import params
+    return params.storm_user
+
+if __name__ == "__main__":
+  Nimbus().execute()
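
Unlike nimbus.py, this variant hands process supervision to supervisord through the supervisord_service and supervisord_check_status helpers (defined in supervisord_service.py, which is not shown in this commit). Roughly, such a helper wraps supervisorctl; a sketch under the assumption that the managed program is named storm-nimbus:

    import subprocess

    def supervisord_service(component, action):
        # e.g. supervisord_service('nimbus', action='start')
        #   -> runs: supervisorctl start storm-nimbus   (program name assumed)
        return subprocess.call(['supervisorctl', action, 'storm-%s' % component])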

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/pacemaker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/pacemaker.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/pacemaker.py
new file mode 100644
index 0000000..fa3112d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/pacemaker.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.functions import check_process_status
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+from storm import storm
+from service import service
+from service_check import ServiceCheck
+from resource_management.libraries.functions.security_commons import build_expectations, \
+    cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+    FILE_TYPE_JAAS_CONF
+
+class PaceMaker(Script):
+
+  def get_component_name(self):
+    return "storm-client"
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    storm()
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "storm", params.version)
+      stack_select.select("storm-client", params.version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+    service("pacemaker", action="start")
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    service("pacemaker", action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_pacemaker)
+
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+
+  def get_user(self):
+    import params
+    return params.storm_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.pid_pacemaker]
+
+if __name__ == "__main__":
+  PaceMaker().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/params.py
new file mode 100644
index 0000000..5d53de8
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/params.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons import OSCheck
+from resource_management.libraries.functions.default import default
+
+if OSCheck.is_windows_family():
+  from params_windows import *
+else:
+  from params_linux import *
+
+retryAble = default("/commandParams/command_retry_enabled", False)


[20/50] [abbrv] ambari git commit: AMBARI-21055: Search from Storm Ambari View Broken. (Sanket Shah via Sriharsha Chintalapani)

Posted by ad...@apache.org.
AMBARI-21055: Search from Storm Ambari View Broken. (Sanket Shah via
Sriharsha Chintalapani)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/71ed2814
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/71ed2814
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/71ed2814

Branch: refs/heads/ambari-rest-api-explorer
Commit: 71ed28140cd5d91fedb4e1403a8fa9aa3482ccf0
Parents: 3499004
Author: Sriharsha Chintalapani <ha...@hortonworks.com>
Authored: Thu May 18 14:04:38 2017 -0700
Committer: Sriharsha Chintalapani <ha...@hortonworks.com>
Committed: Thu May 18 14:04:38 2017 -0700

----------------------------------------------------------------------
 .../ambari/storm/StormDetailsServlet.java       | 81 ++++++++++++++++++++
 .../storm/src/main/resources/WEB-INF/web.xml    |  8 ++
 .../resources/scripts/components/SearchLogs.jsx | 38 +++++----
 3 files changed, 110 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/71ed2814/contrib/views/storm/src/main/java/org/apache/ambari/storm/StormDetailsServlet.java
----------------------------------------------------------------------
diff --git a/contrib/views/storm/src/main/java/org/apache/ambari/storm/StormDetailsServlet.java b/contrib/views/storm/src/main/java/org/apache/ambari/storm/StormDetailsServlet.java
new file mode 100644
index 0000000..42c3277
--- /dev/null
+++ b/contrib/views/storm/src/main/java/org/apache/ambari/storm/StormDetailsServlet.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.view.storm;
+
+import org.apache.ambari.view.ViewContext;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import javax.servlet.ServletConfig;
+import javax.servlet.ServletContext;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.net.URLDecoder;
+import java.nio.charset.StandardCharsets;
+import java.util.*;
+import java.io.*;
+
+/**
+ * Simple servlet for proxying requests with doAs impersonation.
+ */
+public class StormDetailsServlet extends HttpServlet {
+
+  private ViewContext viewContext;
+  private static final String STORM_HOST = "storm.host";
+  private static final String STORM_PORT = "storm.port";
+  private static final String STORM_SSL_ENABLED = "storm.sslEnabled";
+  private String stormURL;
+
+  @Override
+  public void init(ServletConfig config) throws ServletException {
+    super.init(config);
+
+    ServletContext context = config.getServletContext();
+    viewContext = (ViewContext) context.getAttribute(ViewContext.CONTEXT_ATTRIBUTE);
+    String sslEnabled = viewContext.getProperties().get(STORM_SSL_ENABLED);
+    String hostname = viewContext.getProperties().get(STORM_HOST);
+    String port = viewContext.getProperties().get(STORM_PORT);
+    stormURL = ("true".equals(sslEnabled) ? "https" : "http") + "://" + hostname + ":" + port;
+  }
+
+  @Override
+  protected void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException {
+    String hostDetails = "{\"hostdata\":\""+stormURL+"\"}";
+    InputStream resultStream = new ByteArrayInputStream(hostDetails.getBytes(StandardCharsets.UTF_8));
+    this.setResponse(request, response, resultStream);
+  }
+
+  /**
+   * Set response to the get/post request
+   * @param request      HttpServletRequest
+   * @param response     HttpServletResponse
+   * @param resultStream InputStream
+   */
+  public void setResponse(HttpServletRequest request, HttpServletResponse response, InputStream resultStream) throws IOException{
+    Scanner scanner = new Scanner(resultStream).useDelimiter("\\A");
+    String result = scanner.hasNext() ? scanner.next() : "";
+    boolean notFound = result.isEmpty() || result.contains("\"exception\":\"NotFoundException\"");
+    response.setContentType(request.getContentType());
+    response.setStatus(notFound ? HttpServletResponse.SC_NOT_FOUND : HttpServletResponse.SC_OK);
+    PrintWriter writer = response.getWriter();
+    writer.print(result);
+  }
+}
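
The servlet above answers GET requests with a one-field JSON document of the
form {"hostdata":"http(s)://host:port"}. As a rough illustration (not part of
the commit), a standalone Java client could fetch and extract the Storm UI
address like this; the endpoint URL shown is a hypothetical example, since the
real one is resolved through the Ambari view context:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class StormDetailsClientSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical view URL; adjust to the deployed view instance.
        URL url = new URL("http://ambari-host:8080/views/Storm_Monitoring/0.1.0/STORM_1/storm_details");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        StringBuilder body = new StringBuilder();
        try (BufferedReader reader = new BufferedReader(
            new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
          String line;
          while ((line = reader.readLine()) != null) {
            body.append(line);
          }
        }
        // Minimal extraction of the "hostdata" value; a real client would use a JSON parser.
        String json = body.toString();
        String marker = "\"hostdata\":\"";
        int start = json.indexOf(marker) + marker.length();
        System.out.println("Storm UI: " + json.substring(start, json.indexOf('"', start)));
      }
    }

This is the same contract the SearchLogs.jsx change below relies on: it asks
/storm_details for the Storm UI address instead of deriving it from the proxy
URL.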

http://git-wip-us.apache.org/repos/asf/ambari/blob/71ed2814/contrib/views/storm/src/main/resources/WEB-INF/web.xml
----------------------------------------------------------------------
diff --git a/contrib/views/storm/src/main/resources/WEB-INF/web.xml b/contrib/views/storm/src/main/resources/WEB-INF/web.xml
index e406de1..cc89ac7 100644
--- a/contrib/views/storm/src/main/resources/WEB-INF/web.xml
+++ b/contrib/views/storm/src/main/resources/WEB-INF/web.xml
@@ -30,8 +30,16 @@ limitations under the License. Kerberos, LDAP, Custom. Binary/Htt
     <servlet-name>ProxyServlet</servlet-name>
     <servlet-class>org.apache.ambari.view.storm.ProxyServlet</servlet-class>
   </servlet>
+  <servlet>
+    <servlet-name>StormDetailsServlet</servlet-name>
+    <servlet-class>org.apache.ambari.view.storm.StormDetailsServlet</servlet-class>
+  </servlet>
   <servlet-mapping>
     <servlet-name>ProxyServlet</servlet-name>
     <url-pattern>/proxy</url-pattern>
   </servlet-mapping>
+  <servlet-mapping>
+    <servlet-name>StormDetailsServlet</servlet-name>
+    <url-pattern>/storm_details</url-pattern>
+  </servlet-mapping>
 </web-app>

http://git-wip-us.apache.org/repos/asf/ambari/blob/71ed2814/contrib/views/storm/src/main/resources/scripts/components/SearchLogs.jsx
----------------------------------------------------------------------
diff --git a/contrib/views/storm/src/main/resources/scripts/components/SearchLogs.jsx b/contrib/views/storm/src/main/resources/scripts/components/SearchLogs.jsx
index b37170c..1581a16 100644
--- a/contrib/views/storm/src/main/resources/scripts/components/SearchLogs.jsx
+++ b/contrib/views/storm/src/main/resources/scripts/components/SearchLogs.jsx
@@ -25,7 +25,7 @@ define(['react',
 		getInitialState: function() {
 			return null;
 		},
-		render: function() {			
+		render: function() {
 			return (
 				<div className="col-md-3 pull-right searchbar">
                     <div className="input-group">
@@ -60,26 +60,30 @@ define(['react',
             var searchBoxEl = document.getElementById('searchBox');
             var searchArchivedLogsEl = document.getElementById('searchArchivedLogs');
             var deepSearchEl = document.getElementById('deepSearch');
+            var topologyId = this.props.id;
 
-            var url = App.baseURL.split('?url=')[1]+'/';
-            if(deepSearchEl.checked == true){
-                url += "deep_search_result.html";
-            }else{
-                url += "search_result.html";
-            }
-            url += '?search='+searchBoxEl.value+'&id='+ this.props.id +'&count=1';
-            if(searchArchivedLogsEl.checked == true){
+            $.get(App.baseURL.replace('proxy?url=', 'storm_details'))
+              .success(function(response){
+                var url = JSON.parse(response).hostdata+'/';
                 if(deepSearchEl.checked == true){
-                    url += "&search-archived=on";
+                    url += "deep_search_result.html";
                 }else{
-                    url += "&searchArchived=checked";
+                    url += "search_result.html";
                 }
-            }
-            window.open(url, '_blank');
+                url += '?search='+searchBoxEl.value+'&id='+ topologyId +'&count=1';
+                if(searchArchivedLogsEl.checked == true){
+                    if(deepSearchEl.checked == true){
+                        url += "&search-archived=on";
+                    }else{
+                        url += "&searchArchived=checked";
+                    }
+                }
+                window.open(url, '_blank');
 
-            searchBoxEl.value = '';
-            searchArchivedLogsEl.checked = false;
-            deepSearchEl.checked = false;
+                searchBoxEl.value = '';
+                searchArchivedLogsEl.checked = false;
+                deepSearchEl.checked = false;
+              });
         },
-    }); 
+    });
 });


[31/50] [abbrv] ambari git commit: AMBARI-21071. Ambari Infra Manager: add jobs/steps REST API endpoints (oleewere)

Posted by ad...@apache.org.
AMBARI-21071. Ambari Infra Manager: add jobs/steps REST API endpoints (oleewere)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9ffef7fc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9ffef7fc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9ffef7fc

Branch: refs/heads/ambari-rest-api-explorer
Commit: 9ffef7fc58b4313dfdf8d96badce5198855934a7
Parents: c23602c
Author: oleewere <ol...@gmail.com>
Authored: Fri May 19 14:41:42 2017 +0200
Committer: oleewere <ol...@gmail.com>
Committed: Sat May 20 12:51:51 2017 +0200

----------------------------------------------------------------------
 .../org/apache/ambari/infra/InfraManager.java   |   1 +
 .../conf/batch/InfraManagerBatchConfig.java     |  55 ++++
 .../apache/ambari/infra/manager/JobManager.java | 274 +++++++++++++++++++
 .../infra/model/ExecutionContextResponse.java   |  40 +++
 .../ambari/infra/model/JobDetailsResponse.java  |  53 ++++
 .../model/JobExecutionDetailsResponse.java      |  49 ++++
 .../infra/model/JobExecutionInfoResponse.java   | 141 ++++++++++
 .../ambari/infra/model/JobExecutionRequest.java |  46 ++++
 .../infra/model/JobExecutionRestartRequest.java |  52 ++++
 .../infra/model/JobExecutionStopRequest.java    |  50 ++++
 .../infra/model/JobInstanceDetailsResponse.java |  54 ++++
 .../infra/model/JobInstanceStartRequest.java    |  49 ++++
 .../ambari/infra/model/JobOperationParams.java  |  31 +++
 .../apache/ambari/infra/model/JobRequest.java   |  37 +++
 .../apache/ambari/infra/model/PageRequest.java  |  49 ++++
 .../model/StepExecutionContextResponse.java     |  58 ++++
 .../infra/model/StepExecutionInfoResponse.java  | 115 ++++++++
 .../model/StepExecutionProgressResponse.java    |  53 ++++
 .../infra/model/StepExecutionRequest.java       |  49 ++++
 .../infra/model/wrapper/JobExecutionData.java   | 118 ++++++++
 .../infra/model/wrapper/StepExecutionData.java  | 133 +++++++++
 .../ambari/infra/rest/JobExceptionMapper.java   | 110 ++++++++
 .../apache/ambari/infra/rest/JobResource.java   | 151 ++++++++--
 23 files changed, 1748 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9ffef7fc/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/InfraManager.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/InfraManager.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/InfraManager.java
index 656127e..227bab4 100644
--- a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/InfraManager.java
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/InfraManager.java
@@ -142,6 +142,7 @@ public class InfraManager {
     ServletHolder jerseyServlet = context.addServlet(org.glassfish.jersey.servlet.ServletContainer.class, "/api/v1/*");
     jerseyServlet.setInitOrder(1);
     jerseyServlet.setInitParameter("jersey.config.server.provider.packages","org.apache.ambari.infra.rest,io.swagger.jaxrs.listing");
+
     context.getSessionHandler().getSessionManager().setMaxInactiveInterval(SESSION_TIMEOUT);
     context.getSessionHandler().getSessionManager().getSessionCookieConfig().setName(INFRA_MANAGER_SESSION_ID);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/9ffef7fc/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/batch/InfraManagerBatchConfig.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/batch/InfraManagerBatchConfig.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/batch/InfraManagerBatchConfig.java
index 7310626..c3d8db6 100644
--- a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/batch/InfraManagerBatchConfig.java
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/batch/InfraManagerBatchConfig.java
@@ -21,6 +21,14 @@ package org.apache.ambari.infra.conf.batch;
 import org.apache.ambari.infra.job.dummy.DummyItemProcessor;
 import org.apache.ambari.infra.job.dummy.DummyItemWriter;
 import org.apache.ambari.infra.job.dummy.DummyObject;
+import org.springframework.batch.admin.service.JdbcSearchableJobExecutionDao;
+import org.springframework.batch.admin.service.JdbcSearchableJobInstanceDao;
+import org.springframework.batch.admin.service.JdbcSearchableStepExecutionDao;
+import org.springframework.batch.admin.service.JobService;
+import org.springframework.batch.admin.service.SearchableJobExecutionDao;
+import org.springframework.batch.admin.service.SearchableJobInstanceDao;
+import org.springframework.batch.admin.service.SearchableStepExecutionDao;
+import org.springframework.batch.admin.service.SimpleJobService;
 import org.springframework.batch.core.Job;
 import org.springframework.batch.core.Step;
 import org.springframework.batch.core.configuration.JobRegistry;
@@ -34,6 +42,9 @@ import org.springframework.batch.core.launch.JobOperator;
 import org.springframework.batch.core.launch.support.SimpleJobLauncher;
 import org.springframework.batch.core.launch.support.SimpleJobOperator;
 import org.springframework.batch.core.repository.JobRepository;
+import org.springframework.batch.core.repository.dao.DefaultExecutionContextSerializer;
+import org.springframework.batch.core.repository.dao.ExecutionContextDao;
+import org.springframework.batch.core.repository.dao.JdbcExecutionContextDao;
 import org.springframework.batch.core.repository.support.JobRepositoryFactoryBean;
 import org.springframework.batch.item.ItemProcessor;
 import org.springframework.batch.item.ItemReader;
@@ -53,6 +64,7 @@ import org.springframework.context.annotation.Configuration;
 import org.springframework.core.io.ClassPathResource;
 import org.springframework.core.io.Resource;
 import org.springframework.core.task.SimpleAsyncTaskExecutor;
+import org.springframework.jdbc.core.JdbcTemplate;
 import org.springframework.jdbc.datasource.DriverManagerDataSource;
 import org.springframework.jdbc.datasource.init.DataSourceInitializer;
 import org.springframework.jdbc.datasource.init.ResourceDatabasePopulator;
@@ -166,6 +178,49 @@ public class InfraManagerBatchConfig {
     return jobRegistryBeanPostProcessor;
   }
 
+  @Bean
+  public JdbcTemplate jdbcTemplate() {
+    return new JdbcTemplate(dataSource());
+  }
+
+  @Bean
+  public SearchableJobInstanceDao searchableJobInstanceDao() {
+    JdbcSearchableJobInstanceDao dao = new JdbcSearchableJobInstanceDao();
+    dao.setJdbcTemplate(jdbcTemplate());
+    return dao;
+  }
+
+  @Bean
+  public SearchableJobExecutionDao searchableJobExecutionDao() {
+    JdbcSearchableJobExecutionDao dao = new JdbcSearchableJobExecutionDao();
+    dao.setJdbcTemplate(jdbcTemplate());
+    dao.setDataSource(dataSource());
+    return dao;
+  }
+
+  @Bean
+  public SearchableStepExecutionDao searchableStepExecutionDao() {
+    JdbcSearchableStepExecutionDao dao = new JdbcSearchableStepExecutionDao();
+    dao.setDataSource(dataSource());
+    dao.setJdbcTemplate(jdbcTemplate());
+    return dao;
+  }
+
+  @Bean
+  public ExecutionContextDao executionContextDao() {
+    JdbcExecutionContextDao dao = new JdbcExecutionContextDao();
+    dao.setSerializer(new DefaultExecutionContextSerializer());
+    dao.setJdbcTemplate(jdbcTemplate());
+    return dao;
+  }
+
+  @Bean
+  public JobService jobService() throws Exception {
+    return new SimpleJobService(searchableJobInstanceDao(), searchableJobExecutionDao(),
+      searchableStepExecutionDao(), jobRepository(), jobLauncher(), jobRegistry,
+      executionContextDao());
+  }
+
   @Bean(name = "dummyStep")
   protected Step dummyStep(ItemReader<DummyObject> reader,
                        ItemProcessor<DummyObject, String> processor,

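The beans above recreate the plumbing that spring-batch-admin's JobService
needs (searchable job, execution, and step DAOs plus an execution context
DAO). A minimal bootstrap sketch, assuming the configuration class and its
datasource properties resolve outside the Infra Manager process, might look
like:

    import org.apache.ambari.infra.conf.batch.InfraManagerBatchConfig;
    import org.springframework.batch.admin.service.JobService;
    import org.springframework.context.annotation.AnnotationConfigApplicationContext;

    public class JobServiceBootstrapSketch {
      public static void main(String[] args) {
        // In Ambari Infra Manager the context is assembled by the embedded web app;
        // this standalone wiring is only for illustration.
        try (AnnotationConfigApplicationContext ctx =
                 new AnnotationConfigApplicationContext(InfraManagerBatchConfig.class)) {
          JobService jobService = ctx.getBean(JobService.class);
          System.out.println("Registered jobs: " + jobService.listJobs(0, 10));
        }
      }
    }
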
http://git-wip-us.apache.org/repos/asf/ambari/blob/9ffef7fc/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/manager/JobManager.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/manager/JobManager.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/manager/JobManager.java
new file mode 100644
index 0000000..fc0a4f7
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/manager/JobManager.java
@@ -0,0 +1,274 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.manager;
+
+import com.google.common.collect.Lists;
+import org.apache.ambari.infra.model.ExecutionContextResponse;
+import org.apache.ambari.infra.model.JobDetailsResponse;
+import org.apache.ambari.infra.model.JobExecutionDetailsResponse;
+import org.apache.ambari.infra.model.JobExecutionInfoResponse;
+import org.apache.ambari.infra.model.JobInstanceDetailsResponse;
+import org.apache.ambari.infra.model.JobOperationParams;
+import org.apache.ambari.infra.model.StepExecutionContextResponse;
+import org.apache.ambari.infra.model.StepExecutionInfoResponse;
+import org.apache.ambari.infra.model.StepExecutionProgressResponse;
+import org.springframework.batch.admin.history.StepExecutionHistory;
+import org.springframework.batch.admin.service.JobService;
+import org.springframework.batch.admin.service.NoSuchStepExecutionException;
+import org.springframework.batch.admin.web.JobInfo;
+import org.springframework.batch.admin.web.StepExecutionProgress;
+import org.springframework.batch.core.JobExecution;
+import org.springframework.batch.core.JobInstance;
+import org.springframework.batch.core.JobParametersBuilder;
+import org.springframework.batch.core.JobParametersInvalidException;
+import org.springframework.batch.core.StepExecution;
+import org.springframework.batch.core.launch.JobExecutionNotRunningException;
+import org.springframework.batch.core.launch.JobInstanceAlreadyExistsException;
+import org.springframework.batch.core.launch.JobOperator;
+import org.springframework.batch.core.launch.NoSuchJobException;
+import org.springframework.batch.core.launch.NoSuchJobExecutionException;
+import org.springframework.batch.core.launch.NoSuchJobInstanceException;
+import org.springframework.batch.core.repository.JobExecutionAlreadyRunningException;
+import org.springframework.batch.core.repository.JobInstanceAlreadyCompleteException;
+import org.springframework.batch.core.repository.JobRestartException;
+
+import javax.inject.Inject;
+import javax.inject.Named;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TimeZone;
+
+@Named
+public class JobManager {
+
+  @Inject
+  private JobService jobService;
+
+  @Inject
+  private JobOperator jobOperator;
+
+  private TimeZone timeZone = TimeZone.getDefault();
+
+  public Set<String> getAllJobNames() {
+    return jobOperator.getJobNames();
+  }
+
+  /**
+   * Launch a new job instance (based on the job name) and apply custom parameters to it.
+   * Also add a date parameter to ensure each job instance is unique.
+   */
+  public JobExecutionInfoResponse launchJob(String jobName, String params)
+    throws JobParametersInvalidException, JobInstanceAlreadyExistsException, NoSuchJobException,
+    JobExecutionAlreadyRunningException, JobRestartException, JobInstanceAlreadyCompleteException {
+    // TODO: handle params
+    JobParametersBuilder jobParametersBuilder = new JobParametersBuilder();
+    jobParametersBuilder.addDate("date", new Date());
+    return new JobExecutionInfoResponse(jobService.launch(jobName, jobParametersBuilder.toJobParameters()), timeZone);
+  }
+
+  /**
+   * Get all execution ids that are mapped to a specific job name.
+   */
+  public Set<Long> getExecutionIdsByJobName(String jobName) throws NoSuchJobException {
+    return jobOperator.getRunningExecutions(jobName);
+  }
+
+  /**
+   * Stop all running job executions and return the number of stopped jobs.
+   */
+  public Integer stopAllJobs() {
+    return jobService.stopAll();
+  }
+
+  /**
+   * Gather job execution details by job execution id.
+   */
+  public JobExecutionDetailsResponse getExecutionInfo(Long jobExecutionId) throws NoSuchJobExecutionException {
+    JobExecution jobExecution = jobService.getJobExecution(jobExecutionId);
+    List<StepExecutionInfoResponse> stepExecutionInfos = new ArrayList<StepExecutionInfoResponse>();
+    for (StepExecution stepExecution : jobExecution.getStepExecutions()) {
+      stepExecutionInfos.add(new StepExecutionInfoResponse(stepExecution, timeZone));
+    }
+    Collections.sort(stepExecutionInfos, new Comparator<StepExecutionInfoResponse>() {
+      @Override
+      public int compare(StepExecutionInfoResponse o1, StepExecutionInfoResponse o2) {
+        return o1.getId().compareTo(o2.getId());
+      }
+    });
+    return new JobExecutionDetailsResponse(new JobExecutionInfoResponse(jobExecution, timeZone), stepExecutionInfos);
+  }
+
+  /**
+   * Stop or abandon a running job execution by job execution id.
+   */
+  public JobExecutionInfoResponse stopOrAbandonJobByExecutionId(Long jobExecutionId, JobOperationParams.JobStopOrAbandonOperationParam operation)
+    throws NoSuchJobExecutionException, JobExecutionNotRunningException, JobExecutionAlreadyRunningException {
+    JobExecution jobExecution;
+    if (JobOperationParams.JobStopOrAbandonOperationParam.STOP.equals(operation)) {
+      jobExecution = jobService.stop(jobExecutionId);
+    } else if (JobOperationParams.JobStopOrAbandonOperationParam.ABANDON.equals(operation)) {
+      jobExecution = jobService.abandon(jobExecutionId);
+    } else {
+      throw new UnsupportedOperationException("Unsupported operation");
+    }
+    return new JobExecutionInfoResponse(jobExecution, timeZone);
+  }
+
+  /**
+   * Get the execution context for a job execution instance. (The context can be passed between job executions.)
+   */
+  public ExecutionContextResponse getExecutionContextByJobExecutionId(Long executionId) throws NoSuchJobExecutionException {
+    JobExecution jobExecution = jobService.getJobExecution(executionId);
+    Map<String, Object> executionMap = new HashMap<>();
+    for (Map.Entry<String, Object> entry : jobExecution.getExecutionContext().entrySet()) {
+      executionMap.put(entry.getKey(), entry.getValue());
+    }
+    return new ExecutionContextResponse(executionId, executionMap);
+  }
+
+  /**
+   * Restart a specific job instance with the same parameters. (Only the restart operation is supported here.)
+   */
+  public JobExecutionInfoResponse restart(Long jobInstanceId, String jobName,
+                                          JobOperationParams.JobRestartOperationParam operation) throws NoSuchJobException, JobParametersInvalidException,
+    JobExecutionAlreadyRunningException, JobRestartException, JobInstanceAlreadyCompleteException, NoSuchJobExecutionException {
+    if (JobOperationParams.JobRestartOperationParam.RESTART.equals(operation)) {
+      Collection<JobExecution> jobExecutions = jobService.getJobExecutionsForJobInstance(jobName, jobInstanceId);
+      JobExecution jobExecution = jobExecutions.iterator().next();
+      Long jobExecutionId = jobExecution.getId();
+      return new JobExecutionInfoResponse(jobService.restart(jobExecutionId), timeZone);
+    } else {
+      throw new UnsupportedOperationException("Unsupported operation (try: RESTART)");
+    }
+  }
+
+  /**
+   * Get all job details. (paged)
+   */
+  public List<JobInfo> getAllJobs(int start, int pageSize) {
+    List<JobInfo> jobs = new ArrayList<>();
+    Collection<String> names = jobService.listJobs(start, pageSize);
+    for (String name : names) {
+      int count = 0;
+      try {
+        count = jobService.countJobExecutionsForJob(name);
+      }
+      catch (NoSuchJobException e) {
+        // shouldn't happen
+      }
+      boolean launchable = jobService.isLaunchable(name);
+      boolean incrementable = jobService.isIncrementable(name);
+      jobs.add(new JobInfo(name, count, null, launchable, incrementable));
+    }
+    return jobs;
+  }
+
+  /**
+   * Get all executions for a given job instance.
+   */
+  public List<JobExecutionInfoResponse> getExecutionsForJobInstance(String jobName, Long jobInstanceId) throws NoSuchJobInstanceException, NoSuchJobException {
+    List<JobExecutionInfoResponse> result = Lists.newArrayList();
+    JobInstance jobInstance = jobService.getJobInstance(jobInstanceId);
+    Collection<JobExecution> jobExecutions = jobService.getJobExecutionsForJobInstance(jobName, jobInstance.getInstanceId());
+    for (JobExecution jobExecution : jobExecutions) {
+      result.add(new JobExecutionInfoResponse(jobExecution, timeZone));
+    }
+    return result;
+  }
+
+  /**
+   * Get job details for a specific job. (paged)
+   */
+  public JobDetailsResponse getJobDetails(String jobName, int page, int size) throws NoSuchJobException {
+    List<JobInstanceDetailsResponse> jobInstanceResponses = Lists.newArrayList();
+    Collection<JobInstance> jobInstances = jobService.listJobInstances(jobName, page, size);
+
+    int count = jobService.countJobExecutionsForJob(jobName);
+    boolean launchable = jobService.isLaunchable(jobName);
+    boolean isIncrementable = jobService.isIncrementable(jobName);
+
+    for (JobInstance jobInstance: jobInstances) {
+      List<JobExecutionInfoResponse> executionInfos = Lists.newArrayList();
+      Collection<JobExecution> jobExecutions = jobService.getJobExecutionsForJobInstance(jobName, jobInstance.getId());
+      if (jobExecutions != null) {
+        for (JobExecution jobExecution : jobExecutions) {
+          executionInfos.add(new JobExecutionInfoResponse(jobExecution, timeZone));
+        }
+      }
+      jobInstanceResponses.add(new JobInstanceDetailsResponse(jobInstance, executionInfos));
+    }
+    return new JobDetailsResponse(new JobInfo(jobName, count, launchable, isIncrementable), jobInstanceResponses);
+  }
+
+  /**
+   * Get step execution details for a given job execution id and step execution id.
+   */
+  public StepExecutionInfoResponse getStepExecution(Long jobExecutionId, Long stepExecutionId) throws NoSuchStepExecutionException, NoSuchJobExecutionException {
+    StepExecution stepExecution = jobService.getStepExecution(jobExecutionId, stepExecutionId);
+    return new StepExecutionInfoResponse(stepExecution, timeZone);
+  }
+
+  /**
+   * Get step execution context details. (The execution context can be passed between steps.)
+   */
+  public StepExecutionContextResponse getStepExecutionContext(Long jobExecutionId, Long stepExecutionId) throws NoSuchStepExecutionException, NoSuchJobExecutionException {
+    StepExecution stepExecution = jobService.getStepExecution(jobExecutionId, stepExecutionId);
+    Map<String, Object> executionMap = new HashMap<>();
+    for (Map.Entry<String, Object> entry : stepExecution.getExecutionContext().entrySet()) {
+      executionMap.put(entry.getKey(), entry.getValue());
+    }
+    return new StepExecutionContextResponse(executionMap, jobExecutionId, stepExecutionId, stepExecution.getStepName());
+  }
+
+  /**
+   * Get step execution progress status details.
+   */
+  public StepExecutionProgressResponse getStepExecutionProgress(Long jobExecutionId, Long stepExecutionId) throws NoSuchStepExecutionException, NoSuchJobExecutionException {
+    StepExecution stepExecution = jobService.getStepExecution(jobExecutionId, stepExecutionId);
+    StepExecutionInfoResponse stepExecutionInfoResponse = new StepExecutionInfoResponse(stepExecution, timeZone);
+    String stepName = stepExecution.getStepName();
+    if (stepName.contains(":partition")) {
+      stepName = stepName.replaceAll("(:partition).*", "$1*");
+    }
+    String jobName = stepExecution.getJobExecution().getJobInstance().getJobName();
+    StepExecutionHistory stepExecutionHistory = computeHistory(jobName, stepName);
+    StepExecutionProgress stepExecutionProgress = new StepExecutionProgress(stepExecution, stepExecutionHistory);
+
+    return new StepExecutionProgressResponse(stepExecutionProgress, stepExecutionHistory, stepExecutionInfoResponse);
+
+  }
+
+  private StepExecutionHistory computeHistory(String jobName, String stepName) {
+    int total = jobService.countStepExecutionsForStep(jobName, stepName);
+    StepExecutionHistory stepExecutionHistory = new StepExecutionHistory(stepName);
+    for (int i = 0; i < total; i += 1000) {
+      for (StepExecution stepExecution : jobService.listStepExecutionsForStep(jobName, stepName, i, 1000)) {
+        stepExecutionHistory.append(stepExecution);
+      }
+    }
+    return stepExecutionHistory;
+  }
+}
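
Since the commit's JobResource.java changes are summarized but not shown in
this excerpt, here is a hedged sketch of how a JAX-RS resource can delegate to
the JobManager above; the path and class name are illustrative only:

    import javax.inject.Inject;
    import javax.ws.rs.POST;
    import javax.ws.rs.Path;
    import javax.ws.rs.PathParam;
    import javax.ws.rs.Produces;
    import javax.ws.rs.QueryParam;
    import javax.ws.rs.core.MediaType;

    import org.apache.ambari.infra.manager.JobManager;
    import org.apache.ambari.infra.model.JobExecutionInfoResponse;

    @Path("/jobs-sketch")
    public class JobResourceSketch {

      @Inject
      private JobManager jobManager;

      @POST
      @Path("/{jobName}")
      @Produces(MediaType.APPLICATION_JSON)
      public JobExecutionInfoResponse startJob(@PathParam("jobName") String jobName,
                                               @QueryParam("params") String params) throws Exception {
        // JobManager appends a date parameter, so repeated launches create distinct instances.
        return jobManager.launchJob(jobName, params);
      }
    }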

http://git-wip-us.apache.org/repos/asf/ambari/blob/9ffef7fc/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/ExecutionContextResponse.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/ExecutionContextResponse.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/ExecutionContextResponse.java
new file mode 100644
index 0000000..2d46c54
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/ExecutionContextResponse.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import java.util.Map;
+
+public class ExecutionContextResponse {
+
+  private final Long jobExecutionId;
+  private final Map<String, Object> executionContextMap;
+
+  public ExecutionContextResponse(Long jobExecutionId, Map<String, Object> executionContextMap) {
+    this.jobExecutionId = jobExecutionId;
+    this.executionContextMap = executionContextMap;
+  }
+
+  public Long getJobExecutionId() {
+    return jobExecutionId;
+  }
+
+  public Map<String, Object> getExecutionContextMap() {
+    return executionContextMap;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/9ffef7fc/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobDetailsResponse.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobDetailsResponse.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobDetailsResponse.java
new file mode 100644
index 0000000..cd34fef
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobDetailsResponse.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import org.springframework.batch.admin.web.JobInfo;
+
+import java.util.List;
+
+public class JobDetailsResponse {
+
+  private JobInfo jobInfo;
+  private List<JobInstanceDetailsResponse> jobInstanceDetailsResponseList;
+
+  public JobDetailsResponse() {
+  }
+
+  public JobDetailsResponse(JobInfo jobInfo, List<JobInstanceDetailsResponse> jobInstanceDetailsResponseList) {
+    this.jobInfo = jobInfo;
+    this.jobInstanceDetailsResponseList = jobInstanceDetailsResponseList;
+  }
+
+  public JobInfo getJobInfo() {
+    return jobInfo;
+  }
+
+  public void setJobInfo(JobInfo jobInfo) {
+    this.jobInfo = jobInfo;
+  }
+
+  public List<JobInstanceDetailsResponse> getJobInstanceDetailsResponseList() {
+    return jobInstanceDetailsResponseList;
+  }
+
+  public void setJobInstanceDetailsResponseList(List<JobInstanceDetailsResponse> jobInstanceDetailsResponseList) {
+    this.jobInstanceDetailsResponseList = jobInstanceDetailsResponseList;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/9ffef7fc/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionDetailsResponse.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionDetailsResponse.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionDetailsResponse.java
new file mode 100644
index 0000000..695b57f
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionDetailsResponse.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import java.util.List;
+
+public class JobExecutionDetailsResponse {
+
+  private JobExecutionInfoResponse jobExecutionInfoResponse;
+
+  private List<StepExecutionInfoResponse> stepExecutionInfoList;
+
+  public JobExecutionDetailsResponse(JobExecutionInfoResponse jobExecutionInfoResponse, List<StepExecutionInfoResponse> stepExecutionInfoList) {
+    this.jobExecutionInfoResponse = jobExecutionInfoResponse;
+    this.stepExecutionInfoList = stepExecutionInfoList;
+  }
+
+  public JobExecutionInfoResponse getJobExecutionInfoResponse() {
+    return jobExecutionInfoResponse;
+  }
+
+  public void setJobExecutionInfoResponse(JobExecutionInfoResponse jobExecutionInfoResponse) {
+    this.jobExecutionInfoResponse = jobExecutionInfoResponse;
+  }
+
+  public List<StepExecutionInfoResponse> getStepExecutionInfoList() {
+    return stepExecutionInfoList;
+  }
+
+  public void setStepExecutionInfoList(List<StepExecutionInfoResponse> stepExecutionInfoList) {
+    this.stepExecutionInfoList = stepExecutionInfoList;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/9ffef7fc/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionInfoResponse.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionInfoResponse.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionInfoResponse.java
new file mode 100644
index 0000000..a7e4a4f
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionInfoResponse.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import org.apache.ambari.infra.model.wrapper.JobExecutionData;
+import org.springframework.batch.admin.web.JobParametersExtractor;
+import org.springframework.batch.core.BatchStatus;
+import org.springframework.batch.core.JobExecution;
+import org.springframework.batch.core.JobInstance;
+import org.springframework.batch.core.converter.DefaultJobParametersConverter;
+import org.springframework.batch.core.converter.JobParametersConverter;
+
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.Properties;
+import java.util.TimeZone;
+
+public class JobExecutionInfoResponse {
+  private Long id;
+  private int stepExecutionCount;
+  private Long jobId;
+  private String jobName;
+  private String startDate = "";
+  private String startTime = "";
+  private String duration = "";
+  private JobExecutionData jobExecutionData;
+  private Properties jobParameters;
+  private String jobParametersString;
+  private boolean restartable = false;
+  private boolean abandonable = false;
+  private boolean stoppable = false;
+  private final TimeZone timeZone;
+
+
+  public JobExecutionInfoResponse(JobExecution jobExecution, TimeZone timeZone) {
+    JobParametersConverter converter = new DefaultJobParametersConverter();
+    this.jobExecutionData = new JobExecutionData(jobExecution);
+    this.timeZone = timeZone;
+    this.id = jobExecutionData.getId();
+    this.jobId = jobExecutionData.getJobId();
+    this.stepExecutionCount = jobExecutionData.getStepExecutions().size();
+    this.jobParameters = converter.getProperties(jobExecutionData.getJobParameters());
+    this.jobParametersString = (new JobParametersExtractor()).fromJobParameters(jobExecutionData.getJobParameters());
+    JobInstance jobInstance = jobExecutionData.getJobInstance();
+    if(jobInstance != null) {
+      this.jobName = jobInstance.getJobName();
+      BatchStatus status = jobExecutionData.getStatus();
+      this.restartable = status.isGreaterThan(BatchStatus.STOPPING) && status.isLessThan(BatchStatus.ABANDONED);
+      this.abandonable = status.isGreaterThan(BatchStatus.STARTED) && status != BatchStatus.ABANDONED;
+      this.stoppable = status.isLessThan(BatchStatus.STOPPING);
+    } else {
+      this.jobName = "?";
+    }
+
+    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
+    SimpleDateFormat timeFormat = new SimpleDateFormat("HH:mm:ss");
+    SimpleDateFormat durationFormat = new SimpleDateFormat("HH:mm:ss");
+
+    durationFormat.setTimeZone(TimeZone.getTimeZone("GMT"));
+    timeFormat.setTimeZone(timeZone);
+    dateFormat.setTimeZone(timeZone);
+    if(jobExecutionData.getStartTime() != null) {
+      this.startDate = dateFormat.format(jobExecutionData.getStartTime());
+      this.startTime = timeFormat.format(jobExecutionData.getStartTime());
+      Date endTime = jobExecutionData.getEndTime() != null ? jobExecutionData.getEndTime() : new Date();
+      this.duration = durationFormat.format(new Date(endTime.getTime() - jobExecutionData.getStartTime().getTime()));
+    }
+  }
+
+  public Long getId() {
+    return id;
+  }
+
+  public int getStepExecutionCount() {
+    return stepExecutionCount;
+  }
+
+  public Long getJobId() {
+    return jobId;
+  }
+
+  public String getJobName() {
+    return jobName;
+  }
+
+  public String getStartDate() {
+    return startDate;
+  }
+
+  public String getStartTime() {
+    return startTime;
+  }
+
+  public String getDuration() {
+    return duration;
+  }
+
+  public JobExecutionData getJobExecutionData() {
+    return jobExecutionData;
+  }
+
+  public Properties getJobParameters() {
+    return jobParameters;
+  }
+
+  public String getJobParametersString() {
+    return jobParametersString;
+  }
+
+  public boolean isRestartable() {
+    return restartable;
+  }
+
+  public boolean isAbandonable() {
+    return abandonable;
+  }
+
+  public boolean isStoppable() {
+    return stoppable;
+  }
+
+  public TimeZone getTimeZone() {
+    return timeZone;
+  }
+}
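
The restartable/abandonable/stoppable flags above are derived purely from the
BatchStatus ordering. A small sketch of that derivation, reusing the same
expressions as the constructor:

    import org.springframework.batch.core.BatchStatus;

    public class BatchStatusFlagsSketch {
      public static void main(String[] args) {
        for (BatchStatus status : BatchStatus.values()) {
          // FAILED and STOPPED come out restartable; STARTING and STARTED come out stoppable.
          boolean restartable = status.isGreaterThan(BatchStatus.STOPPING) && status.isLessThan(BatchStatus.ABANDONED);
          boolean abandonable = status.isGreaterThan(BatchStatus.STARTED) && status != BatchStatus.ABANDONED;
          boolean stoppable = status.isLessThan(BatchStatus.STOPPING);
          System.out.printf("%-10s restartable=%-5b abandonable=%-5b stoppable=%b%n",
              status, restartable, abandonable, stoppable);
        }
      }
    }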

http://git-wip-us.apache.org/repos/asf/ambari/blob/9ffef7fc/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionRequest.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionRequest.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionRequest.java
new file mode 100644
index 0000000..b4c20e9
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionRequest.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import javax.ws.rs.PathParam;
+
+public class JobExecutionRequest {
+
+  @PathParam("jobName")
+  private String jobName;
+
+  @PathParam("jobInstanceId")
+  private Long jobInstanceId;
+
+  public String getJobName() {
+    return jobName;
+  }
+
+  public Long getJobInstanceId() {
+    return jobInstanceId;
+  }
+
+  public void setJobName(String jobName) {
+    this.jobName = jobName;
+  }
+
+  public void setJobInstanceId(Long jobInstanceId) {
+    this.jobInstanceId = jobInstanceId;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/9ffef7fc/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionRestartRequest.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionRestartRequest.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionRestartRequest.java
new file mode 100644
index 0000000..88687e7
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionRestartRequest.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+public class JobExecutionRestartRequest {
+
+  private String jobName;
+
+  private Long jobInstanceId;
+
+  private JobOperationParams.JobRestartOperationParam operation;
+
+  public String getJobName() {
+    return jobName;
+  }
+
+  public void setJobName(String jobName) {
+    this.jobName = jobName;
+  }
+
+  public Long getJobInstanceId() {
+    return jobInstanceId;
+  }
+
+  public void setJobInstanceId(Long jobInstanceId) {
+    this.jobInstanceId = jobInstanceId;
+  }
+
+  public JobOperationParams.JobRestartOperationParam getOperation() {
+    return operation;
+  }
+
+  public void setOperation(JobOperationParams.JobRestartOperationParam operation) {
+    this.operation = operation;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/9ffef7fc/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionStopRequest.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionStopRequest.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionStopRequest.java
new file mode 100644
index 0000000..b176f12
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionStopRequest.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import javax.validation.constraints.NotNull;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.QueryParam;
+
+public class JobExecutionStopRequest {
+
+  @PathParam("jobExecutionId")
+  @NotNull
+  private Long jobExecutionId;
+
+  @QueryParam("operation")
+  @NotNull
+  private JobOperationParams.JobStopOrAbandonOperationParam operation;
+
+  public Long getJobExecutionId() {
+    return jobExecutionId;
+  }
+
+  public void setJobExecutionId(Long jobExecutionId) {
+    this.jobExecutionId = jobExecutionId;
+  }
+
+  public JobOperationParams.JobStopOrAbandonOperationParam getOperation() {
+    return operation;
+  }
+
+  public void setOperation(JobOperationParams.JobStopOrAbandonOperationParam operation) {
+    this.operation = operation;
+  }
+}
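
Model classes like this one carry JAX-RS binding annotations directly on their
fields, so a resource method can accept them as a single aggregate parameter.
A hedged sketch of that pattern (the resource path is illustrative; the real
endpoints live in JobResource.java):

    import javax.ws.rs.BeanParam;
    import javax.ws.rs.DELETE;
    import javax.ws.rs.Path;
    import javax.ws.rs.Produces;
    import javax.ws.rs.core.MediaType;

    @Path("/executions-sketch")
    public class StopEndpointSketch {

      @DELETE
      @Path("/{jobExecutionId}")
      @Produces(MediaType.APPLICATION_JSON)
      public String stopOrAbandon(@BeanParam JobExecutionStopRequest request) {
        // @BeanParam populates jobExecutionId from the path and operation
        // (STOP or ABANDON) from the query string.
        return request.getJobExecutionId() + ":" + request.getOperation();
      }
    }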

http://git-wip-us.apache.org/repos/asf/ambari/blob/9ffef7fc/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobInstanceDetailsResponse.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobInstanceDetailsResponse.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobInstanceDetailsResponse.java
new file mode 100644
index 0000000..af88654
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobInstanceDetailsResponse.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import org.springframework.batch.core.JobInstance;
+
+import java.util.List;
+
+public class JobInstanceDetailsResponse {
+
+  private JobInstance jobInstance;
+
+  private List<JobExecutionInfoResponse> jobExecutionInfoResponseList;
+
+  public JobInstanceDetailsResponse() {
+  }
+
+  public JobInstanceDetailsResponse(JobInstance jobInstance, List<JobExecutionInfoResponse> jobExecutionInfoResponseList) {
+    this.jobInstance = jobInstance;
+    this.jobExecutionInfoResponseList = jobExecutionInfoResponseList;
+  }
+
+  public JobInstance getJobInstance() {
+    return jobInstance;
+  }
+
+  public void setJobInstance(JobInstance jobInstance) {
+    this.jobInstance = jobInstance;
+  }
+
+  public List<JobExecutionInfoResponse> getJobExecutionInfoResponseList() {
+    return jobExecutionInfoResponseList;
+  }
+
+  public void setJobExecutionInfoResponseList(List<JobExecutionInfoResponse> jobExecutionInfoResponseList) {
+    this.jobExecutionInfoResponseList = jobExecutionInfoResponseList;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/9ffef7fc/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobInstanceStartRequest.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobInstanceStartRequest.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobInstanceStartRequest.java
new file mode 100644
index 0000000..905a4fa
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobInstanceStartRequest.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import javax.validation.constraints.NotNull;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.QueryParam;
+
+public class JobInstanceStartRequest {
+
+  @PathParam("jobName")
+  @NotNull
+  private String jobName;
+
+  @QueryParam("params")
+  String params;
+
+  public String getJobName() {
+    return jobName;
+  }
+
+  public void setJobName(String jobName) {
+    this.jobName = jobName;
+  }
+
+  public String getParams() {
+    return params;
+  }
+
+  public void setParams(String params) {
+    this.params = params;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/9ffef7fc/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobOperationParams.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobOperationParams.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobOperationParams.java
new file mode 100644
index 0000000..e286deb
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobOperationParams.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+public class JobOperationParams {
+
+  public enum JobStopOrAbandonOperationParam {
+    STOP, ABANDON;
+  }
+
+  public enum JobRestartOperationParam {
+    RESTART;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/9ffef7fc/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobRequest.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobRequest.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobRequest.java
new file mode 100644
index 0000000..b4fd478
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobRequest.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import javax.validation.constraints.NotNull;
+import javax.ws.rs.PathParam;
+
+public class JobRequest extends PageRequest {
+
+  @NotNull
+  @PathParam("jobName")
+  private String jobName;
+
+  public String getJobName() {
+    return jobName;
+  }
+
+  public void setJobName(String jobName) {
+    this.jobName = jobName;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/9ffef7fc/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/PageRequest.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/PageRequest.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/PageRequest.java
new file mode 100644
index 0000000..679d4fd
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/PageRequest.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import javax.ws.rs.DefaultValue;
+import javax.ws.rs.QueryParam;
+
+public class PageRequest {
+
+  @QueryParam("page")
+  @DefaultValue("0")
+  private int page;
+
+  @QueryParam("size")
+  @DefaultValue("20")
+  private int size;
+
+  public int getPage() {
+    return page;
+  }
+
+  public void setPage(int page) {
+    this.page = page;
+  }
+
+  public int getSize() {
+    return size;
+  }
+
+  public void setSize(int size) {
+    this.size = size;
+  }
+}
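
Thanks to @DefaultValue, paging works with no query parameters at all. Assuming zero-based pages (and the jobs path used by JobResource below), GET /jobs returns the first 20 entries (page=0, size=20), while GET /jobs?page=2&size=50 returns entries 100 through 149.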

http://git-wip-us.apache.org/repos/asf/ambari/blob/9ffef7fc/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionContextResponse.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionContextResponse.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionContextResponse.java
new file mode 100644
index 0000000..0e67a87
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionContextResponse.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import java.util.Map;
+
+public class StepExecutionContextResponse {
+
+  private Map<String, Object> executionContextMap;
+
+  private Long jobExecutionId;
+
+  private Long stepExecutionId;
+
+  private String stepName;
+
+  public StepExecutionContextResponse() {
+  }
+
+  public StepExecutionContextResponse(Map<String, Object> executionContextMap, Long jobExecutionId, Long stepExecutionId, String stepName) {
+    this.executionContextMap = executionContextMap;
+    this.jobExecutionId = jobExecutionId;
+    this.stepExecutionId = stepExecutionId;
+    this.stepName = stepName;
+  }
+
+  public Map<String, Object> getExecutionContextMap() {
+    return executionContextMap;
+  }
+
+  public Long getJobExecutionId() {
+    return jobExecutionId;
+  }
+
+  public Long getStepExecutionId() {
+    return stepExecutionId;
+  }
+
+  public String getStepName() {
+    return stepName;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/9ffef7fc/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionInfoResponse.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionInfoResponse.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionInfoResponse.java
new file mode 100644
index 0000000..ed04767
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionInfoResponse.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import org.apache.ambari.infra.model.wrapper.StepExecutionData;
+import org.springframework.batch.core.JobExecution;
+import org.springframework.batch.core.StepExecution;
+
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.TimeZone;
+
+public class StepExecutionInfoResponse {
+  private Long id;
+  private Long jobExecutionId;
+  private String jobName;
+  private String name;
+  private String startDate = "-";
+  private String startTime = "-";
+  private String duration = "-";
+  private StepExecutionData stepExecutionData;
+  private long durationMillis;
+
+  public StepExecutionInfoResponse(String jobName, Long jobExecutionId, String name, TimeZone timeZone) {
+    this.jobName = jobName;
+    this.jobExecutionId = jobExecutionId;
+    this.name = name;
+    this.stepExecutionData = new StepExecutionData(new StepExecution(name, new JobExecution(jobExecutionId)));
+  }
+
+  public StepExecutionInfoResponse(StepExecution stepExecution, TimeZone timeZone) {
+    this.stepExecutionData = new StepExecutionData(stepExecution);
+    this.id = stepExecutionData.getId();
+    this.name = stepExecutionData.getStepName();
+    this.jobName = stepExecutionData.getJobExecution() != null && stepExecutionData.getJobExecution().getJobInstance() != null ? stepExecutionData.getJobExecution().getJobInstance().getJobName() : "?";
+    this.jobExecutionId = stepExecutionData.getJobExecutionId();
+    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
+    SimpleDateFormat timeFormat = new SimpleDateFormat("HH:mm:ss");
+    SimpleDateFormat durationFormat = new SimpleDateFormat("HH:mm:ss");
+
+    durationFormat.setTimeZone(TimeZone.getTimeZone("GMT"));
+    timeFormat.setTimeZone(timeZone);
+    dateFormat.setTimeZone(timeZone);
+    if (stepExecutionData.getStartTime() != null) {
+      this.startDate = dateFormat.format(stepExecutionData.getStartTime());
+      this.startTime = timeFormat.format(stepExecutionData.getStartTime());
+      Date endTime = stepExecutionData.getEndTime() != null ? stepExecutionData.getEndTime() : new Date();
+      this.durationMillis = endTime.getTime() - stepExecutionData.getStartTime().getTime();
+      this.duration = durationFormat.format(new Date(this.durationMillis));
+    }
+
+  }
+
+  public Long getId() {
+    return this.id;
+  }
+
+  public Long getJobExecutionId() {
+    return this.jobExecutionId;
+  }
+
+  public String getName() {
+    return this.name;
+  }
+
+  public String getJobName() {
+    return this.jobName;
+  }
+
+  public String getStartDate() {
+    return this.startDate;
+  }
+
+  public String getStartTime() {
+    return this.startTime;
+  }
+
+  public String getDuration() {
+    return this.duration;
+  }
+
+  public long getDurationMillis() {
+    return this.durationMillis;
+  }
+
+  public String getStatus() {
+    return this.id != null ? this.stepExecutionData.getStatus().toString() : "NONE";
+  }
+
+  public String getExitCode() {
+    return this.id != null ? this.stepExecutionData.getExitStatus().getExitCode() : "NONE";
+  }
+
+  @JsonIgnore
+  public StepExecutionData getStepExecution() {
+    return this.stepExecutionData;
+  }
+}
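
The duration string above is built by formatting the elapsed milliseconds as if they were an instant; that only reads correctly because the formatter is pinned to GMT and the duration stays under 24 hours. A standalone sketch of the trick:

  SimpleDateFormat durationFormat = new SimpleDateFormat("HH:mm:ss");
  durationFormat.setTimeZone(TimeZone.getTimeZone("GMT"));
  long durationMillis = 75000L; // 1 minute 15 seconds
  String duration = durationFormat.format(new Date(durationMillis)); // "00:01:15"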

http://git-wip-us.apache.org/repos/asf/ambari/blob/9ffef7fc/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionProgressResponse.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionProgressResponse.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionProgressResponse.java
new file mode 100644
index 0000000..26f9ed4
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionProgressResponse.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import org.springframework.batch.admin.history.StepExecutionHistory;
+import org.springframework.batch.admin.web.StepExecutionProgress;
+
+public class StepExecutionProgressResponse {
+
+  private StepExecutionProgress stepExecutionProgress;
+
+  private StepExecutionHistory stepExecutionHistory;
+
+  private StepExecutionInfoResponse stepExecutionInfoResponse;
+
+  public StepExecutionProgressResponse() {
+  }
+
+  public StepExecutionProgressResponse(StepExecutionProgress stepExecutionProgress, StepExecutionHistory stepExecutionHistory,
+                                       StepExecutionInfoResponse stepExecutionInfoResponse) {
+    this.stepExecutionProgress = stepExecutionProgress;
+    this.stepExecutionHistory = stepExecutionHistory;
+    this.stepExecutionInfoResponse = stepExecutionInfoResponse;
+  }
+
+  public StepExecutionProgress getStepExecutionProgress() {
+    return stepExecutionProgress;
+  }
+
+  public StepExecutionHistory getStepExecutionHistory() {
+    return stepExecutionHistory;
+  }
+
+  public StepExecutionInfoResponse getStepExecutionInfoResponse() {
+    return stepExecutionInfoResponse;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/9ffef7fc/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionRequest.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionRequest.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionRequest.java
new file mode 100644
index 0000000..2228171
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionRequest.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import javax.validation.constraints.NotNull;
+import javax.ws.rs.PathParam;
+
+public class StepExecutionRequest {
+
+  @PathParam("jobExecutionId")
+  @NotNull
+  private Long jobExecutionId;
+
+  @PathParam("stepExecutionId")
+  @NotNull
+  private Long stepExecutionId;
+
+  public Long getJobExecutionId() {
+    return jobExecutionId;
+  }
+
+  public void setJobExecutionId(Long jobExecutionId) {
+    this.jobExecutionId = jobExecutionId;
+  }
+
+  public Long getStepExecutionId() {
+    return stepExecutionId;
+  }
+
+  public void setStepExecutionId(Long stepExecutionId) {
+    this.stepExecutionId = stepExecutionId;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/9ffef7fc/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/wrapper/JobExecutionData.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/wrapper/JobExecutionData.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/wrapper/JobExecutionData.java
new file mode 100644
index 0000000..28e262a
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/wrapper/JobExecutionData.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model.wrapper;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.google.common.collect.Lists;
+import org.springframework.batch.core.BatchStatus;
+import org.springframework.batch.core.ExitStatus;
+import org.springframework.batch.core.JobExecution;
+import org.springframework.batch.core.JobInstance;
+import org.springframework.batch.core.JobParameters;
+import org.springframework.batch.core.StepExecution;
+import org.springframework.batch.item.ExecutionContext;
+
+import java.util.Collection;
+import java.util.Date;
+import java.util.List;
+
+/**
+ * Wrapper for {@link JobExecution}
+ */
+public class JobExecutionData {
+
+  private JobExecution jobExecution;
+
+  public JobExecutionData(JobExecution jobExecution) {
+    this.jobExecution = jobExecution;
+  }
+
+  @JsonIgnore
+  public JobExecution getJobExecution() {
+    return jobExecution;
+  }
+
+  @JsonIgnore
+  public Collection<StepExecution> getStepExecutions() {
+    return jobExecution.getStepExecutions();
+  }
+
+  public JobParameters getJobParameters() {
+    return jobExecution.getJobParameters();
+  }
+
+  public JobInstance getJobInstance() {
+    return jobExecution.getJobInstance();
+  }
+
+  public Collection<StepExecutionData> getStepExecutionDataList() {
+    List<StepExecutionData> stepExecutionDataList = Lists.newArrayList();
+    Collection<StepExecution> stepExecutions = getStepExecutions();
+    if (stepExecutions != null) {
+      for (StepExecution stepExecution : stepExecutions) {
+        stepExecutionDataList.add(new StepExecutionData(stepExecution));
+      }
+    }
+    return stepExecutionDataList;
+  }
+
+  public BatchStatus getStatus() {
+    return jobExecution.getStatus();
+  }
+
+  public Date getStartTime() {
+    return jobExecution.getStartTime();
+  }
+
+  public Date getCreateTime() {
+    return jobExecution.getCreateTime();
+  }
+
+  public Date getEndTime() {
+    return jobExecution.getEndTime();
+  }
+
+  public Date getLastUpdated() {
+    return jobExecution.getLastUpdated();
+  }
+
+  public ExitStatus getExitStatus() {
+    return jobExecution.getExitStatus();
+  }
+
+  public ExecutionContext getExecutionContext() {
+    return jobExecution.getExecutionContext();
+  }
+
+  public List<Throwable> getFailureExceptions() {
+    return jobExecution.getFailureExceptions();
+  }
+
+  public String getJobConfigurationName() {
+    return jobExecution.getJobConfigurationName();
+  }
+
+  public Long getId() {
+    return jobExecution.getId();
+  }
+
+  public Long getJobId() {
+    return jobExecution.getJobId();
+  }
+}
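
A minimal Jackson sketch (assuming a default ObjectMapper with no extra modules) of why this wrapper exists: the @JsonIgnore members keep the raw, mutually referencing Spring Batch objects out of the serialized output, while the delegating getters expose the useful fields.

  import com.fasterxml.jackson.databind.ObjectMapper;
  import org.apache.ambari.infra.model.wrapper.JobExecutionData;
  import org.springframework.batch.core.JobExecution;

  public class JobExecutionDataSketch {
    public static void main(String[] args) throws Exception {
      JobExecution execution = new JobExecution(1L); // id-only constructor, for illustration
      // Serializing the wrapper instead of the JobExecution itself avoids
      // walking the StepExecution <-> JobExecution cycle.
      String json = new ObjectMapper().writeValueAsString(new JobExecutionData(execution));
      System.out.println(json);
    }
  }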

http://git-wip-us.apache.org/repos/asf/ambari/blob/9ffef7fc/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/wrapper/StepExecutionData.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/wrapper/StepExecutionData.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/wrapper/StepExecutionData.java
new file mode 100644
index 0000000..26552ae
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/wrapper/StepExecutionData.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model.wrapper;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import org.springframework.batch.core.BatchStatus;
+import org.springframework.batch.core.ExitStatus;
+import org.springframework.batch.core.JobExecution;
+import org.springframework.batch.core.StepExecution;
+import org.springframework.batch.item.ExecutionContext;
+
+import java.util.Date;
+import java.util.List;
+
+/**
+ * Wrapper for {@link StepExecution}
+ */
+public class StepExecutionData {
+
+  @JsonIgnore
+  private final JobExecution jobExecution;
+
+  @JsonIgnore
+  private final StepExecution stepExecution;
+
+
+  public StepExecutionData(StepExecution stepExecution) {
+    this.stepExecution = stepExecution;
+    this.jobExecution = stepExecution.getJobExecution();
+  }
+
+  @JsonIgnore
+  public JobExecution getJobExecution() {
+    return jobExecution;
+  }
+
+  @JsonIgnore
+  public StepExecution getStepExecution() {
+    return stepExecution;
+  }
+
+  public String getStepName() {
+    return stepExecution.getStepName();
+  }
+
+  public int getReadCount() {
+    return stepExecution.getReadCount();
+  }
+
+  public BatchStatus getStatus() {
+    return stepExecution.getStatus();
+  }
+
+  public int getWriteCount() {
+    return stepExecution.getWriteCount();
+  }
+
+  public int getCommitCount() {
+    return stepExecution.getCommitCount();
+  }
+
+  public int getRollbackCount() {
+    return stepExecution.getRollbackCount();
+  }
+
+  public int getReadSkipCount() {
+    return stepExecution.getReadSkipCount();
+  }
+
+  public int getProcessSkipCount() {
+    return stepExecution.getProcessSkipCount();
+  }
+
+  public Date getStartTime() {
+    return stepExecution.getStartTime();
+  }
+
+  public int getWriteSkipCount() {
+    return stepExecution.getWriteSkipCount();
+  }
+
+  public Date getEndTime() {
+    return stepExecution.getEndTime();
+  }
+
+  public Date getLastUpdated() {
+    return stepExecution.getLastUpdated();
+  }
+
+  public ExecutionContext getExecutionContext() {
+    return stepExecution.getExecutionContext();
+  }
+
+  public ExitStatus getExitStatus() {
+    return stepExecution.getExitStatus();
+  }
+
+  public boolean isTerminateOnly() {
+    return stepExecution.isTerminateOnly();
+  }
+
+  public int getFilterCount() {
+    return stepExecution.getFilterCount();
+  }
+
+  public List<Throwable> getFailureExceptions() {
+    return stepExecution.getFailureExceptions();
+  }
+
+  public Long getId() {
+    return stepExecution.getId();
+  }
+
+  public Long getJobExecutionId() {
+    return stepExecution.getJobExecutionId();
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/9ffef7fc/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/rest/JobExceptionMapper.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/rest/JobExceptionMapper.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/rest/JobExceptionMapper.java
new file mode 100644
index 0000000..079cce3
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/rest/JobExceptionMapper.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.rest;
+
+
+import com.google.common.collect.Maps;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.batch.admin.service.NoSuchStepExecutionException;
+import org.springframework.batch.core.JobParametersInvalidException;
+import org.springframework.batch.core.launch.JobExecutionNotFailedException;
+import org.springframework.batch.core.launch.JobExecutionNotRunningException;
+import org.springframework.batch.core.launch.JobExecutionNotStoppedException;
+import org.springframework.batch.core.launch.JobInstanceAlreadyExistsException;
+import org.springframework.batch.core.launch.JobParametersNotFoundException;
+import org.springframework.batch.core.launch.NoSuchJobException;
+import org.springframework.batch.core.launch.NoSuchJobExecutionException;
+import org.springframework.batch.core.launch.NoSuchJobInstanceException;
+import org.springframework.batch.core.repository.JobExecutionAlreadyRunningException;
+import org.springframework.batch.core.repository.JobInstanceAlreadyCompleteException;
+import org.springframework.batch.core.repository.JobRestartException;
+import org.springframework.batch.core.step.NoSuchStepException;
+import org.springframework.web.bind.MethodArgumentNotValidException;
+
+import javax.batch.operations.JobExecutionAlreadyCompleteException;
+import javax.inject.Named;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.ext.ExceptionMapper;
+import javax.ws.rs.ext.Provider;
+import java.util.Map;
+
+@Named
+@Provider
+public class JobExceptionMapper implements ExceptionMapper<Throwable> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(JobExceptionMapper.class);
+
+  private static final Map<Class<?>, Response.Status> exceptionStatusCodeMap = Maps.newHashMap();
+
+  static {
+    exceptionStatusCodeMap.put(MethodArgumentNotValidException.class, Response.Status.BAD_REQUEST);
+    exceptionStatusCodeMap.put(NoSuchJobException.class, Response.Status.NOT_FOUND);
+    exceptionStatusCodeMap.put(NoSuchStepException.class, Response.Status.NOT_FOUND);
+    exceptionStatusCodeMap.put(NoSuchStepExecutionException.class, Response.Status.NOT_FOUND);
+    exceptionStatusCodeMap.put(NoSuchJobExecutionException.class, Response.Status.NOT_FOUND);
+    exceptionStatusCodeMap.put(NoSuchJobInstanceException.class, Response.Status.NOT_FOUND);
+    exceptionStatusCodeMap.put(JobExecutionNotRunningException.class, Response.Status.INTERNAL_SERVER_ERROR);
+    exceptionStatusCodeMap.put(JobExecutionNotStoppedException.class, Response.Status.INTERNAL_SERVER_ERROR);
+    exceptionStatusCodeMap.put(JobInstanceAlreadyExistsException.class, Response.Status.ACCEPTED);
+    exceptionStatusCodeMap.put(JobInstanceAlreadyCompleteException.class, Response.Status.ACCEPTED);
+    exceptionStatusCodeMap.put(JobExecutionAlreadyRunningException.class, Response.Status.ACCEPTED);
+    exceptionStatusCodeMap.put(JobExecutionAlreadyCompleteException.class, Response.Status.ACCEPTED);
+    exceptionStatusCodeMap.put(JobParametersNotFoundException.class, Response.Status.NOT_FOUND);
+    exceptionStatusCodeMap.put(JobExecutionNotFailedException.class, Response.Status.INTERNAL_SERVER_ERROR);
+    exceptionStatusCodeMap.put(JobRestartException.class, Response.Status.INTERNAL_SERVER_ERROR);
+    exceptionStatusCodeMap.put(JobParametersInvalidException.class, Response.Status.BAD_REQUEST);
+  }
+
+  @Override
+  public Response toResponse(Throwable throwable) {
+    LOG.error("REST Exception occurred:", throwable);
+    Response.Status status = Response.Status.INTERNAL_SERVER_ERROR;
+
+    for (Map.Entry<Class<?>, Response.Status> entry : exceptionStatusCodeMap.entrySet()) {
+      if (entry.getKey().isAssignableFrom(throwable.getClass())) {
+        status = entry.getValue();
+        LOG.info("Exception mapped to: {} with status code: {}", entry.getKey().getCanonicalName(), entry.getValue().getStatusCode());
+        break;
+      }
+    }
+
+    return Response.status(status).entity(new StatusMessage(throwable.getMessage(), status.getStatusCode()))
+      .type(MediaType.APPLICATION_JSON_TYPE).build();
+  }
+
+  private class StatusMessage {
+    private String message;
+    private int statusCode;
+
+    StatusMessage(String message, int statusCode) {
+      this.message = message;
+      this.statusCode = statusCode;
+    }
+
+    public String getMessage() {
+      return message;
+    }
+
+    public int getStatusCode() {
+      return statusCode;
+    }
+  }
+}
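
With the mapper registered, the listed Spring Batch exceptions reach REST clients as a small JSON status body instead of a servlet stack trace. Looking up a nonexistent job ("archive_job" is an invented name, and the message text depends on the exception) would produce a 404 response shaped like:

  {"message": "No such job: archive_job", "statusCode": 404}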

http://git-wip-us.apache.org/repos/asf/ambari/blob/9ffef7fc/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/rest/JobResource.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/rest/JobResource.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/rest/JobResource.java
index 27fed40..7023957 100644
--- a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/rest/JobResource.java
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/rest/JobResource.java
@@ -20,23 +20,46 @@ package org.apache.ambari.infra.rest;
 
 import io.swagger.annotations.Api;
 import io.swagger.annotations.ApiOperation;
-import org.springframework.batch.core.JobParametersBuilder;
+import org.apache.ambari.infra.manager.JobManager;
+import org.apache.ambari.infra.model.ExecutionContextResponse;
+import org.apache.ambari.infra.model.JobDetailsResponse;
+import org.apache.ambari.infra.model.JobExecutionDetailsResponse;
+import org.apache.ambari.infra.model.JobExecutionInfoResponse;
+import org.apache.ambari.infra.model.JobExecutionRequest;
+import org.apache.ambari.infra.model.JobExecutionRestartRequest;
+import org.apache.ambari.infra.model.JobExecutionStopRequest;
+import org.apache.ambari.infra.model.JobInstanceStartRequest;
+import org.apache.ambari.infra.model.JobRequest;
+import org.apache.ambari.infra.model.PageRequest;
+import org.apache.ambari.infra.model.StepExecutionContextResponse;
+import org.apache.ambari.infra.model.StepExecutionInfoResponse;
+import org.apache.ambari.infra.model.StepExecutionProgressResponse;
+import org.apache.ambari.infra.model.StepExecutionRequest;
+import org.springframework.batch.admin.service.NoSuchStepExecutionException;
+import org.springframework.batch.admin.web.JobInfo;
 import org.springframework.batch.core.JobParametersInvalidException;
-import org.springframework.batch.core.explore.JobExplorer;
+import org.springframework.batch.core.launch.JobExecutionNotRunningException;
 import org.springframework.batch.core.launch.JobInstanceAlreadyExistsException;
-import org.springframework.batch.core.launch.JobOperator;
 import org.springframework.batch.core.launch.NoSuchJobException;
+import org.springframework.batch.core.launch.NoSuchJobExecutionException;
+import org.springframework.batch.core.launch.NoSuchJobInstanceException;
+import org.springframework.batch.core.repository.JobExecutionAlreadyRunningException;
+import org.springframework.batch.core.repository.JobInstanceAlreadyCompleteException;
+import org.springframework.batch.core.repository.JobRestartException;
 import org.springframework.context.annotation.Scope;
 
 import javax.inject.Inject;
 import javax.inject.Named;
+import javax.validation.Valid;
+import javax.validation.constraints.NotNull;
+import javax.ws.rs.BeanParam;
+import javax.ws.rs.DELETE;
 import javax.ws.rs.GET;
 import javax.ws.rs.POST;
 import javax.ws.rs.Path;
 import javax.ws.rs.PathParam;
 import javax.ws.rs.Produces;
-import javax.ws.rs.QueryParam;
-import java.util.Date;
+import java.util.List;
 import java.util.Set;
 
 @Api(value = "jobs", description = "Job operations")
@@ -46,35 +69,123 @@ import java.util.Set;
 public class JobResource {
 
   @Inject
-  private JobOperator jobOperator;
+  private JobManager jobManager;
 
-  @Inject
-  private JobExplorer jobExplorer;
+  @GET
+  @Produces({"application/json"})
+  @ApiOperation("Get all jobs")
+  public List<JobInfo> getAllJobs(@BeanParam @Valid PageRequest request) {
+    return jobManager.getAllJobs(request.getPage(), request.getSize());
+  }
+
+  @POST
+  @Produces({"application/json"})
+  @Path("{jobName}")
+  @ApiOperation("Start a new job instance by job name.")
+  public JobExecutionInfoResponse startJob(@BeanParam @Valid JobInstanceStartRequest request)
+    throws JobParametersInvalidException, JobInstanceAlreadyExistsException, NoSuchJobException, JobExecutionAlreadyRunningException,
+    JobRestartException, JobInstanceAlreadyCompleteException {
+    return jobManager.launchJob(request.getJobName(), request.getParams());
+  }
 
   @GET
   @Produces({"application/json"})
+  @Path("/info/names")
   @ApiOperation("Get all job names")
   public Set<String> getAllJobNames() {
-    return jobOperator.getJobNames();
+    return jobManager.getAllJobNames();
+  }
+
+  @GET
+  @Produces({"application/json"})
+  @Path("/info/{jobName}")
+  @ApiOperation("Get job details by job name.")
+  public JobDetailsResponse getJobDetails(@BeanParam @Valid JobRequest jobRequest) throws NoSuchJobException {
+    return jobManager.getJobDetails(jobRequest.getJobName(), jobRequest.getPage(), jobRequest.getSize());
+  }
+
+  @GET
+  @Path("{jobName}/executions")
+  @Produces({"application/json"})
+  @ApiOperation("Get the id values of all the running job instances.")
+  public Set<Long> getExecutionIdsByJobName(@PathParam("jobName") @NotNull @Valid String jobName) throws NoSuchJobException {
+    return jobManager.getExecutionIdsByJobName(jobName);
   }
 
   @GET
-  @Path("executions/{jobName}")
   @Produces({"application/json"})
-  @ApiOperation("Get the id values of all the running job instances by job name")
-  public Set<Long> getExecutionIdsByJobName(
-    @PathParam("jobName") String jobName) throws NoSuchJobException {
-    return jobOperator.getRunningExecutions(jobName);
+  @Path("/executions/{jobExecutionId}")
+  @ApiOperation("Get job and step details for job execution instance.")
+  public JobExecutionDetailsResponse getExectionInfo(@PathParam("jobExecutionId") @Valid Long jobExecutionId) throws NoSuchJobExecutionException {
+    return jobManager.getExectionInfo(jobExecutionId);
+  }
+
+  @GET
+  @Produces({"application/json"})
+  @Path("/executions/{jobExecutionId}/context")
+  @ApiOperation("Get execution context for specific job.")
+  public ExecutionContextResponse getExecutionContextByJobExecId(@PathParam("jobExecutionId") Long executionId) throws NoSuchJobExecutionException {
+    return jobManager.getExecutionContextByJobExecutionId(executionId);
+  }
+
+
+  @DELETE
+  @Produces({"application/json"})
+  @Path("/executions/{jobExecutionId}")
+  @ApiOperation("Stop or abandon a running job execution.")
+  public JobExecutionInfoResponse stopOrAbandonJobExecution(@BeanParam @Valid JobExecutionStopRequest request)
+    throws NoSuchJobExecutionException, JobExecutionNotRunningException, JobExecutionAlreadyRunningException {
+    return jobManager.stopOrAbandonJobByExecutionId(request.getJobExecutionId(), request.getOperation());
+  }
+
+  @DELETE
+  @Produces({"application/json"})
+  @Path("/executions")
+  @ApiOperation("Stop all job executions.")
+  public Integer stopAll() {
+    return jobManager.stopAllJobs();
+  }
+
+  @GET
+  @Produces({"application/json"})
+  @Path("/{jobName}/{jobInstanceId}/executions")
+  @ApiOperation("Get execution for job instance.")
+  public List<JobExecutionInfoResponse> getExecutionsForInstance(@BeanParam @Valid JobExecutionRequest request) throws JobInstanceAlreadyCompleteException,
+    NoSuchJobExecutionException, JobExecutionAlreadyRunningException, JobParametersInvalidException, JobRestartException, NoSuchJobException, NoSuchJobInstanceException {
+    return jobManager.getExecutionsForJobInstance(request.getJobName(), request.getJobInstanceId());
   }
 
   @POST
   @Produces({"application/json"})
-  @Path("start/{jobName}")
-  public Long startJob(@PathParam("jobName") String jobName, @QueryParam("params") String params)
-    throws JobParametersInvalidException, JobInstanceAlreadyExistsException, NoSuchJobException {
-    JobParametersBuilder jobParametersBuilder = new JobParametersBuilder();
-    jobParametersBuilder.addDate("date", new Date());
-    return jobOperator.start(jobName, jobParametersBuilder.toJobParameters() + "," + params);
+  @Path("/{jobName}/{jobInstanceId}/executions")
+  @ApiOperation("Restart job instance.")
+  public JobExecutionInfoResponse restartJobInstance(@BeanParam @Valid JobExecutionRestartRequest request) throws JobInstanceAlreadyCompleteException,
+    NoSuchJobExecutionException, JobExecutionAlreadyRunningException, JobParametersInvalidException, JobRestartException, NoSuchJobException {
+    return jobManager.restart(request.getJobInstanceId(), request.getJobName(), request.getOperation());
+  }
+
+  @GET
+  @Produces({"application/json"})
+  @Path("/executions/{jobExecutionId}/steps/{stepExecutionId}")
+  @ApiOperation("Get step execution details.")
+  public StepExecutionInfoResponse getStepExecution(@BeanParam @Valid StepExecutionRequest request) throws NoSuchStepExecutionException, NoSuchJobExecutionException {
+    return jobManager.getStepExecution(request.getJobExecutionId(), request.getStepExecutionId());
+  }
+
+  @GET
+  @Produces({"application/json"})
+  @Path("/executions/{jobExecutionId}/steps/{stepExecutionId}/execution-context")
+  @ApiOperation("Get the execution context of step execution.")
+  public StepExecutionContextResponse getStepExecutionContext(@BeanParam @Valid StepExecutionRequest request) throws NoSuchStepExecutionException, NoSuchJobExecutionException {
+    return jobManager.getStepExecutionContext(request.getJobExecutionId(), request.getStepExecutionId());
+  }
+
+  @GET
+  @Produces({"application/json"})
+  @Path("/executions/{jobExecutionId}/steps/{stepExecutionId}/progress")
+  @ApiOperation("Get progress of step execution.")
+  public StepExecutionProgressResponse getStepExecutionProgress(@BeanParam @Valid StepExecutionRequest request) throws NoSuchStepExecutionException, NoSuchJobExecutionException {
+    return jobManager.getStepExecutionProgress(request.getJobExecutionId(), request.getStepExecutionId());
   }
 
 }
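
For orientation, a minimal JAX-RS 2.x client sketch (not part of the commit; the host, port, context path, job name, execution id, and parameter string are all invented) exercising two of the new endpoints, first starting a job and then stopping an execution:

  import javax.ws.rs.client.Client;
  import javax.ws.rs.client.ClientBuilder;
  import javax.ws.rs.client.Entity;
  import javax.ws.rs.core.MediaType;

  public class JobResourceClientSketch {
    public static void main(String[] args) {
      Client client = ClientBuilder.newClient();
      String base = "http://localhost:61890"; // host and port are assumptions

      // POST /{jobName}?params=... maps to startJob(...)
      String started = client.target(base)
          .path("jobs").path("archive_job")              // job name is hypothetical
          .queryParam("params", "exportDir=/tmp/export") // illustrative params string
          .request(MediaType.APPLICATION_JSON)
          .post(Entity.text(""), String.class);
      System.out.println(started);

      // DELETE /executions/{jobExecutionId}?operation=STOP maps to stopOrAbandonJobExecution(...)
      String stopped = client.target(base)
          .path("jobs").path("executions").path("42")    // execution id is made up
          .queryParam("operation", "STOP")
          .request(MediaType.APPLICATION_JSON)
          .delete(String.class);
      System.out.println(stopped);

      client.close();
    }
  }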


[42/50] [abbrv] ambari git commit: AMBARI-21069. Minimize config changes during ambari upgrade. Add script to compare stack configs (dlysnichenko)

Posted by ad...@apache.org.
AMBARI-21069. Minimize config changes during ambari upgrade. Add script to compare stack configs (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5ea441aa
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5ea441aa
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5ea441aa

Branch: refs/heads/ambari-rest-api-explorer
Commit: 5ea441aa7b4080a7a1eb07a4a1e01a2aa8d0d92e
Parents: cdc18ec
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Mon May 22 18:48:32 2017 +0300
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Mon May 22 18:48:32 2017 +0300

----------------------------------------------------------------------
 .../config-utils/diff_stack_properties.py       | 154 +++++++++++++++++++
 1 file changed, 154 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/5ea441aa/dev-support/config-utils/diff_stack_properties.py
----------------------------------------------------------------------
diff --git a/dev-support/config-utils/diff_stack_properties.py b/dev-support/config-utils/diff_stack_properties.py
new file mode 100644
index 0000000..beef608
--- /dev/null
+++ b/dev-support/config-utils/diff_stack_properties.py
@@ -0,0 +1,154 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import sys
+import os
+
+import xml.etree.ElementTree as ET
+
+COMMON = "common-services"
+STACKS = "stacks"
+CONFIG_DIR = "configuration"
+SERVICES_DIR = "services"
+
+SYMLINKS_TXT = "symlinks.txt"
+VERSIONS_TXT = "versions.txt"
+
+
+def main():
+  """ Parse arguments from user, check that all required args are passed in and start work."""
+
+  if len(sys.argv) != 3:
+    print "usage: diff_stack_properties.py [first_stack_dir] [second_stack_dir]"
+    sys.exit(-1)
+
+  args = sys.argv[1:]
+
+  if not os.path.exists(args[0]) or not os.path.exists(args[1]):
+    print "usage: diff_stack_properties.py [first_stack_dir] [second_stack_dir]"
+    sys.exit(-1)
+
+  do_work(args)
+
+
+def do_work(args):
+  """
+  Compare stack dirs.
+  :param args: two resource dirs, the new stack dir first and the old stack dir second
+  """
+  new_stacks = args[0]
+  old_stacks = args[1]
+
+  compare_common(new_stacks, old_stacks)
+
+  compare_stacks(new_stacks, old_stacks)
+
+
+def compare_stacks(new_stacks, old_stacks):
+  print "#############[{}]#############".format(STACKS)
+  for stack in [stack for stack in os.listdir(os.path.join(new_stacks, STACKS)) if
+                os.path.isdir(os.path.join(new_stacks, STACKS, stack))]:
+    for version in os.listdir(os.path.join(new_stacks, STACKS, stack)):
+      if os.path.exists(os.path.join(new_stacks, STACKS, stack, version, CONFIG_DIR)):
+        diff = compare_config_dirs(os.path.join(new_stacks, STACKS, stack, version, CONFIG_DIR),
+                                   os.path.join(old_stacks, STACKS, stack, version, CONFIG_DIR))
+        if diff != "":
+          print "#############{}.{}#############".format(stack, version)
+          print diff
+      if os.path.exists(os.path.join(new_stacks, STACKS, stack, version, SERVICES_DIR)):
+        print "#############{}.{}#############".format(stack, version)
+        for service_name in os.listdir(os.path.join(new_stacks, STACKS, stack, version, SERVICES_DIR)):
+          new_configs_dir = os.path.join(new_stacks, STACKS, stack, version, SERVICES_DIR, service_name, CONFIG_DIR)
+          old_configs_dir = os.path.join(old_stacks, STACKS, stack, version, SERVICES_DIR, service_name, CONFIG_DIR)
+          diff = compare_config_dirs(new_configs_dir, old_configs_dir)
+          if diff != "":
+            print "=========={}==========".format(service_name)
+            print diff
+
+
+def compare_common(new_stacks, old_stacks):
+  print "#############[{}]#############".format(COMMON)
+  for service_name in os.listdir(os.path.join(new_stacks, COMMON)):
+    for version in os.listdir(os.path.join(new_stacks, COMMON, service_name)):
+      new_configs_dir = os.path.join(new_stacks, COMMON, service_name, version, CONFIG_DIR)
+      old_configs_dir = os.path.join(old_stacks, COMMON, service_name, version, CONFIG_DIR)
+      diff = compare_config_dirs(new_configs_dir, old_configs_dir)
+      if diff != "":
+        print "=========={}.{}==========".format(service_name, version)
+        print diff
+
+
+def compare_config_dirs(new_configs_dir, old_configs_dir):
+  result = ""
+  if os.path.exists(old_configs_dir) and os.path.exists(new_configs_dir):
+    for file_name in os.listdir(new_configs_dir):
+      old_file_name = os.path.join(old_configs_dir, file_name)
+      if os.path.exists(old_file_name):
+        result += compare_config_files(os.path.join(new_configs_dir, file_name),
+                                       os.path.join(old_configs_dir, file_name),
+                                       file_name)
+      else:
+        result += "new file {}\n".format(file_name)
+  else:
+    if os.path.exists(old_configs_dir) or os.path.exists(new_configs_dir):
+      if not os.path.exists(new_configs_dir):
+        result += "deleted configuration dir {}\n".format(new_configs_dir)
+      if not os.path.exists(old_configs_dir):
+        result += "new configuration dir {} with files {} \n".format(new_configs_dir, os.listdir(new_configs_dir))
+  return result
+
+
+def compare_config_files(new_configs, old_configs, file_name):
+  result = ""
+  if os.path.exists(old_configs):
+    old_configs_tree = ET.ElementTree(file=old_configs)
+    new_configs_tree = ET.ElementTree(file=new_configs)
+    for new_property in new_configs_tree.findall("property"):
+      name = new_property.find("name").text
+      if new_property.find("value") is not None:
+        value = new_property.find("value").text
+      if new_property.find("on-ambari-upgrade") is not None:
+        on_amb_upgrade = new_property.find("on-ambari-upgrade").get("add")
+      else:
+        on_amb_upgrade = None
+
+      deleted = None
+      old_deleted = None
+      if new_property.find("deleted") is not None:
+        deleted = new_property.find("deleted").text
+      old_property = old_configs_tree.find("property[name='{}']".format(name))
+
+      if on_amb_upgrade == "true" and old_property is None:
+        result += "add {}\n".format(name)
+      else:
+        if old_property is not None and old_property.find("deleted") is not None:
+          old_deleted = old_property.find("deleted").text
+        if deleted == "true" and old_deleted != "true":
+          result += "deleted {}\n".format(name)
+    if result != "":
+      result = "------{}------\n".format(file_name) + result
+  else:
+    result += "{} not exists\n".format(old_configs, )
+  return result
+
+
+if __name__ == "__main__":
+  main()
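
Run it against two extracted resource trees, the new stack directory first (the paths here are hypothetical):

  python diff_stack_properties.py /tmp/ambari-new/resources /tmp/ambari-old/resources

For each common service and each stack version it prints "add <name>" for properties that are new and marked on-ambari-upgrade add="true", and "deleted <name>" for properties newly flagged as deleted, which is the set of config changes the script flags for review during an Ambari upgrade.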


[03/50] [abbrv] ambari git commit: AMBARI-20758 Aggregate local metrics for minute aggregation time window (dsen)

Posted by ad...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2 b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
index b876a3d..28944ca 100644
--- a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
@@ -23,6 +23,8 @@ port={{metric_collector_port}}
 collectionFrequency={{metrics_collection_period}}000
 maxRowCacheSize=10000
 sendInterval={{metrics_report_interval}}000
+host_in_memory_aggregation = {{host_in_memory_aggregation}}
+host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 instanceId={{cluster_name}}
 set.instanceId={{set_instanceId}}
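
Rendered with the stack defaults that params_linux.py (below) supplies when ams-site does not override them, the two added lines come out as:

  host_in_memory_aggregation = True
  host_in_memory_aggregation_port = 61888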

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index efea167..d45aea6 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -184,6 +184,9 @@ if has_metric_collector:
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
+
 # if hbase is selected the hbase_rs_hosts, should not be empty, but still default just in case
 if 'slave_hosts' in config['clusterHostInfo']:
   rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/slave_hosts') #if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2 b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
index 24535c5..c8f2f13 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
@@ -78,6 +78,8 @@ hbase.sink.timeline.protocol={{metric_collector_protocol}}
 hbase.sink.timeline.port={{metric_collector_port}}
 hbase.sink.timeline.instanceId={{cluster_name}}
 hbase.sink.timeline.set.instanceId={{set_instanceId}}
+hbase.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+hbase.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 # HTTPS properties
 hbase.sink.timeline.truststore.path = {{metric_truststore_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2 b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
index 9076269..f4e25e1 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
@@ -76,6 +76,8 @@ hbase.sink.timeline.protocol={{metric_collector_protocol}}
 hbase.sink.timeline.port={{metric_collector_port}}
 hbase.sink.timeline.instanceId={{cluster_name}}
 hbase.sink.timeline.set.instanceId={{set_instanceId}}
+hbase.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+hbase.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 # HTTPS properties
 hbase.sink.timeline.truststore.path = {{metric_truststore_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml
index fae61d3..4b03880 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml
@@ -88,6 +88,8 @@ resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
 *.sink.timeline.port={{metric_collector_port}}
 *.sink.timeline.instanceId={{cluster_name}}
 *.sink.timeline.set.instanceId={{set_instanceId}}
+*.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+*.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 # HTTPS properties
 *.sink.timeline.truststore.path = {{metric_truststore_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index d854451..c1128a5 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -566,6 +566,8 @@ if has_metric_collector:
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
 ########################################################
 ############# Atlas related params #####################
 ########################################################

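The two new params above lean on default(), which returns the supplied fallback whenever the requested key is absent from the command JSON. A self-contained sketch of that lookup behaviour (the real helper lives in resource_management.libraries.functions.default; the dict below is a hypothetical command JSON):

    config = {'configurations': {'ams-site': {}}}  # hypothetical command JSON with no overrides

    def default(path, fallback):
        # Walk the config dict by path segments; return the fallback on a miss.
        node = config
        for key in path.strip('/').split('/'):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    print(default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True))        # True
    print(default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888))  # 61888
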
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2 b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
index 9328f9f..d78a342 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
@@ -53,6 +53,8 @@
   hivemetastore.sink.timeline.collector.hosts={{ams_collector_hosts}}
   hivemetastore.sink.timeline.port={{metric_collector_port}}
   hivemetastore.sink.timeline.protocol={{metric_collector_protocol}}
+  hivemetastore.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+  hivemetastore.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 
 {% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2 b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
index 9a7f9dc..1f496ef 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
@@ -53,5 +53,7 @@
   hiveserver2.sink.timeline.collector.hosts={{ams_collector_hosts}}
   hiveserver2.sink.timeline.port={{metric_collector_port}}
   hiveserver2.sink.timeline.protocol={{metric_collector_protocol}}
+  hiveserver2.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+  hiveserver2.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 {% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llapdaemon.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llapdaemon.j2 b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llapdaemon.j2
index e9fe024..01869c0 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llapdaemon.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llapdaemon.j2
@@ -52,5 +52,7 @@
   llapdaemon.sink.timeline.collector.hosts={{ams_collector_hosts}}
   llapdaemon.sink.timeline.port={{metric_collector_port}}
   llapdaemon.sink.timeline.protocol={{metric_collector_protocol}}
+  llapdaemon.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+  llapdaemon.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 {% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2 b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
index bd7eb0c..2e25c4a 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
@@ -52,5 +52,7 @@
   llaptaskscheduler.sink.timeline.collector.hosts={{ams_collector_hosts}}
   llaptaskscheduler.sink.timeline.port={{metric_collector_port}}
   llaptaskscheduler.sink.timeline.protocol={{metric_collector_protocol}}
+  llaptaskscheduler.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+  llaptaskscheduler.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 {% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/params_linux.py
index c0ac535..a12d388 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/params_linux.py
@@ -565,6 +565,9 @@ if has_metric_collector:
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
+
 ########################################################
 ############# Atlas related params #####################
 ########################################################

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2 b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
index 9328f9f..d78a342 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
@@ -53,6 +53,8 @@
   hivemetastore.sink.timeline.collector.hosts={{ams_collector_hosts}}
   hivemetastore.sink.timeline.port={{metric_collector_port}}
   hivemetastore.sink.timeline.protocol={{metric_collector_protocol}}
+  hivemetastore.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+  hivemetastore.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 
 {% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2 b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
index 9a7f9dc..1f496ef 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
@@ -53,5 +53,7 @@
   hiveserver2.sink.timeline.collector.hosts={{ams_collector_hosts}}
   hiveserver2.sink.timeline.port={{metric_collector_port}}
   hiveserver2.sink.timeline.protocol={{metric_collector_protocol}}
+  hiveserver2.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+  hiveserver2.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 {% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llapdaemon.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llapdaemon.j2 b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llapdaemon.j2
index e9fe024..01869c0 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llapdaemon.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llapdaemon.j2
@@ -52,5 +52,7 @@
   llapdaemon.sink.timeline.collector.hosts={{ams_collector_hosts}}
   llapdaemon.sink.timeline.port={{metric_collector_port}}
   llapdaemon.sink.timeline.protocol={{metric_collector_protocol}}
+  llapdaemon.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+  llapdaemon.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 {% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2 b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
index bd7eb0c..2e25c4a 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
@@ -52,5 +52,7 @@
   llaptaskscheduler.sink.timeline.collector.hosts={{ams_collector_hosts}}
   llaptaskscheduler.sink.timeline.port={{metric_collector_port}}
   llaptaskscheduler.sink.timeline.protocol={{metric_collector_protocol}}
+  llaptaskscheduler.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+  llaptaskscheduler.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 {% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
index e01dacd..26e7a77 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
@@ -422,4 +422,15 @@
     <description>Timeline metrics reporter send interval</description>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>kafka.timeline.metrics.host_in_memory_aggregation</name>
+    <value>{{host_in_memory_aggregation}}</value>
+    <description>If set to "true", host metrics will be aggregated in memory on each host.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>kafka.timeline.metrics.host_in_memory_aggregation_port</name>
+    <value>{{host_in_memory_aggregation_port}}</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
index 5b0be54..9acc1ef 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
@@ -156,6 +156,9 @@ if has_metric_collector:
     metric_collector_protocol = 'https'
   else:
     metric_collector_protocol = 'http'
+
+  host_in_memory_aggregation = str(default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)).lower()
+  host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
   pass
 
 # Security-related params

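Note the str(...).lower() wrapper in the Kafka params above: the fallback is a Python bool, and str(True) yields "True", while the kafka-broker.xml property expects the Java-style lowercase literal. A one-line sketch:

    # str(True) is "True" in Python; .lower() produces the "true" literal expected downstream.
    host_in_memory_aggregation = str(True).lower()
    assert host_in_memory_aggregation == "true"
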
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
index d9fae76..78ec165 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
@@ -208,6 +208,8 @@ metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sin
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 metric_collector_sink_jar = "/usr/lib/storm/lib/ambari-metrics-storm-sink-with-common-*.jar"
 metric_collector_legacy_sink_jar = "/usr/lib/storm/lib/ambari-metrics-storm-sink-legacy-with-common-*.jar"
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
 
 
 # Cluster Zookeeper quorum

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/config.yaml.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/config.yaml.j2 b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/config.yaml.j2
index 51162e8..67b89c4 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/config.yaml.j2
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/config.yaml.j2
@@ -61,6 +61,8 @@ metrics_collector:
   protocol: "{{metric_collector_protocol}}"
   port: "{{metric_collector_port}}"
   appId: "{{metric_collector_app_id}}"
+  host_in_memory_aggregation: {{host_in_memory_aggregation}}
+  host_in_memory_aggregation_port: {{host_in_memory_aggregation_port}}
 
   # HTTPS settings
   truststore.path : "{{metric_truststore_path}}"

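Since config.yaml is parsed as YAML, the two added keys follow the same "key: value" form as the quoted keys around them. A quick parse check, sketched under the assumption that PyYAML is available:

    import yaml

    # A trimmed-down fragment of the rendered config.yaml above.
    fragment = (
        'metrics_collector:\n'
        '  protocol: "http"\n'
        '  port: "6188"\n'
        '  host_in_memory_aggregation: true\n'
        '  host_in_memory_aggregation_port: 61888\n'
    )
    parsed = yaml.safe_load(fragment)
    assert parsed["metrics_collector"]["host_in_memory_aggregation_port"] == 61888
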
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/storm-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/storm-metrics2.properties.j2 b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/storm-metrics2.properties.j2
index 0501039..1dedffc 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/storm-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/storm-metrics2.properties.j2
@@ -25,6 +25,8 @@ sendInterval={{metrics_report_interval}}000
 clusterReporterAppId=nimbus
 instanceId={{cluster_name}}
 set.instanceId={{set_instanceId}}
+host_in_memory_aggregation = {{host_in_memory_aggregation}}
+host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 # HTTPS properties
 truststore.path = {{metric_truststore_path}}

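One subtlety in storm-metrics2.properties.j2 above: sendInterval is produced by appending three literal zeros to the report interval, converting seconds to milliseconds through template concatenation. A sketch with the default interval of 60:

    # The template line is "sendInterval={{metrics_report_interval}}000".
    metrics_report_interval = 60
    print("sendInterval=%s000" % metrics_report_interval)  # sendInterval=60000
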
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
index 7282bb5..3488e75 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
@@ -164,6 +164,9 @@ if has_metric_collector:
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
+
 # Cluster Zookeeper quorum
 zookeeper_quorum = None
 if has_zk_host:

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
index 1b02a97..1f8499f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -77,6 +77,8 @@ resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
 *.sink.timeline.port={{metric_collector_port}}
 *.sink.timeline.instanceId={{cluster_name}}
 *.sink.timeline.set.instanceId={{set_instanceId}}
+*.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+*.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 # HTTPS properties
 *.sink.timeline.truststore.path = {{metric_truststore_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/configuration/hadoop-metrics2.properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/configuration/hadoop-metrics2.properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/configuration/hadoop-metrics2.properties.xml
index fae61d3..4b03880 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/configuration/hadoop-metrics2.properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/configuration/hadoop-metrics2.properties.xml
@@ -88,6 +88,8 @@ resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
 *.sink.timeline.port={{metric_collector_port}}
 *.sink.timeline.instanceId={{cluster_name}}
 *.sink.timeline.set.instanceId={{set_instanceId}}
+*.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+*.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 # HTTPS properties
 *.sink.timeline.truststore.path = {{metric_truststore_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
index 678bbde..a3830f7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
@@ -158,6 +158,8 @@ if has_metric_collector:
   pass
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
 
 # Cluster Zookeeper quorum
 zookeeper_quorum = None

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
index 1b02a97..1f8499f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -77,6 +77,8 @@ resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
 *.sink.timeline.port={{metric_collector_port}}
 *.sink.timeline.instanceId={{cluster_name}}
 *.sink.timeline.set.instanceId={{set_instanceId}}
+*.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+*.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 # HTTPS properties
 *.sink.timeline.truststore.path = {{metric_truststore_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/test/java/org/apache/ambari/server/metric/system/impl/TestAmbariMetricsSinkImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/metric/system/impl/TestAmbariMetricsSinkImpl.java b/ambari-server/src/test/java/org/apache/ambari/server/metric/system/impl/TestAmbariMetricsSinkImpl.java
index 4a60892..3ee8ebc 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/metric/system/impl/TestAmbariMetricsSinkImpl.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/metric/system/impl/TestAmbariMetricsSinkImpl.java
@@ -74,6 +74,16 @@ public class TestAmbariMetricsSinkImpl extends AbstractTimelineMetricsSink imple
   }
 
   @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return true;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return 61888;
+  }
+
+  @Override
   public void init(MetricsConfiguration configuration) {
 
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/contrib/management-packs/hdf-ambari-mpack/src/main/resources/stacks/HDF/2.0/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/hdf-ambari-mpack/src/main/resources/stacks/HDF/2.0/hooks/before-START/scripts/params.py b/contrib/management-packs/hdf-ambari-mpack/src/main/resources/stacks/HDF/2.0/hooks/before-START/scripts/params.py
index 6a70675..8cc876f 100644
--- a/contrib/management-packs/hdf-ambari-mpack/src/main/resources/stacks/HDF/2.0/hooks/before-START/scripts/params.py
+++ b/contrib/management-packs/hdf-ambari-mpack/src/main/resources/stacks/HDF/2.0/hooks/before-START/scripts/params.py
@@ -135,6 +135,8 @@ if has_metric_collector:
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
 #hadoop params
 
 if has_namenode or dfs_type == 'HCFS':


[15/50] [abbrv] ambari git commit: AMBARI-21048. HDP 3.0 TP - create service definition for Storm with configs, kerberos, widgets, etc. (vbrodetsky)

Posted by ad...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/params_linux.py
new file mode 100644
index 0000000..78ec165
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/params_linux.py
@@ -0,0 +1,424 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import re
+import ambari_simplejson as json # simplejson is much faster than the Python 2.6 json module and has the same function set.
+
+import status_params
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from ambari_commons import yaml_utils
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.get_bare_principal import get_bare_principal
+from resource_management.libraries.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.expect import expect
+from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
+from resource_management.libraries.functions import is_empty
+from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
+from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+stack_root = status_params.stack_root
+sudo = AMBARI_SUDO_BINARY
+
+limits_conf_dir = "/etc/security/limits.d"
+
+# Needed since this is an Atlas Hook service.
+cluster_name = config['clusterName']
+
+stack_name = status_params.stack_name
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+version = default("/commandParams/version", None)
+
+agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
+agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
+
+storm_component_home_dir = status_params.storm_component_home_dir
+conf_dir = status_params.conf_dir
+
+stack_version_unformatted = status_params.stack_version_unformatted
+stack_version_formatted = status_params.stack_version_formatted
+stack_supports_ru = stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted)
+stack_supports_storm_kerberos = stack_version_formatted and check_stack_feature(StackFeature.STORM_KERBEROS, stack_version_formatted)
+stack_supports_storm_ams = stack_version_formatted and check_stack_feature(StackFeature.STORM_AMS, stack_version_formatted)
+stack_supports_core_site_for_ranger_plugin = check_stack_feature(StackFeature.CORE_SITE_FOR_RANGER_PLUGINS_SUPPORT, stack_version_formatted)
+
+# get the correct version to use for checking stack features
+version_for_stack_feature_checks = get_stack_feature_version(config)
+
+stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
+stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
+
+# default hadoop params
+rest_lib_dir = "/usr/lib/storm/contrib/storm-rest"
+storm_bin_dir = "/usr/bin"
+storm_lib_dir = "/usr/lib/storm/lib/"
+
+# hadoop parameters for 2.2+
+if stack_supports_ru:
+  rest_lib_dir = format("{storm_component_home_dir}/contrib/storm-rest")
+  storm_bin_dir = format("{storm_component_home_dir}/bin")
+  storm_lib_dir = format("{storm_component_home_dir}/lib")
+  log4j_dir = format("{storm_component_home_dir}/log4j2")
+
+storm_user = config['configurations']['storm-env']['storm_user']
+log_dir = config['configurations']['storm-env']['storm_log_dir']
+pid_dir = status_params.pid_dir
+local_dir = config['configurations']['storm-site']['storm.local.dir']
+user_group = config['configurations']['cluster-env']['user_group']
+java64_home = config['hostLevelParams']['java_home']
+jps_binary = format("{java64_home}/bin/jps")
+nimbus_port = config['configurations']['storm-site']['nimbus.thrift.port']
+storm_zookeeper_root_dir = default('/configurations/storm-site/storm.zookeeper.root', None)
+storm_zookeeper_servers = config['configurations']['storm-site']['storm.zookeeper.servers']
+storm_zookeeper_port = config['configurations']['storm-site']['storm.zookeeper.port']
+storm_logs_supported = config['configurations']['storm-env']['storm_logs_supported']
+
+# nimbus.seeds is supported in HDP 2.3.0.0 and higher
+nimbus_seeds_supported = default('/configurations/storm-env/nimbus_seeds_supported', False)
+nimbus_host = default('/configurations/storm-site/nimbus.host', None)
+nimbus_seeds = default('/configurations/storm-site/nimbus.seeds', None)
+default_topology_max_replication_wait_time_sec = default('/configurations/storm-site/topology.max.replication.wait.time.sec.default', -1)
+nimbus_hosts = default("/clusterHostInfo/nimbus_hosts", [])
+default_topology_min_replication_count = default('/configurations/storm-site/topology.min.replication.count.default', 1)
+
+#Calculate topology.max.replication.wait.time.sec and topology.min.replication.count
+if len(nimbus_hosts) > 1:
+  # for HA Nimbus
+  actual_topology_max_replication_wait_time_sec = -1
+  actual_topology_min_replication_count = len(nimbus_hosts) / 2 + 1
+else:
+  # for non-HA Nimbus
+  actual_topology_max_replication_wait_time_sec = default_topology_max_replication_wait_time_sec
+  actual_topology_min_replication_count = default_topology_min_replication_count
+
+if 'topology.max.replication.wait.time.sec.default' in config['configurations']['storm-site']:
+  del config['configurations']['storm-site']['topology.max.replication.wait.time.sec.default']
+if 'topology.min.replication.count.default' in config['configurations']['storm-site']:
+  del config['configurations']['storm-site']['topology.min.replication.count.default']
+
+rest_api_port = "8745"
+rest_api_admin_port = "8746"
+rest_api_conf_file = format("{conf_dir}/config.yaml")
+storm_env_sh_template = config['configurations']['storm-env']['content']
+jmxremote_port = config['configurations']['storm-env']['jmxremote_port']
+
+if 'ganglia_server_host' in config['clusterHostInfo'] and len(config['clusterHostInfo']['ganglia_server_host'])>0:
+  ganglia_installed = True
+  ganglia_server = config['clusterHostInfo']['ganglia_server_host'][0]
+  ganglia_report_interval = 60
+else:
+  ganglia_installed = False
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+storm_ui_host = default("/clusterHostInfo/storm_ui_server_hosts", [])
+
+storm_user_nofile_limit = default('/configurations/storm-env/storm_user_nofile_limit', 128000)
+storm_user_nproc_limit = default('/configurations/storm-env/storm_user_noproc_limit', 65536)
+
+if security_enabled:
+  _hostname_lowercase = config['hostname'].lower()
+  _storm_principal_name = config['configurations']['storm-env']['storm_principal_name']
+  storm_jaas_principal = _storm_principal_name.replace('_HOST',_hostname_lowercase)
+  _ambari_principal_name = default('/configurations/cluster-env/ambari_principal_name', None)
+  storm_keytab_path = config['configurations']['storm-env']['storm_keytab']
+
+  if stack_supports_storm_kerberos:
+    storm_ui_keytab_path = config['configurations']['storm-env']['storm_ui_keytab']
+    _storm_ui_jaas_principal_name = config['configurations']['storm-env']['storm_ui_principal_name']
+    storm_ui_jaas_principal = _storm_ui_jaas_principal_name.replace('_HOST',_hostname_lowercase)
+    storm_bare_jaas_principal = get_bare_principal(_storm_principal_name)
+    if _ambari_principal_name:
+      ambari_bare_jaas_principal = get_bare_principal(_ambari_principal_name)
+    _nimbus_principal_name = config['configurations']['storm-env']['nimbus_principal_name']
+    nimbus_jaas_principal = _nimbus_principal_name.replace('_HOST', _hostname_lowercase)
+    nimbus_bare_jaas_principal = get_bare_principal(_nimbus_principal_name)
+    nimbus_keytab_path = config['configurations']['storm-env']['nimbus_keytab']
+
+kafka_bare_jaas_principal = None
+if stack_supports_storm_kerberos:
+  if security_enabled:
+    storm_thrift_transport = config['configurations']['storm-site']['_storm.thrift.secure.transport']
+    # generate KafkaClient jaas config if kafka is kerberoized
+    _kafka_principal_name = default("/configurations/kafka-env/kafka_principal_name", None)
+    kafka_bare_jaas_principal = get_bare_principal(_kafka_principal_name)
+  else:
+    storm_thrift_transport = config['configurations']['storm-site']['_storm.thrift.nonsecure.transport']
+
+set_instanceId = "false"
+if 'cluster-env' in config['configurations'] and \
+        'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
+  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
+  set_instanceId = "true"
+else:
+  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+has_metric_collector = not len(ams_collector_hosts) == 0
+metric_collector_port = None
+if has_metric_collector:
+  if 'cluster-env' in config['configurations'] and \
+      'metrics_collector_external_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
+  else:
+    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
+    if metric_collector_web_address.find(':') != -1:
+      metric_collector_port = metric_collector_web_address.split(':')[1]
+    else:
+      metric_collector_port = '6188'
+
+  metric_collector_report_interval = 60
+  metric_collector_app_id = "nimbus"
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
+  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+  pass
+metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
+metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
+metric_collector_sink_jar = "/usr/lib/storm/lib/ambari-metrics-storm-sink-with-common-*.jar"
+metric_collector_legacy_sink_jar = "/usr/lib/storm/lib/ambari-metrics-storm-sink-legacy-with-common-*.jar"
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
+
+
+# Cluster Zookeeper quorum
+zookeeper_quorum = ""
+if storm_zookeeper_servers:
+  storm_zookeeper_servers_list = yaml_utils.get_values_from_yaml_array(storm_zookeeper_servers)
+  zookeeper_quorum = (":" + storm_zookeeper_port + ",").join(storm_zookeeper_servers_list)
+  zookeeper_quorum += ":" + storm_zookeeper_port
+
+jar_jvm_opts = ''
+
+########################################################
+############# Atlas related params #####################
+########################################################
+#region Atlas Hooks
+storm_atlas_application_properties = default('/configurations/storm-atlas-application.properties', {})
+enable_atlas_hook = default('/configurations/storm-env/storm.atlas.hook', False)
+atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
+
+if enable_atlas_hook:
+  # Only append /etc/atlas/conf to classpath if on HDP 2.4.*
+  if check_stack_feature(StackFeature.ATLAS_CONF_DIR_IN_PATH, stack_version_formatted):
+    atlas_conf_dir = format('{stack_root}/current/atlas-server/conf')
+    jar_jvm_opts += '-Datlas.conf=' + atlas_conf_dir
+#endregion
+
+storm_ui_port = config['configurations']['storm-site']['ui.port']
+
+#Storm log4j properties
+storm_a1_maxfilesize = default('/configurations/storm-cluster-log4j/storm_a1_maxfilesize', 100)
+storm_a1_maxbackupindex = default('/configurations/storm-cluster-log4j/storm_a1_maxbackupindex', 9)
+storm_wrkr_a1_maxfilesize = default('/configurations/storm-worker-log4j/storm_wrkr_a1_maxfilesize', 100)
+storm_wrkr_a1_maxbackupindex = default('/configurations/storm-worker-log4j/storm_wrkr_a1_maxbackupindex', 9)
+storm_wrkr_out_maxfilesize = default('/configurations/storm-worker-log4j/storm_wrkr_out_maxfilesize', 100)
+storm_wrkr_out_maxbackupindex = default('/configurations/storm-worker-log4j/storm_wrkr_out_maxbackupindex', 4)
+storm_wrkr_err_maxfilesize = default('/configurations/storm-worker-log4j/storm_wrkr_err_maxfilesize', 100)
+storm_wrkr_err_maxbackupindex = default('/configurations/storm-worker-log4j/storm_wrkr_err_maxbackupindex', 4)
+
+storm_cluster_log4j_content = config['configurations']['storm-cluster-log4j']['content']
+storm_worker_log4j_content = config['configurations']['storm-worker-log4j']['content']
+
+# some commands may need to supply the JAAS location when running as storm
+storm_jaas_file = format("{conf_dir}/storm_jaas.conf")
+
+# for curl command in ranger plugin to get db connector
+jdk_location = config['hostLevelParams']['jdk_location']
+
+# ranger storm plugin start section
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+
+# ranger support xml_configuration flag, instead of depending on ranger xml_configurations_supported/ranger-env, using stack feature
+xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
+
+# ambari-server hostname
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+# ranger storm plugin enabled property
+enable_ranger_storm = default("/configurations/ranger-storm-plugin-properties/ranger-storm-plugin-enabled", "No")
+enable_ranger_storm = True if enable_ranger_storm.lower() == 'yes' else False
+
+# ranger storm properties
+if enable_ranger_storm:
+  # get ranger policy url
+  policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
+  if xml_configurations_supported:
+    policymgr_mgr_url = config['configurations']['ranger-storm-security']['ranger.plugin.storm.policy.rest.url']
+
+  if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
+    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
+
+  # ranger audit db user
+  xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
+
+  # ranger storm service name
+  repo_name = str(config['clusterName']) + '_storm'
+  repo_name_value = config['configurations']['ranger-storm-security']['ranger.plugin.storm.service.name']
+  if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
+    repo_name = repo_name_value
+
+  common_name_for_certificate = config['configurations']['ranger-storm-plugin-properties']['common.name.for.certificate']
+  repo_config_username = config['configurations']['ranger-storm-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
+
+  # ranger-env config
+  ranger_env = config['configurations']['ranger-env']
+
+  # create ranger-env config having external ranger credential properties
+  if not has_ranger_admin and enable_ranger_storm:
+    external_admin_username = default('/configurations/ranger-storm-plugin-properties/external_admin_username', 'admin')
+    external_admin_password = default('/configurations/ranger-storm-plugin-properties/external_admin_password', 'admin')
+    external_ranger_admin_username = default('/configurations/ranger-storm-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
+    external_ranger_admin_password = default('/configurations/ranger-storm-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
+    ranger_env = {}
+    ranger_env['admin_username'] = external_admin_username
+    ranger_env['admin_password'] = external_admin_password
+    ranger_env['ranger_admin_username'] = external_ranger_admin_username
+    ranger_env['ranger_admin_password'] = external_ranger_admin_password
+
+  ranger_plugin_properties = config['configurations']['ranger-storm-plugin-properties']
+  policy_user = storm_user
+  repo_config_password = config['configurations']['ranger-storm-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
+
+  xa_audit_db_password = ''
+  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db and has_ranger_admin:
+    xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
+
+  repo_config_password = config['configurations']['ranger-storm-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
+
+  downloaded_custom_connector = None
+  previous_jdbc_jar_name = None
+  driver_curl_source = None
+  driver_curl_target = None
+  previous_jdbc_jar = None
+
+  if has_ranger_admin and stack_supports_ranger_audit_db:
+    xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
+    jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config)
+
+    downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    driver_curl_target = format("{storm_component_home_dir}/lib/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    previous_jdbc_jar = format("{storm_component_home_dir}/lib/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    sql_connector_jar = ''
+
+  storm_ranger_plugin_config = {
+    'username': repo_config_username,
+    'password': repo_config_password,
+    'nimbus.url': 'http://' + storm_ui_host[0].lower() + ':' + str(storm_ui_port),
+    'commonNameForCertificate': common_name_for_certificate
+  }
+
+  storm_ranger_plugin_repo = {
+    'isActive': 'true',
+    'config': json.dumps(storm_ranger_plugin_config),
+    'description': 'storm repo',
+    'name': repo_name,
+    'repositoryType': 'storm',
+    'assetType': '6'
+  }
+
+  custom_ranger_service_config = generate_ranger_service_config(ranger_plugin_properties)
+  if len(custom_ranger_service_config) > 0:
+    storm_ranger_plugin_config.update(custom_ranger_service_config)
+
+  if stack_supports_ranger_kerberos and security_enabled:
+    policy_user = format('{storm_user},{storm_bare_jaas_principal}')
+    storm_ranger_plugin_config['policy.download.auth.users'] = policy_user
+    storm_ranger_plugin_config['tag.download.auth.users'] = policy_user
+    storm_ranger_plugin_config['ambari.service.check.user'] = policy_user
+
+    storm_ranger_plugin_repo = {
+      'isEnabled': 'true',
+      'configs': storm_ranger_plugin_config,
+      'description': 'storm repo',
+      'name': repo_name,
+      'type': 'storm'
+    }
+
+  ranger_storm_principal = None
+  ranger_storm_keytab = None
+  if stack_supports_ranger_kerberos and security_enabled:
+    ranger_storm_principal = storm_jaas_principal
+    ranger_storm_keytab = storm_keytab_path
+
+  xa_audit_db_is_enabled = False
+  if xml_configurations_supported and stack_supports_ranger_audit_db:
+    xa_audit_db_is_enabled = config['configurations']['ranger-storm-audit']['xasecure.audit.destination.db']
+
+  xa_audit_hdfs_is_enabled = default('/configurations/ranger-storm-audit/xasecure.audit.destination.hdfs', False)
+  ssl_keystore_password = config['configurations']['ranger-storm-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None
+  ssl_truststore_password = config['configurations']['ranger-storm-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None
+  credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
+
+  # for SQLA explicitly disable audit to DB for Ranger
+  if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor.lower() == 'sqla':
+    xa_audit_db_is_enabled = False
+
+# ranger storm plugin end section
+
+namenode_hosts = default("/clusterHostInfo/namenode_host", [])
+has_namenode = not len(namenode_hosts) == 0
+
+availableServices = config['availableServices']
+
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] if has_namenode else None
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] if has_namenode else None
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] if has_namenode else None
+hdfs_site = config['configurations']['hdfs-site'] if has_namenode else None
+default_fs = config['configurations']['core-site']['fs.defaultFS'] if has_namenode else None
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin") if has_namenode else None
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir() if has_namenode else None
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources()
+)

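The zookeeper_quorum block in the new params_linux.py joins every ZooKeeper server with the configured port. A small sketch of the string it builds, using hypothetical hostnames:

    # Mirrors the join-then-append logic above.
    storm_zookeeper_servers_list = ["zk1.example.com", "zk2.example.com", "zk3.example.com"]
    storm_zookeeper_port = "2181"
    zookeeper_quorum = (":" + storm_zookeeper_port + ",").join(storm_zookeeper_servers_list)
    zookeeper_quorum += ":" + storm_zookeeper_port
    print(zookeeper_quorum)  # zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181
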
http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/params_windows.py
new file mode 100644
index 0000000..a758375
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/params_windows.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from status_params import *
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.default import default
+
+# server configurations
+config = Script.get_config()
+
+stack_is_hdp23_or_further = Script.is_stack_greater_or_equal("2.3")
+
+stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
+conf_dir = os.environ["STORM_CONF_DIR"]
+hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
+storm_user = hadoop_user
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+default_topology_max_replication_wait_time_sec = default('/configurations/storm-site/topology.max.replication.wait.time.sec.default', -1)
+nimbus_hosts = default("/clusterHostInfo/nimbus_hosts", [])
+default_topology_min_replication_count = default('/configurations/storm-site/topology.min.replication.count.default', 1)
+
+#Calculate topology.max.replication.wait.time.sec and topology.min.replication.count
+if len(nimbus_hosts) > 1:
+  # for HA Nimbus
+  actual_topology_max_replication_wait_time_sec = -1
+  actual_topology_min_replication_count = len(nimbus_hosts) / 2 + 1
+else:
+  # for non-HA Nimbus
+  actual_topology_max_replication_wait_time_sec = default_topology_max_replication_wait_time_sec
+  actual_topology_min_replication_count = default_topology_min_replication_count
+
+if stack_is_hdp23_or_further:
+  if security_enabled:
+    storm_thrift_transport = config['configurations']['storm-site']['_storm.thrift.secure.transport']
+  else:
+    storm_thrift_transport = config['configurations']['storm-site']['_storm.thrift.nonsecure.transport']
+
+service_map = {
+  "nimbus" : nimbus_win_service_name,
+  "supervisor" : supervisor_win_service_name,
+  "ui" : ui_win_service_name
+}

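Both the Linux and Windows params compute the HA Nimbus minimum replication count as len(nimbus_hosts) / 2 + 1; under the Python 2 runtime these scripts target, "/" on ints floors, so three Nimbus hosts give a count of 2. A sketch (written with // so it behaves the same on Python 3):

    nimbus_hosts = ["nimbus1", "nimbus2", "nimbus3"]  # hypothetical HA deployment
    actual_topology_min_replication_count = len(nimbus_hosts) // 2 + 1
    print(actual_topology_min_replication_count)  # 2
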
http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/rest_api.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/rest_api.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/rest_api.py
new file mode 100644
index 0000000..f9b3b80
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/rest_api.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.functions import check_process_status
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
+from resource_management.core.resources.system import Execute
+
+from storm import storm
+from service import service
+from service_check import ServiceCheck
+
+
+class StormRestApi(Script):
+  """
+  Storm REST API.
+  It was available in HDP 2.0 and 2.1.
+  In HDP 2.2, it was removed since the functionality was moved to Storm UI Server.
+  """
+
+  def get_component_name(self):
+    return "storm-client"
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    storm()
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+    service("rest_api", action="start")
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    service("rest_api", action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_rest_api)
+
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+  
+  def get_user(self):
+    import params
+    return params.storm_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.pid_rest_api]
+  
+if __name__ == "__main__":
+  StormRestApi().execute()

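Editor's note: status() above delegates to check_process_status with the REST API pid file. As a
rough mental model only (a hedged sketch, not Ambari's actual implementation), a pid-file
liveness probe amounts to:

  # Minimal sketch of a pid-file liveness check (illustrative only).
  import os

  def is_process_running(pid_file):
    if not os.path.isfile(pid_file):
      return False
    with open(pid_file) as f:
      pid = int(f.read().strip())
    try:
      os.kill(pid, 0)  # signal 0 checks existence without sending a real signal
      return True
    except OSError:
      return False
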
http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/service.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/service.py
new file mode 100644
index 0000000..b5e5cd5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/service.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.core.resources import Execute
+from resource_management.core.resources import File
+from resource_management.core.shell import as_user
+from resource_management.core import shell
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import get_user_call_output
+from resource_management.libraries.functions.show_logs import show_logs
+import time
+
+
+def service(name, action = 'start'):
+  import params
+  import status_params
+
+  pid_file = status_params.pid_files[name]
+  no_op_test = as_user(format(
+    "ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.storm_user)
+
+  if name == 'ui':
+    process_grep = "storm.ui.core$"
+  elif name == "rest_api":
+    process_grep = format("{rest_lib_dir}/storm-rest-.*\.jar$")
+  else:
+    process_grep = format("storm.daemon.{name}$")
+
+  find_proc = format("{jps_binary} -l  | grep {process_grep}")
+  write_pid = format("{find_proc} | awk {{'print $1'}} > {pid_file}")
+  crt_pid_cmd = format("{find_proc} && {write_pid}")
+  storm_env = format(
+    "source {conf_dir}/storm-env.sh ; export PATH=$JAVA_HOME/bin:$PATH")
+
+  if action == "start":
+    if name == "rest_api":
+      process_cmd = format(
+        "{storm_env} ; java -jar {rest_lib_dir}/`ls {rest_lib_dir} | grep -wE storm-rest-[0-9.-]+\.jar` server")
+      cmd = format(
+        "{process_cmd} {rest_api_conf_file} > {log_dir}/restapi.log 2>&1")
+    else:
+      # The Storm start script forks into the actual Storm java process,
+      # so the pid of the start script can be used as the pid of the component.
+      cmd = format("{storm_env} ; storm {name} > {log_dir}/{name}.out 2>&1")
+
+    cmd = format("{cmd} &\n echo $! > {pid_file}")
+    
+    Execute(cmd,
+      not_if = no_op_test,
+      user = params.storm_user,
+      path = params.storm_bin_dir,
+    )
+    
+    File(pid_file,
+         owner = params.storm_user,
+         group = params.user_group
+    )
+  elif action == "stop":
+    process_dont_exist = format("! ({no_op_test})")
+    if os.path.exists(pid_file):
+      pid = get_user_call_output.get_user_call_output(format("! test -f {pid_file} ||  cat {pid_file}"), user=params.storm_user)[1]
+
+      # If multiple processes are running (for example, the user may have started
+      # the logviewer from the console), there can be more than one pid.
+      pid = pid.replace("\n", " ")
+
+      Execute(format("{sudo} kill {pid}"),
+        not_if = process_dont_exist)
+
+      Execute(format("{sudo} kill -9 {pid}"),
+        not_if = format(
+          "sleep 2; {process_dont_exist} || sleep 20; {process_dont_exist}"),
+        ignore_failures = True)
+
+      File(pid_file, action = "delete")

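Editor's note: to make the string assembly in service() concrete, here is roughly what the
composed commands expand to for name == "ui". All paths below are hypothetical examples, not
stack defaults:

  # Hypothetical expansion for name == "ui" (illustrative only):
  no_op_test = "ls /var/run/storm/ui.pid >/dev/null 2>&1 && ps -p `cat /var/run/storm/ui.pid` >/dev/null 2>&1"
  find_proc  = "jps -l | grep storm.ui.core$"
  write_pid  = "jps -l | grep storm.ui.core$ | awk {'print $1'} > /var/run/storm/ui.pid"
  cmd        = "source /etc/storm/conf/storm-env.sh ; export PATH=$JAVA_HOME/bin:$PATH ; storm ui > /var/log/storm/ui.out 2>&1 &\n echo $! > /var/run/storm/ui.pid"
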
http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/service_check.py
new file mode 100644
index 0000000..80ea0f5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/service_check.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import get_unique_id_and_date
+from resource_management.core.resources import File
+from resource_management.core.resources import Execute
+from resource_management.libraries.script import Script
+from resource_management.core.source import StaticFile
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+class ServiceCheck(Script):
+  pass
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class ServiceCheckWindows(ServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    smoke_cmd = os.path.join(params.stack_root,"Run-SmokeTests.cmd")
+    service = "STORM"
+    Execute(format("cmd /C {smoke_cmd} {service}", smoke_cmd=smoke_cmd, service=service), user=params.storm_user, logoutput=True)
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class ServiceCheckDefault(ServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    unique = get_unique_id_and_date()
+
+    File("/tmp/wordCount.jar",
+         content=StaticFile("wordCount.jar"),
+         owner=params.storm_user
+    )
+
+    cmd = ""
+    if params.nimbus_seeds_supported:
+      # Because this command is guaranteed to run on one of the hosts with storm client, there is no need
+      # to specify "-c nimbus.seeds={nimbus_seeds}"
+      cmd = format("storm jar /tmp/wordCount.jar storm.starter.WordCountTopology WordCount{unique}")
+    elif params.nimbus_host is not None:
+      cmd = format("storm jar /tmp/wordCount.jar storm.starter.WordCountTopology WordCount{unique} -c nimbus.host={nimbus_host}")
+
+    Execute(cmd,
+            logoutput=True,
+            path=params.storm_bin_dir,
+            user=params.storm_user
+    )
+
+    Execute(format("storm kill WordCount{unique}"),
+            path=params.storm_bin_dir,
+            user=params.storm_user
+    )
+
+if __name__ == "__main__":
+  ServiceCheck().execute()

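Editor's note: the default-OS service check simply submits the bundled WordCount topology under
a unique name and then kills it. A hedged stand-alone equivalent of the same cycle (subprocess
instead of Ambari's Execute; the unique suffix shown is hypothetical):

  # Stand-alone sketch of the smoke-test cycle (illustrative only).
  import subprocess

  unique = "id20170517_1200"  # Ambari derives this via get_unique_id_and_date()
  subprocess.check_call(
    "storm jar /tmp/wordCount.jar storm.starter.WordCountTopology WordCount" + unique,
    shell=True)
  subprocess.check_call("storm kill WordCount" + unique, shell=True)
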
http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/setup_ranger_storm.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/setup_ranger_storm.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/setup_ranger_storm.py
new file mode 100644
index 0000000..c04496e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/setup_ranger_storm.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_core_site_for_required_plugins
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.functions.format import format
+from resource_management.core.resources import File, Directory
+
+def setup_ranger_storm(upgrade_type=None):
+  """
+  :param upgrade_type: Upgrade Type such as "rolling" or "nonrolling"
+  """
+  import params
+  if params.enable_ranger_storm and params.security_enabled:
+
+    stack_version = None
+    if upgrade_type is not None:
+      stack_version = params.version
+
+    if params.retryAble:
+      Logger.info("Storm: Setup ranger: command retry enables thus retrying if ranger admin is down !")
+    else:
+      Logger.info("Storm: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
+
+    if params.xml_configurations_supported and params.enable_ranger_storm and params.xa_audit_hdfs_is_enabled:
+      if params.has_namenode:
+        params.HdfsResource("/ranger/audit",
+                           type="directory",
+                           action="create_on_execute",
+                           owner=params.hdfs_user,
+                           group=params.hdfs_user,
+                           mode=0755,
+                           recursive_chmod=True
+        )
+        params.HdfsResource("/ranger/audit/storm",
+                           type="directory",
+                           action="create_on_execute",
+                           owner=params.storm_user,
+                           group=params.storm_user,
+                           mode=0700,
+                           recursive_chmod=True
+        )
+        params.HdfsResource(None, action="execute")
+
+    if params.xml_configurations_supported:
+      api_version=None
+      if params.stack_supports_ranger_kerberos:
+        api_version='v2'
+      from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
+      setup_ranger_plugin('storm-nimbus', 'storm', params.previous_jdbc_jar,
+                          params.downloaded_custom_connector, params.driver_curl_source,
+                          params.driver_curl_target, params.java64_home,
+                          params.repo_name, params.storm_ranger_plugin_repo,
+                          params.ranger_env, params.ranger_plugin_properties,
+                          params.policy_user, params.policymgr_mgr_url,
+                          params.enable_ranger_storm, conf_dict=params.conf_dir,
+                          component_user=params.storm_user, component_group=params.user_group, cache_service_list=['storm'],
+                          plugin_audit_properties=params.config['configurations']['ranger-storm-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-storm-audit'],
+                          plugin_security_properties=params.config['configurations']['ranger-storm-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-storm-security'],
+                          plugin_policymgr_ssl_properties=params.config['configurations']['ranger-storm-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-storm-policymgr-ssl'],
+                          component_list=['storm-client', 'storm-nimbus'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
+                          credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
+                          ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
+                          stack_version_override=stack_version, skip_if_rangeradmin_down=not params.retryAble, api_version=api_version,
+                          is_security_enabled = params.security_enabled,
+                          is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
+                          component_user_principal=params.ranger_storm_principal if params.security_enabled else None,
+                          component_user_keytab=params.ranger_storm_keytab if params.security_enabled else None)
+    else:
+      from resource_management.libraries.functions.setup_ranger_plugin import setup_ranger_plugin
+      setup_ranger_plugin('storm-nimbus', 'storm', params.previous_jdbc_jar,
+                        params.downloaded_custom_connector, params.driver_curl_source,
+                        params.driver_curl_target, params.java64_home,
+                        params.repo_name, params.storm_ranger_plugin_repo,
+                        params.ranger_env, params.ranger_plugin_properties,
+                        params.policy_user, params.policymgr_mgr_url,
+                        params.enable_ranger_storm, conf_dict=params.conf_dir,
+                        component_user=params.storm_user, component_group=params.user_group, cache_service_list=['storm'],
+                        plugin_audit_properties=params.config['configurations']['ranger-storm-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-storm-audit'],
+                        plugin_security_properties=params.config['configurations']['ranger-storm-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-storm-security'],
+                        plugin_policymgr_ssl_properties=params.config['configurations']['ranger-storm-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-storm-policymgr-ssl'],
+                        component_list=['storm-client', 'storm-nimbus'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
+                        credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
+                        ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
+                        stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
+
+
+    site_files_create_path = format('{storm_component_home_dir}/extlib-daemon/ranger-storm-plugin-impl/conf')
+    Directory(site_files_create_path,
+            owner = params.storm_user,
+            group = params.user_group,
+            mode=0775,
+            create_parents = True,
+            cd_access = 'a'
+            )
+
+    if params.stack_supports_core_site_for_ranger_plugin and params.enable_ranger_storm and params.has_namenode and params.security_enabled:
+      Logger.info("Stack supports core-site.xml creation for Ranger plugin, creating create core-site.xml from namenode configuraitions")
+      setup_core_site_for_required_plugins(component_user=params.storm_user,component_group=params.user_group,create_core_site_path = site_files_create_path, config = params.config)
+      if len(params.namenode_hosts) > 1:
+        Logger.info('Ranger Storm plugin is enabled along with security and NameNode is HA, creating hdfs-site.xml')
+        XmlConfig("hdfs-site.xml",
+          conf_dir=site_files_create_path,
+          configurations=params.config['configurations']['hdfs-site'],
+          configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+          owner=params.storm_user,
+          group=params.user_group,
+          mode=0644
+        )
+      else:
+        Logger.info('NameNode is not HA, removing hdfs-site.xml')
+        File(format('{site_files_create_path}/hdfs-site.xml'), action="delete")
+    else:
+      Logger.info("Stack does not support core-site.xml creation for Ranger plugin, skipping core-site.xml configurations")
+  else:
+    Logger.info('Ranger Storm plugin is not enabled')

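Editor's note: observe the HdfsResource pattern above: each call with action="create_on_execute"
only queues a directory request, and the trailing HdfsResource(None, action="execute") flushes
the whole queue in one pass. A minimal sketch of that deferred-batch idea (illustrative, not the
real class):

  # Sketch of a deferred-batch resource (illustrative only).
  class DeferredFs(object):
    def __init__(self):
      self.pending = []
    def create_on_execute(self, path, **attrs):
      self.pending.append((path, attrs))  # record the request, do nothing yet
    def execute(self):
      for path, attrs in self.pending:    # one pass over all queued paths
        print "mkdir %s with %r" % (path, attrs)
      self.pending = []
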
http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/status_params.py
new file mode 100644
index 0000000..d84b095
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/status_params.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import default, format
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+from ambari_commons import OSCheck
+
+# a map of the Ambari role to the component name
+# for use with <stack-root>/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'NIMBUS' : 'storm-nimbus',
+  'SUPERVISOR' : 'storm-supervisor',
+  'STORM_UI_SERVER' : 'storm-client',
+  'DRPC_SERVER' : 'storm-client',
+  'STORM_SERVICE_CHECK' : 'storm-client'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "STORM_SERVICE_CHECK")
+
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+if OSCheck.is_windows_family():
+  nimbus_win_service_name = "nimbus"
+  supervisor_win_service_name = "supervisor"
+  ui_win_service_name = "ui"
+else:
+  pid_dir = config['configurations']['storm-env']['storm_pid_dir']
+  pid_nimbus = format("{pid_dir}/nimbus.pid")
+  pid_supervisor = format("{pid_dir}/supervisor.pid")
+  pid_drpc = format("{pid_dir}/drpc.pid")
+  pid_ui = format("{pid_dir}/ui.pid")
+  pid_logviewer = format("{pid_dir}/logviewer.pid")
+  pid_rest_api = format("{pid_dir}/restapi.pid")
+
+  pid_files = {
+    "logviewer":pid_logviewer,
+    "ui": pid_ui,
+    "nimbus": pid_nimbus,
+    "supervisor": pid_supervisor,
+    "drpc": pid_drpc,
+    "rest_api": pid_rest_api
+  }
+
+  # Security related/required params
+  hostname = config['hostname']
+  security_enabled = config['configurations']['cluster-env']['security_enabled']
+  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+  tmp_dir = Script.get_tmp_dir()
+
+  storm_component_home_dir = "/usr/lib/storm"
+  conf_dir = "/etc/storm/conf"
+  if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+    storm_component_home_dir = format("{stack_root}/current/{component_directory}")
+    conf_dir = format("{stack_root}/current/{component_directory}/conf")
+
+  storm_user = config['configurations']['storm-env']['storm_user']
+  storm_ui_principal = default('/configurations/storm-env/storm_ui_principal_name', None)
+  storm_ui_keytab = default('/configurations/storm-env/storm_ui_keytab', None)
+
+stack_name = default("/hostLevelParams/stack_name", None)
\ No newline at end of file

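Editor's note: the pid_files map above is the lookup table that service.py indexes by component
name. With a hypothetical storm_pid_dir of /var/run/storm:

  pid_files["rest_api"]    # -> "/var/run/storm/restapi.pid"
  pid_files["supervisor"]  # -> "/var/run/storm/supervisor.pid"
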
http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/storm.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/storm.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/storm.py
new file mode 100644
index 0000000..99579d2
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/storm.py
@@ -0,0 +1,182 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.service import ServiceConfig
+from resource_management.core.resources.system import Directory, Execute, File, Link
+from resource_management.core.source import InlineTemplate
+from resource_management.libraries.resources.template_config import TemplateConfig
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.script.script import Script
+from resource_management.core.source import Template
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+from storm_yaml_utils import yaml_config_template, yaml_config
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook, setup_atlas_jar_symlinks
+from ambari_commons.constants import SERVICE
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def storm(name=None):
+  import params
+  yaml_config("storm.yaml",
+              conf_dir=params.conf_dir,
+              configurations=params.config['configurations']['storm-site'],
+              owner=params.storm_user
+  )
+
+  if params.service_map.has_key(name):
+    service_name = params.service_map[name]
+    ServiceConfig(service_name,
+                  action="change_user",
+                  username = params.storm_user,
+                  password = Script.get_password(params.storm_user))
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def storm(name=None):
+  import params
+  import os
+
+  Directory(params.log_dir,
+            owner=params.storm_user,
+            group=params.user_group,
+            mode=0777,
+            create_parents = True,
+            cd_access="a",
+  )
+
+  Directory([params.pid_dir, params.local_dir],
+            owner=params.storm_user,
+            group=params.user_group,
+            create_parents = True,
+            cd_access="a",
+            mode=0755,
+  )
+
+  Directory(params.conf_dir,
+            group=params.user_group,
+            create_parents = True,
+            cd_access="a",
+  )
+
+  File(format("{limits_conf_dir}/storm.conf"),
+       owner='root',
+       group='root',
+       mode=0644,
+       content=Template("storm.conf.j2")
+  )
+
+  File(format("{conf_dir}/config.yaml"),
+       content=Template("config.yaml.j2"),
+       owner=params.storm_user,
+       group=params.user_group
+  )
+
+  configurations = params.config['configurations']['storm-site']
+
+  File(format("{conf_dir}/storm.yaml"),
+       content=yaml_config_template(configurations),
+       owner=params.storm_user,
+       group=params.user_group
+  )
+
+  File(format("{conf_dir}/storm-env.sh"),
+       owner=params.storm_user,
+       content=InlineTemplate(params.storm_env_sh_template)
+  )
+
+  # Generate atlas-application.properties.xml file and symlink the hook jars
+  if params.enable_atlas_hook:
+    atlas_hook_filepath = os.path.join(params.conf_dir, params.atlas_hook_filename)
+    setup_atlas_hook(SERVICE.STORM, params.storm_atlas_application_properties, atlas_hook_filepath, params.storm_user, params.user_group)
+    storm_extlib_dir = os.path.join(params.storm_component_home_dir, "extlib")
+    setup_atlas_jar_symlinks("storm", storm_extlib_dir)
+
+  if params.has_metric_collector:
+    File(format("{conf_dir}/storm-metrics2.properties"),
+        owner=params.storm_user,
+        group=params.user_group,
+        content=Template("storm-metrics2.properties.j2")
+    )
+
+    # Remove symlinks. They can be there if you are upgrading from HDP < 2.2 to HDP >= 2.2
+    Link(format("{storm_lib_dir}/ambari-metrics-storm-sink.jar"),
+         action="delete")
+    # On old HDP 2.1 versions, this symlink may also exist and break EU to newer versions
+    Link("/usr/lib/storm/lib/ambari-metrics-storm-sink.jar", action="delete")
+
+    if check_stack_feature(StackFeature.STORM_METRICS_APACHE_CLASSES, params.version_for_stack_feature_checks):
+      sink_jar = params.metric_collector_sink_jar
+    else:
+      sink_jar = params.metric_collector_legacy_sink_jar
+
+    Execute(format("{sudo} ln -s {sink_jar} {storm_lib_dir}/ambari-metrics-storm-sink.jar"),
+            not_if=format("ls {storm_lib_dir}/ambari-metrics-storm-sink.jar"),
+            only_if=format("ls {sink_jar}")
+    )
+
+  if params.storm_logs_supported:
+    Directory(params.log4j_dir,
+              owner=params.storm_user,
+              group=params.user_group,
+              mode=0755,
+              create_parents = True
+    )
+    
+    File(format("{log4j_dir}/cluster.xml"),
+      owner=params.storm_user,
+      content=InlineTemplate(params.storm_cluster_log4j_content)
+    )
+    File(format("{log4j_dir}/worker.xml"),
+      owner=params.storm_user,
+      content=InlineTemplate(params.storm_worker_log4j_content)
+    )
+
+  if params.security_enabled:
+    TemplateConfig(format("{conf_dir}/storm_jaas.conf"),
+                   owner=params.storm_user
+    )
+    if params.stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version_formatted):
+      TemplateConfig(format("{conf_dir}/client_jaas.conf"),
+                     owner=params.storm_user
+      )
+      minRuid = configurations['_storm.min.ruid'] if configurations.has_key('_storm.min.ruid') else ''
+      
+      min_user_ruid = int(minRuid) if minRuid.isdigit() else _find_real_user_min_uid()
+      
+      File(format("{conf_dir}/worker-launcher.cfg"),
+           content=Template("worker-launcher.cfg.j2", min_user_ruid = min_user_ruid),
+           owner='root',
+           group=params.user_group
+      )
+
+
+def _find_real_user_min_uid():
+  '''
+  Finds the minimal real user UID.
+  '''
+  with open('/etc/login.defs') as f:
+    for line in f:
+      if line.strip().startswith('UID_MIN') and len(line.split()) == 2 and line.split()[1].isdigit():
+        return int(line.split()[1])
+  raise Fail("Unable to find UID_MIN in file /etc/login.defs. Expecting format e.g.: 'UID_MIN    500'")  

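Editor's note: a worked example of the UID_MIN parse in _find_real_user_min_uid, using a sample
line (real values come from /etc/login.defs):

  line = "UID_MIN    500\n"      # hypothetical file line
  parts = line.split()           # ['UID_MIN', '500']
  assert parts[0] == 'UID_MIN' and parts[1].isdigit()
  print int(parts[1])            # -> 500
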
http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/storm_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/storm_upgrade.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/storm_upgrade.py
new file mode 100644
index 0000000..bc245c4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/storm_upgrade.py
@@ -0,0 +1,177 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
+import os
+
+from ambari_commons import yaml_utils
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Directory
+from resource_management.core.resources.system import File
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+
+class StormUpgrade(Script):
+  """
+  Applies to Rolling/Express Upgrade from HDP 2.1 or 2.2 to 2.3 or higher.
+
+  Requirements: Needs to run from a host with ZooKeeper Client.
+
+  This class helps perform some of the upgrade tasks needed for Storm during
+  a Rolling or Express upgrade. Storm writes data to disk locally and to ZooKeeper.
+  If any HDP 2.1 or 2.2 bits exist in these directories when an HDP 2.3 instance
+  starts up, it will fail to start properly. Because the upgrade framework in
+  Ambari doesn't yet have a mechanism to say "stop all" before starting to
+  upgrade each component, we need to rely on a Storm trick to bring down
+  running daemons. By removing the ZooKeeper data with running daemons, those
+  daemons will die.
+  """
+
+  def delete_storm_zookeeper_data(self, env):
+    """
+    Deletes the Storm data from ZooKeeper, effectively bringing down all
+    Storm daemons.
+    :return:
+    """
+    import params
+
+    Logger.info('Clearing Storm data from ZooKeeper')
+
+    storm_zookeeper_root_dir = params.storm_zookeeper_root_dir
+    if storm_zookeeper_root_dir is None:
+      raise Fail("The storm ZooKeeper directory specified by storm-site/storm.zookeeper.root must be specified")
+
+    # The zookeeper client must be given a zookeeper host to contact. Guaranteed to have at least one host.
+    storm_zookeeper_server_list = yaml_utils.get_values_from_yaml_array(params.storm_zookeeper_servers)
+    if storm_zookeeper_server_list is None:
+      Logger.info("Unable to extract ZooKeeper hosts from '{0}', assuming localhost").format(params.storm_zookeeper_servers)
+      storm_zookeeper_server_list = ["localhost"]
+
+    # For every zk server, try to remove /storm
+    zookeeper_data_cleared = False
+    for storm_zookeeper_server in storm_zookeeper_server_list:
+      # Determine where the zkCli.sh shell script is
+      zk_command_location = os.path.join(params.stack_root, "current", "zookeeper-client", "bin", "zkCli.sh")
+      if params.version is not None:
+        zk_command_location = os.path.join(params.stack_root, params.version, "zookeeper", "bin", "zkCli.sh")
+
+      # create the ZooKeeper delete command
+      command = "{0} -server {1}:{2} rmr /storm".format(
+        zk_command_location, storm_zookeeper_server, params.storm_zookeeper_port)
+
+      # clean out ZK
+      try:
+        # the ZK client requires Java to run; ensure it's on the path
+        env_map = {
+          'JAVA_HOME': params.java64_home
+        }
+
+        # AMBARI-12094: if security is enabled, then we need to tell zookeeper where the
+        # JAAS file is located since we don't use kinit directly with STORM
+        if params.security_enabled:
+          env_map['JVMFLAGS'] = "-Djava.security.auth.login.config={0}".format(params.storm_jaas_file)
+
+        Execute(command, user=params.storm_user, environment=env_map,
+          logoutput=True, tries=1)
+
+        zookeeper_data_cleared = True
+        break
+      except:
+        # the command failed, try a different ZK server
+        pass
+
+    # fail if the ZK data could not be cleared
+    if not zookeeper_data_cleared:
+      raise Fail("Unable to clear ZooKeeper Storm data on any of the following ZooKeeper hosts: {0}".format(
+        storm_zookeeper_server_list))
+
+
+  def delete_storm_local_data(self, env):
+    """
+    Deletes Storm data from local directories. This will create a marker file
+    with JSON data representing the upgrade stack and request/stage ID. This
+    will prevent multiple Storm components on the same host from removing
+    the local directories more than once.
+    :return:
+    """
+    import params
+
+    Logger.info('Clearing Storm data from local directories...')
+
+    storm_local_directory = params.local_dir
+    if storm_local_directory is None:
+      raise Fail("The storm local directory specified by storm-site/storm.local.dir must be specified")
+
+    request_id = default("/requestId", None)
+
+    stack_name = params.stack_name
+    stack_version = params.version
+    upgrade_direction = params.upgrade_direction
+
+    json_map = {}
+    json_map["requestId"] = request_id
+    json_map["stackName"] = stack_name
+    json_map["stackVersion"] = stack_version
+    json_map["direction"] = upgrade_direction
+
+    temp_directory = params.tmp_dir
+    marker_file = os.path.join(temp_directory, "storm-upgrade-{0}.json".format(stack_version))
+    Logger.info("Marker file for upgrade/downgrade of Storm, {0}".format(marker_file))
+
+    if os.path.exists(marker_file):
+      Logger.info("The marker file exists.")
+      try:
+        with open(marker_file) as file_pointer:
+          existing_json_map = json.load(file_pointer)
+
+        if cmp(json_map, existing_json_map) == 0:
+          Logger.info("The storm upgrade has already removed the local directories for {0}-{1} for "
+                      "request {2} and direction {3}. Nothing else to do.".format(stack_name, stack_version, request_id, upgrade_direction))
+
+          # Nothing else to do here for this as it appears to have already been
+          # removed by another component being upgraded
+          return
+        else:
+          Logger.info("The marker file differs from the new value. Will proceed to delete Storm local dir, "
+                      "and generate new file. Current marker file: {0}".format(str(existing_json_map)))
+      except Exception, e:
+        Logger.error("The marker file {0} appears to be corrupt; removing it. Error: {1}".format(marker_file, str(e)))
+        File(marker_file, action="delete")
+    else:
+      Logger.info('The marker file {0} does not exist; will attempt to delete local Storm directory if it exists.'.format(marker_file))
+
+    # Delete from local directory
+    if os.path.isdir(storm_local_directory):
+      Logger.info("Deleting storm local directory, {0}".format(storm_local_directory))
+      Directory(storm_local_directory, action="delete", create_parents = True)
+
+    # Recreate storm local directory
+    Logger.info("Recreating storm local directory, {0}".format(storm_local_directory))
+    Directory(storm_local_directory, mode=0755, owner=params.storm_user,
+      group=params.user_group, create_parents = True)
+
+    # The file doesn't exist, so create it
+    Logger.info("Saving marker file to {0} with contents: {1}".format(marker_file, str(json_map)))
+    with open(marker_file, 'w') as file_pointer:
+      json.dump(json_map, file_pointer, indent=2)
+
+if __name__ == "__main__":
+  StormUpgrade().execute()
\ No newline at end of file

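Editor's note: the marker file written by delete_storm_local_data is plain JSON; with
hypothetical values it would serialize as follows:

  import json
  json_map = {"requestId": 42, "stackName": "HDP",  # hypothetical values
              "stackVersion": "2.3.0.0", "direction": "upgrade"}
  print json.dumps(json_map, indent=2)
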
http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/storm_yaml_utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/storm_yaml_utils.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/storm_yaml_utils.py
new file mode 100644
index 0000000..9d78e71
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/storm_yaml_utils.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import resource_management
+
+from ambari_commons.yaml_utils import escape_yaml_property
+from resource_management.core.source import InlineTemplate
+from resource_management.core.resources.system import File
+
+def replace_jaas_placeholder(name, security_enabled, conf_dir):
+  if name.find('_JAAS_PLACEHOLDER') > -1:
+    if security_enabled:
+      return name.replace('_JAAS_PLACEHOLDER', '-Djava.security.auth.login.config=' + conf_dir + '/storm_jaas.conf')
+    else:
+      return name.replace('_JAAS_PLACEHOLDER', '')
+  else:
+    return name
+
+storm_yaml_template = """{% for key, value in configurations|dictsort if not key.startswith('_') %}{{key}} : {{ escape_yaml_property(replace_jaas_placeholder(resource_management.core.source.InlineTemplate(value).get_content().strip(), security_enabled, conf_dir)) }}
+{% endfor %}"""
+
+def yaml_config_template(configurations):
+  return InlineTemplate(storm_yaml_template, configurations=configurations,
+                        extra_imports=[escape_yaml_property, replace_jaas_placeholder, resource_management,
+                                       resource_management.core, resource_management.core.source])
+
+def yaml_config(filename, configurations = None, conf_dir = None, owner = None, group = None):
+  import params
+  config_content = InlineTemplate('''{% for key, value in configurations_dict|dictsort %}{{ key }}: {{ escape_yaml_property(resource_management.core.source.InlineTemplate(value).get_content()) }}
+{% endfor %}''', configurations_dict=configurations, extra_imports=[escape_yaml_property, resource_management, resource_management.core, resource_management.core.source])
+
+  File (os.path.join(params.conf_dir, filename),
+        content = config_content,
+        owner = owner,
+        mode = 0644
+  )

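Editor's note: to see what storm_yaml_template produces, consider a hypothetical configurations
dict; keys with a leading underscore are filtered out by the dictsort guard in the template:

  # Hypothetical input:
  #   {'storm.zookeeper.port': '2181', '_storm.min.ruid': '500'}
  # Approximate rendered storm.yaml (the '_'-prefixed key is skipped, and
  # escape_yaml_property may add quoting for values with special characters):
  #   storm.zookeeper.port : 2181
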
http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/supervisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/supervisor.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/supervisor.py
new file mode 100644
index 0000000..ec3f533
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/supervisor.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.functions import check_process_status
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+from storm import storm
+from service import service
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from resource_management.core.resources.service import Service
+
+
+class Supervisor(Script):
+  def get_component_name(self):
+    return "storm-supervisor"
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    storm("supervisor")
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class SupervisorWindows(Supervisor):
+  def start(self, env):
+    import status_params
+    env.set_params(status_params)
+    self.configure(env)
+    Service(status_params.supervisor_win_service_name, action="start")
+
+  def stop(self, env):
+    import status_params
+    env.set_params(status_params)
+    Service(status_params.supervisor_win_service_name, action="stop")
+
+  def status(self, env):
+    import status_params
+    from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
+    env.set_params(status_params)
+    check_windows_service_status(status_params.supervisor_win_service_name)
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class SupervisorDefault(Supervisor):
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "storm", params.version)
+      stack_select.select("storm-client", params.version)
+      stack_select.select("storm-supervisor", params.version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+    service("supervisor", action="start")
+    service("logviewer", action="start")
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    service("supervisor", action="stop")
+    service("logviewer", action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_supervisor)
+
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+  
+  def get_user(self):
+    import params
+    return params.storm_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.pid_supervisor]
+
+if __name__ == "__main__":
+  Supervisor().execute()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/supervisor_prod.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/supervisor_prod.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/supervisor_prod.py
new file mode 100644
index 0000000..d6c3545
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/supervisor_prod.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from storm import storm
+from service import service
+from supervisord_service import supervisord_service, supervisord_check_status
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+
+
+class Supervisor(Script):
+
+  def get_component_name(self):
+    return "storm-supervisor"
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    storm()
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "storm", params.version)
+      stack_select.select("storm-client", params.version)
+      stack_select.select("storm-supervisor", params.version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+    supervisord_service("supervisor", action="start")
+    service("logviewer", action="start")
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    supervisord_service("supervisor", action="stop")
+    service("logviewer", action="stop")
+
+  def status(self, env):
+    supervisord_check_status("supervisor")
+    
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+  
+  def get_user(self):
+    import params
+    return params.storm_user
+
+if __name__ == "__main__":
+  Supervisor().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/supervisord_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/supervisord_service.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/supervisord_service.py
new file mode 100644
index 0000000..6ff9f9c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/supervisord_service.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.core.exceptions import ComponentIsNotRunning, Fail
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.format import format
+
+def supervisord_service(component_name, action):
+  Execute(format("supervisorctl {action} storm-{component_name}"),
+    wait_for_finish=False
+  )
+
+def supervisord_check_status(component_name):
+  try:
+    Execute(format("supervisorctl status storm-{component_name} | grep RUNNING"))
+  except Fail:
+    raise ComponentIsNotRunning() 

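Editor's note: the two helpers above just shell out to supervisord's CLI; for
component_name == "supervisor" the rendered commands are simply:

  # supervisorctl start storm-supervisor
  # supervisorctl status storm-supervisor | grep RUNNING
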
http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/ui_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/ui_server.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/ui_server.py
new file mode 100644
index 0000000..e257ef9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/ui_server.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from storm import storm
+from service import service
+from service_check import ServiceCheck
+from resource_management.libraries.functions import check_process_status
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
+from resource_management.core.resources.system import Link
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_JAAS_CONF
+from setup_ranger_storm import setup_ranger_storm
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from resource_management.core.resources.service import Service
+
+
+class UiServer(Script):
+
+  def get_component_name(self):
+    return "storm-client"
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    storm("ui")
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class UiServerWindows(UiServer):
+  def start(self, env):
+    import status_params
+    env.set_params(status_params)
+    self.configure(env)
+    Service(status_params.ui_win_service_name, action="start")
+
+  def stop(self, env):
+    import status_params
+    env.set_params(status_params)
+    Service(status_params.ui_win_service_name, action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
+    check_windows_service_status(status_params.ui_win_service_name)
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class UiServerDefault(UiServer):
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "storm", params.version)
+      stack_select.select("storm-client", params.version)
+
+  def link_metrics_sink_jar(self):
+    import params
+    # Add storm metrics reporter JAR to storm-ui-server classpath.
+    # Remove symlinks. They can be there if you are upgrading from HDP < 2.2 to HDP >= 2.2
+    Link(format("{storm_lib_dir}/ambari-metrics-storm-sink.jar"),
+         action="delete")
+    # On old HDP 2.1 versions, this symlink may also exist and break EU to newer versions
+    Link("/usr/lib/storm/lib/ambari-metrics-storm-sink.jar", action="delete")
+
+    if check_stack_feature(StackFeature.STORM_METRICS_APACHE_CLASSES, params.version_for_stack_feature_checks):
+      sink_jar = params.metric_collector_sink_jar
+    else:
+      sink_jar = params.metric_collector_legacy_sink_jar
+
+    Execute(format("{sudo} ln -s {sink_jar} {storm_lib_dir}/ambari-metrics-storm-sink.jar"),
+            not_if=format("ls {storm_lib_dir}/ambari-metrics-storm-sink.jar"),
+            only_if=format("ls {sink_jar}")
+            )
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    self.link_metrics_sink_jar()
+    setup_ranger_storm(upgrade_type=upgrade_type)
+    service("ui", action="start")
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    service("ui", action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_ui)
+      
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+  
+  def get_user(self):
+    import params
+    return params.storm_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.pid_ui]
+
+if __name__ == "__main__":
+  UiServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/client_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/client_jaas.conf.j2
new file mode 100644
index 0000000..b061cd1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/client_jaas.conf.j2
@@ -0,0 +1,33 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+StormClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useTicketCache=true
+   renewTicket=true
+   serviceName="{{nimbus_bare_jaas_principal}}";
+};
+
+{% if kafka_bare_jaas_principal %}
+KafkaClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useTicketCache=true
+   renewTicket=true
+   serviceName="{{kafka_bare_jaas_principal}}";
+};
+{% endif %}
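
The template always emits a StormClient JAAS section and adds a KafkaClient
section only when kafka_bare_jaas_principal is set. In the service scripts,
such .j2 files are typically materialized with resource_management's File and
Template resources; a minimal sketch, where the target path, owner and group
are illustrative assumptions rather than the stack's actual values:

  from resource_management.core.resources.system import File
  from resource_management.core.source import Template

  # Template() resolves the {{...}} placeholders against the params module
  # currently in scope.
  File("/etc/storm/conf/client_jaas.conf",
       owner="storm",
       group="hadoop",
       content=Template("client_jaas.conf.j2"))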


[09/50] [abbrv] ambari git commit: AMBARI-21044 Add service wizard unresponsive while adding HST via Ambari. (ababiichuk)

Posted by ad...@apache.org.
AMBARI-21044 Add service wizard unresponsive while adding HST via Ambari. (ababiichuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/292db86f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/292db86f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/292db86f

Branch: refs/heads/ambari-rest-api-explorer
Commit: 292db86f02e236615dc67576f66b856657898f17
Parents: bba703b
Author: ababiichuk <ab...@hortonworks.com>
Authored: Wed May 17 21:17:17 2017 +0300
Committer: ababiichuk <ab...@hortonworks.com>
Committed: Thu May 18 00:21:20 2017 +0300

----------------------------------------------------------------------
 ambari-web/app/controllers/installer.js         | 15 ----
 .../journalNode/step1_controller.js             |  2 +-
 .../service/manage_config_groups_controller.js  | 89 ++++++++++++++++++--
 ambari-web/app/controllers/wizard.js            | 38 +--------
 .../wizard/step7/assign_master_controller.js    | 16 +++-
 .../mixins/wizard/assign_master_components.js   | 77 ++++++++++++-----
 .../app/mixins/wizard/wizardHostsLoading.js     |  6 +-
 ambari-web/app/routes/add_service_routes.js     |  1 +
 ambari-web/app/routes/installer.js              |  1 +
 ambari-web/app/utils/ajax/ajax.js               |  4 +
 .../common/assign_master_components_view.js     |  4 +
 .../app/views/main/service/reassign_view.js     |  4 -
 ambari-web/test/controllers/installer_test.js   | 12 ---
 .../journalNode/step1_controller_test.js        |  4 +-
 .../main/service/add_controller_test.js         | 54 +-----------
 .../test/controllers/wizard/step5_test.js       | 86 +++++++++++++------
 ambari-web/test/controllers/wizard_test.js      | 30 +------
 .../resourceManager/wizard_view_test.js         | 18 +---
 .../views/main/service/reassign_view_test.js    | 12 ---
 19 files changed, 236 insertions(+), 237 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/292db86f/ambari-web/app/controllers/installer.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/installer.js b/ambari-web/app/controllers/installer.js
index ec280fa..db9623a 100644
--- a/ambari-web/app/controllers/installer.js
+++ b/ambari-web/app/controllers/installer.js
@@ -203,24 +203,9 @@ App.InstallerController = App.WizardController.extend(App.Persist, {
 
     for (var hostName in rawHosts) {
       var host = rawHosts[hostName];
-      var disksOverallCapacity = 0;
-      var diskFree = 0;
-      host.disk_info.forEach(function (disk) {
-        disksOverallCapacity += parseFloat(disk.size);
-        diskFree += parseFloat(disk.available);
-      });
       hosts.pushObject(Em.Object.create({
           id: host.name,
-          ip: host.ip,
-          osType: host.os_type,
-          osArch: host.os_arch,
           hostName: host.name,
-          publicHostName: host.name,
-          cpu: host.cpu,
-          memory: host.memory,
-          diskInfo: host.disk_info,
-          diskTotal: disksOverallCapacity / (1024 * 1024),
-          diskFree: diskFree / (1024 * 1024),
           hostComponents: host.hostComponents || []
         }
       ))

http://git-wip-us.apache.org/repos/asf/ambari/blob/292db86f/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step1_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step1_controller.js b/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step1_controller.js
index b54986b..197596c 100644
--- a/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step1_controller.js
+++ b/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step1_controller.js
@@ -105,7 +105,7 @@ App.ManageJournalNodeWizardStep1Controller = Em.Controller.extend(App.BlueprintM
     self.get('addableComponents').forEach(function (componentName) {
       self.updateComponent(componentName);
     }, self);
-    self.set('isLoaded', true);
+    self.set('isRecommendationsLoaded', true);
   },
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/292db86f/ambari-web/app/controllers/main/service/manage_config_groups_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/manage_config_groups_controller.js b/ambari-web/app/controllers/main/service/manage_config_groups_controller.js
index 45c840a..a260652 100644
--- a/ambari-web/app/controllers/main/service/manage_config_groups_controller.js
+++ b/ambari-web/app/controllers/main/service/manage_config_groups_controller.js
@@ -289,15 +289,13 @@ App.ManageConfigGroupsController = Em.Controller.extend(App.ConfigOverridable, {
    */
   loadHosts: function() {
     this.set('isLoaded', false);
-    if (this.get('isInstaller')) {
-      var allHosts = this.get('isAddService') ? App.router.get('addServiceController').get('allHosts') : App.router.get('installerController').get('allHosts');
-      this.set('clusterHosts', allHosts);
-      this.loadConfigGroups(this.get('serviceName'));
-    }
-    else {
+    if (this.get('isInstaller') && !this.get('isAddService')) {
+      var hostNames = App.router.get('installerController').get('allHosts').mapProperty('hostName').join();
+      this.loadInstallerHostsFromServer(hostNames);
+    } else {
       this.loadHostsFromServer();
-      this.loadConfigGroups(this.get('serviceName'));
     }
+    this.loadConfigGroups(this.get('serviceName'));
   },
 
   /**
@@ -1000,6 +998,83 @@ App.ManageConfigGroupsController = Em.Controller.extend(App.ConfigOverridable, {
         this.set('disablePrimary', !modified);
       }.observes('subViewController.isHostsModified')
     });
+  },
+
+  loadInstallerHostsFromServer: function (hostNames) {
+    return App.ajax.send({
+      name: 'hosts.info.install',
+      sender: this,
+      data: {
+        hostNames: hostNames
+      },
+      success: 'loadInstallerHostsSuccessCallback'
+    });
+  },
+
+  loadInstallerHostsSuccessCallback: function (data) {
+    var rawHosts = App.router.get('installerController.content.hosts'),
+      masterComponents = App.router.get('installerController.content.masterComponentHosts'),
+      slaveComponents = App.router.get('installerController.content.slaveComponentHosts'),
+      hosts = [];
+    masterComponents.forEach(function (component) {
+      var host = rawHosts[component.hostName];
+      if (host.hostComponents) {
+        host.hostComponents.push(Em.Object.create({
+          componentName: component.component,
+          displayName: component.display_name
+        }));
+      } else {
+        rawHosts[component.hostName].hostComponents = [
+          Em.Object.create({
+            componentName: component.component,
+            displayName: component.display_name
+          })
+        ]
+      }
+    });
+    slaveComponents.forEach(function (component) {
+      component.hosts.forEach(function (rawHost) {
+        var host = rawHosts[rawHost.hostName];
+        if (host.hostComponents) {
+          host.hostComponents.push(Em.Object.create({
+            componentName: component.componentName,
+            displayName: component.displayName
+          }));
+        } else {
+          rawHosts[rawHost.hostName].hostComponents = [
+            Em.Object.create({
+              componentName: component.componentName,
+              displayName: component.displayName
+            })
+          ]
+        }
+      });
+    });
+
+    data.items.forEach(function (host) {
+      var disksOverallCapacity = 0,
+        diskFree = 0;
+      host.Hosts.disk_info.forEach(function (disk) {
+        disksOverallCapacity += parseFloat(disk.size);
+        diskFree += parseFloat(disk.available);
+      });
+      hosts.pushObject(Em.Object.create({
+        id: host.Hosts.host_name,
+        ip: host.Hosts.ip,
+        osType: host.Hosts.os_type,
+        osArch: host.Hosts.os_arch,
+        hostName: host.Hosts.host_name,
+        publicHostName: host.Hosts.public_host_name,
+        cpu: host.Hosts.cpu_count,
+        memory: host.Hosts.total_mem.toFixed(2),
+        diskInfo: host.Hosts.disk_info,
+        diskTotal: disksOverallCapacity / (1024 * 1024),
+        diskFree: diskFree / (1024 * 1024),
+        hostComponents: (rawHosts[host.Hosts.host_name] && rawHosts[host.Hosts.host_name].hostComponents) || []
+      }));
+    });
+
+    this.set('clusterHosts', hosts);
   }
 
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/292db86f/ambari-web/app/controllers/wizard.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard.js b/ambari-web/app/controllers/wizard.js
index a8a0249..efda62d 100644
--- a/ambari-web/app/controllers/wizard.js
+++ b/ambari-web/app/controllers/wizard.js
@@ -114,39 +114,21 @@ App.WizardController = Em.Controller.extend(App.LocalStorage, App.ThemesMappingM
   allHosts: function () {
     var dbHosts = this.get('content.hosts');
     var hosts = [];
-    var hostComponents = [];
 
     for (var hostName in dbHosts) {
-      hostComponents = [];
-      var disksOverallCapacity = 0;
-      var diskFree = 0;
+      var hostComponents = [];
       dbHosts[hostName].hostComponents.forEach(function (componentName) {
         hostComponents.push(Em.Object.create({
           componentName: componentName,
           displayName: App.format.role(componentName, false)
         }));
       });
-      dbHosts[hostName].disk_info.forEach(function (disk) {
-        disksOverallCapacity += parseFloat(disk.size);
-        diskFree += parseFloat(disk.available);
-      });
 
       hosts.push(Em.Object.create({
         id: hostName,
         hostName: hostName,
-        publicHostName: hostName,
-        diskInfo: dbHosts[hostName].disk_info,
-        diskTotal: disksOverallCapacity / (1024 * 1024),
-        diskFree: diskFree / (1024 * 1024),
-        disksMounted: dbHosts[hostName].disk_info.length,
-        cpu: dbHosts[hostName].cpu,
-        memory: dbHosts[hostName].memory,
-        osType: dbHosts[hostName].osType ? dbHosts[hostName].osType: 0,
-        osArch: dbHosts[hostName].osArch ? dbHosts[hostName].osArch : 0,
-        ip: dbHosts[hostName].ip ? dbHosts[hostName].ip: 0,
-        hostComponents: hostComponents,
-        maintenanceState: dbHosts[hostName].maintenance_state
-      }))
+        hostComponents: hostComponents
+      }));
     }
     return hosts;
   }.property('content.hosts'),
@@ -771,13 +753,6 @@ App.WizardController = Em.Controller.extend(App.LocalStorage, App.ThemesMappingM
       if (_host.bootStatus === 'REGISTERED') {
         hosts[_host.name] = {
           name: _host.name,
-          cpu: _host.cpu,
-          memory: _host.memory,
-          disk_info: _host.disk_info,
-          os_type: _host.os_type,
-          os_arch: _host.os_arch,
-          ip: _host.ip,
-          maintenance_state: _host.maintenance_state,
           bootStatus: _host.bootStatus,
           isInstalled: false,
           id: indx++
@@ -1308,13 +1283,6 @@ App.WizardController = Em.Controller.extend(App.LocalStorage, App.ThemesMappingM
     response.items.forEach(function (item, indx) {
       installedHosts[item.Hosts.host_name] = {
         name: item.Hosts.host_name,
-        cpu: item.Hosts.cpu_count,
-        memory: item.Hosts.total_mem,
-        disk_info: item.Hosts.disk_info,
-        osType: item.Hosts.os_type,
-        osArch: item.Hosts.os_arch,
-        ip: item.Hosts.ip,
-        maintenance_state: item.Hosts.maintenance_state,
         bootStatus: "REGISTERED",
         isInstalled: true,
         hostComponents: item.host_components,

http://git-wip-us.apache.org/repos/asf/ambari/blob/292db86f/ambari-web/app/controllers/wizard/step7/assign_master_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step7/assign_master_controller.js b/ambari-web/app/controllers/wizard/step7/assign_master_controller.js
index 099931a..e8eaf47 100644
--- a/ambari-web/app/controllers/wizard/step7/assign_master_controller.js
+++ b/ambari-web/app/controllers/wizard/step7/assign_master_controller.js
@@ -314,6 +314,7 @@ App.AssignMasterOnStep7Controller = Em.Controller.extend(App.BlueprintMixin, App
 
       this.set("hosts", result);
       this.sortHosts(result);
+      this.set('isHostsLoaded', true);
     }
   },
 
@@ -642,5 +643,18 @@ App.AssignMasterOnStep7Controller = Em.Controller.extend(App.BlueprintMixin, App
       });
     }
     return masterComponents;
-  }
+  },
+
+  getHosts: function () {
+    var result,
+      parentController = this.get('content.controllerName');
+    if (parentController) {
+      result = this._super();
+    } else {
+      result = this.get('hosts').mapProperty('host_name');
+    }
+    return result;
+  },
+
+  clearStepOnExit: Em.K
 });
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/292db86f/ambari-web/app/mixins/wizard/assign_master_components.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/wizard/assign_master_components.js b/ambari-web/app/mixins/wizard/assign_master_components.js
index 3e2a09a..7c4929f 100644
--- a/ambari-web/app/mixins/wizard/assign_master_components.js
+++ b/ambari-web/app/mixins/wizard/assign_master_components.js
@@ -250,10 +250,22 @@ App.AssignMasterComponents = Em.Mixin.create(App.HostComponentValidationMixin, A
   selectedServicesMasters: [],
 
   /**
+   * Is hosts data loaded
+   * @type {bool}
+   */
+  isHostsLoaded: false,
+
+  /**
+   * Are recommendations loaded
+   * @type {bool}
+   */
+  isRecommendationsLoaded: false,
+
+  /**
    * Is data for current step loaded
    * @type {bool}
    */
-  isLoaded: false,
+  isLoaded: Em.computed.and('isHostsLoaded', 'isRecommendationsLoaded'),
 
   /**
    * Is back from the next step
@@ -341,7 +353,7 @@ App.AssignMasterComponents = Em.Mixin.create(App.HostComponentValidationMixin, A
     }, this);
 
     return mapping.sortProperty('host_name');
-  }.property("selectedServicesMasters.@each.selectedHost", 'selectedServicesMasters.@each.isHostNameValid'),
+  }.property('selectedServicesMasters.@each.selectedHost', 'selectedServicesMasters.@each.isHostNameValid', 'isLoaded'),
 
   /**
    * Count of hosts without masters
@@ -484,7 +496,8 @@ App.AssignMasterComponents = Em.Mixin.create(App.HostComponentValidationMixin, A
   clearStep: function () {
     this.setProperties({
       hosts: [],
-      isLoaded: false,
+      isHostsLoaded: false,
+      isRecommendationsLoaded: false,
       backFromNextStep: false,
       selectedServicesMasters: [],
       servicesMasters: []
@@ -494,6 +507,10 @@ App.AssignMasterComponents = Em.Mixin.create(App.HostComponentValidationMixin, A
     }, this);
   },
 
+  clearStepOnExit: function () {
+    this.clearStep();
+  },
+
   /**
    * Load controller data (hosts, host components etc)
    * @method loadStep
@@ -511,7 +528,7 @@ App.AssignMasterComponents = Em.Mixin.create(App.HostComponentValidationMixin, A
       this.set('backFromNextStep',true);
     }
     this.getRecommendedHosts({
-      hosts: this.get('hosts').mapProperty('host_name')
+      hosts: this.getHosts()
     }).then(function() {
       self.loadStepCallback(self.createComponentInstallationObjects(), self);
     });
@@ -527,7 +544,7 @@ App.AssignMasterComponents = Em.Mixin.create(App.HostComponentValidationMixin, A
     self.get('addableComponents').forEach(function (componentName) {
       self.updateComponent(componentName);
     }, self);
-    self.set('isLoaded', true);
+    self.set('isRecommendationsLoaded', true);
     if (self.thereIsNoMasters() && !self.get('mastersToCreate').length) {
       App.router.send('next');
     }
@@ -579,25 +596,40 @@ App.AssignMasterComponents = Em.Mixin.create(App.HostComponentValidationMixin, A
    * @method renderHostInfo
    */
   renderHostInfo: function () {
-    var hostInfo = this.get('content.hosts');
-    var result = [];
+    var isInstaller = (this.get('wizardController.name') === 'installerController' || this.get('content.controllerName') === 'installerController');
+    App.ajax.send({
+      name: isInstaller ? 'hosts.info.install' : 'hosts.high_availability.wizard',
+      sender: this,
+      data: {
+        hostNames: isInstaller ? this.getHosts().join() : null
+      },
+      success: 'loadWizardHostsSuccessCallback'
+    });
+  },
 
-    for (var index in hostInfo) {
-      var _host = hostInfo[index];
+  loadWizardHostsSuccessCallback: function (data) {
+    var hostInfo = this.get('content.hosts'),
+      result = [];
+    data.items.forEach(function (host) {
+      var hostName = host.Hosts.host_name,
+        _host = hostInfo[hostName],
+        cpu = host.Hosts.cpu_count,
+        memory = host.Hosts.total_mem.toFixed(2);
       if (_host.bootStatus === 'REGISTERED') {
         result.push(Em.Object.create({
-          host_name: _host.name,
-          cpu: _host.cpu,
-          memory: _host.memory,
-          disk_info: _host.disk_info,
-          maintenance_state: _host.maintenance_state,
+          host_name: hostName,
+          cpu: cpu,
+          memory: memory,
+          disk_info: host.Hosts.disk_info,
+          maintenance_state: host.Hosts.maintenance_state,
           isInstalled: _host.isInstalled,
-          host_info: Em.I18n.t('installer.step5.hostInfo').fmt(_host.name, numberUtils.bytesToSize(_host.memory, 1, 'parseFloat', 1024), _host.cpu)
+          host_info: Em.I18n.t('installer.step5.hostInfo').fmt(hostName, numberUtils.bytesToSize(memory, 1, 'parseFloat', 1024), cpu)
         }));
       }
-    }
-    this.set("hosts", result);
+    }, this);
+    this.set('hosts', result);
     this.sortHosts(this.get('hosts'));
+    this.set('isHostsLoaded', true);
   },
 
   /**
@@ -1101,7 +1133,8 @@ App.AssignMasterComponents = Em.Mixin.create(App.HostComponentValidationMixin, A
   },
 
   recommendAndValidate: function(callback) {
-    var self = this;
+    var self = this,
+      hostNames = this.getHosts();
 
     if (this.get('validationInProgress')) {
       this.set('runQueuedValidation', true);
@@ -1112,11 +1145,11 @@ App.AssignMasterComponents = Em.Mixin.create(App.HostComponentValidationMixin, A
 
     // load recommendations with partial request
     this.getRecommendedHosts({
-      hosts: this.get('hosts').mapProperty('host_name'),
+      hosts: hostNames,
       components: this.getCurrentComponentHostMap()
     }).then(function() {
       self.validateSelectedHostComponents({
-        hosts: self.get('hosts').mapProperty('host_name'),
+        hosts: hostNames,
         blueprint: self.get('recommendations')
       }).then(function() {
         if (callback) {
@@ -1224,5 +1257,9 @@ App.AssignMasterComponents = Em.Mixin.create(App.HostComponentValidationMixin, A
         self.set('submitButtonClicked', false);
       }
     });
+  },
+
+  getHosts: function () {
+    return Em.keys(this.get('content.hosts'));
   }
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/292db86f/ambari-web/app/mixins/wizard/wizardHostsLoading.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/wizard/wizardHostsLoading.js b/ambari-web/app/mixins/wizard/wizardHostsLoading.js
index 93dab02..950b51e 100644
--- a/ambari-web/app/mixins/wizard/wizardHostsLoading.js
+++ b/ambari-web/app/mixins/wizard/wizardHostsLoading.js
@@ -55,12 +55,8 @@ App.WizardHostsLoading = Em.Mixin.create({
     data.items.forEach(function (item) {
       hosts[item.Hosts.host_name] = {
         name: item.Hosts.host_name,
-        cpu: item.Hosts.cpu_count,
-        memory: item.Hosts.total_mem,
-        disk_info: item.Hosts.disk_info,
         bootStatus: "REGISTERED",
-        isInstalled: true,
-        maintenance_state: item.Hosts.maintenance_state
+        isInstalled: true
       };
     });
     App.db.setHosts(hosts);

http://git-wip-us.apache.org/repos/asf/ambari/blob/292db86f/ambari-web/app/routes/add_service_routes.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/routes/add_service_routes.js b/ambari-web/app/routes/add_service_routes.js
index 89a4a36..1615f0d 100644
--- a/ambari-web/app/routes/add_service_routes.js
+++ b/ambari-web/app/routes/add_service_routes.js
@@ -167,6 +167,7 @@ module.exports = App.WizardRoute.extend({
       controller.dataLoading().done(function () {
         controller.loadAllPriorSteps().done(function () {
           App.logger.logTimerIfMoreThan(consoleMsg.format(2));
+          wizardStep2Controller.set('wizardController', controller);
           controller.connectOutlet('wizardStep5', controller.get('content'));
         });
       });

http://git-wip-us.apache.org/repos/asf/ambari/blob/292db86f/ambari-web/app/routes/installer.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/routes/installer.js b/ambari-web/app/routes/installer.js
index daefa48..eae03a7 100644
--- a/ambari-web/app/routes/installer.js
+++ b/ambari-web/app/routes/installer.js
@@ -315,6 +315,7 @@ module.exports = Em.Route.extend(App.RouterRedirections, {
       });
       controller.setCurrentStep('5');
       controller.loadAllPriorSteps().done(function () {
+        wizardStep5Controller.set('wizardController', controller);
         controller.connectOutlet('wizardStep5', controller.get('content'));
         self.scrollTop();
         console.timeEnd('step5 connectOutlets');

http://git-wip-us.apache.org/repos/asf/ambari/blob/292db86f/ambari-web/app/utils/ajax/ajax.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/ajax/ajax.js b/ambari-web/app/utils/ajax/ajax.js
index 0b584d8..4dc04f4 100644
--- a/ambari-web/app/utils/ajax/ajax.js
+++ b/ambari-web/app/utils/ajax/ajax.js
@@ -2698,6 +2698,10 @@ var urls = {
     'real': '/clusters/{clusterName}/hosts?fields=Hosts/cpu_count,Hosts/disk_info,Hosts/total_mem,Hosts/ip,Hosts/os_type,Hosts/os_arch,Hosts/public_host_name,host_components&minimal_response=true',
     'mock': ''
   },
+  'hosts.info.install': {
+    'real': '/hosts?Hosts/host_name.in({hostNames})&fields=Hosts/cpu_count,Hosts/disk_info,Hosts/total_mem,Hosts/ip,Hosts/os_type,Hosts/os_arch,Hosts/public_host_name&minimal_response=true',
+    'mock': ''
+  },
   'hosts.host_components.pre_load': {
     real: '',
     mock: '/data/hosts/HDP2/hosts.json',
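
The new 'hosts.info.install' entry restricts the query to an explicit host
list via the host_name.in(...) predicate instead of fetching every cluster
host. A sketch of the equivalent direct REST call, with the server address,
credentials and host names all hypothetical:

  import requests

  hosts = "c6401.ambari.apache.org,c6402.ambari.apache.org"
  url = ("http://ambari.example.com:8080/api/v1/hosts"
         "?Hosts/host_name.in({0})"
         "&fields=Hosts/cpu_count,Hosts/disk_info,Hosts/total_mem,"
         "Hosts/ip,Hosts/os_type,Hosts/os_arch,Hosts/public_host_name"
         "&minimal_response=true").format(hosts)
  resp = requests.get(url, auth=("admin", "admin"))
  for item in resp.json()["items"]:
      print(item["Hosts"]["host_name"], item["Hosts"]["total_mem"])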

http://git-wip-us.apache.org/repos/asf/ambari/blob/292db86f/ambari-web/app/views/common/assign_master_components_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/assign_master_components_view.js b/ambari-web/app/views/common/assign_master_components_view.js
index 892bcae..001667a 100644
--- a/ambari-web/app/views/common/assign_master_components_view.js
+++ b/ambari-web/app/views/common/assign_master_components_view.js
@@ -49,6 +49,10 @@ App.AssignMasterComponentsView = Em.View.extend({
 
   didInsertElement: function () {
     this.get('controller').loadStep();
+  },
+
+  willDestroyElement: function () {
+    this.get('controller').clearStepOnExit();
   }
 });
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/292db86f/ambari-web/app/views/main/service/reassign_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/reassign_view.js b/ambari-web/app/views/main/service/reassign_view.js
index 6885726..c7c1228 100644
--- a/ambari-web/app/views/main/service/reassign_view.js
+++ b/ambari-web/app/views/main/service/reassign_view.js
@@ -49,10 +49,6 @@ App.ReassignMasterView = Em.View.extend(App.WizardMenuMixin, {
     data.items.forEach(function (item) {
       hosts[item.Hosts.host_name] = {
         name: item.Hosts.host_name,
-        cpu: item.Hosts.cpu_count,
-        memory: item.Hosts.total_mem,
-        disk_info: item.Hosts.disk_info,
-        maintenance_state: item.Hosts.maintenance_state,
         bootStatus: "REGISTERED",
         isInstalled: true
       };

http://git-wip-us.apache.org/repos/asf/ambari/blob/292db86f/ambari-web/test/controllers/installer_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/installer_test.js b/ambari-web/test/controllers/installer_test.js
index 94af88e..bc91a8e 100644
--- a/ambari-web/test/controllers/installer_test.js
+++ b/ambari-web/test/controllers/installer_test.js
@@ -736,10 +736,6 @@ describe('App.InstallerController', function () {
     it ('Should return hosts', function() {
       var hosts = {
         'h1': {
-          disk_info: Em.A([{
-            available: 1,
-            size: 10
-          }]),
           hostComponents: Em.A([])
         }
       };
@@ -765,14 +761,6 @@ describe('App.InstallerController', function () {
       var res = JSON.parse(JSON.stringify(installerController.get('allHosts')));
       expect(res).to.eql([
         {
-          "diskInfo": [
-            {
-              "available": 1,
-              "size": 10
-            }
-          ],
-          "diskTotal": 0.0000095367431640625,
-          "diskFree": 9.5367431640625e-7,
           "hostComponents": [
             {
               "componentName": "component",

http://git-wip-us.apache.org/repos/asf/ambari/blob/292db86f/ambari-web/test/controllers/main/admin/highAvailability/journalNode/step1_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/admin/highAvailability/journalNode/step1_controller_test.js b/ambari-web/test/controllers/main/admin/highAvailability/journalNode/step1_controller_test.js
index 17f5ed2..9ae300c 100644
--- a/ambari-web/test/controllers/main/admin/highAvailability/journalNode/step1_controller_test.js
+++ b/ambari-web/test/controllers/main/admin/highAvailability/journalNode/step1_controller_test.js
@@ -199,9 +199,9 @@ describe('App.ManageJournalNodeWizardStep1Controller', function () {
       expect(controller.updateComponent.calledWith('C1')).to.be.true;
     });
 
-    it('isLoaded should be true', function() {
+    it('isRecommendationsLoaded should be true', function() {
       controller.loadStepCallback([], controller);
-      expect(controller.get('isLoaded')).to.be.true;
+      expect(controller.get('isRecommendationsLoaded')).to.be.true;
     });
   });
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/292db86f/ambari-web/test/controllers/main/service/add_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/service/add_controller_test.js b/ambari-web/test/controllers/main/service/add_controller_test.js
index a52c38f..1119176 100644
--- a/ambari-web/test/controllers/main/service/add_controller_test.js
+++ b/ambari-web/test/controllers/main/service/add_controller_test.js
@@ -167,25 +167,7 @@ describe('App.AddServiceController', function() {
   describe('#loadHostsSuccessCallback', function () {
 
     it('should load hosts to local db and model', function () {
-      var diskInfo = [
-          {
-            available: '600000',
-            used: '400000',
-            percent: '40%',
-            size: '10000000',
-            type: 'ext4',
-            mountpoint: '/'
-          },
-          {
-            available: '500000',
-            used: '300000',
-            percent: '50%',
-            size: '6000000',
-            type: 'ext4',
-            mountpoint: '/'
-          }
-        ],
-        hostComponents = [
+      var hostComponents = [
           [
             {
               HostRoles: {
@@ -219,31 +201,13 @@ describe('App.AddServiceController', function() {
           items: [
             {
               Hosts: {
-                cpu_count: 1,
-                disk_info: [
-                  diskInfo[0]
-                ],
                 host_name: 'h0',
-                ip: '10.1.1.0',
-                os_arch: 'x86_64',
-                os_type: 'centos6',
-                total_mem: 4194304,
-                maintenance_state: 'ON'
               },
               host_components: hostComponents[0]
             },
             {
               Hosts: {
-                cpu_count: 2,
-                disk_info: [
-                  diskInfo[1]
-                ],
-                host_name: 'h1',
-                ip: '10.1.1.1',
-                os_arch: 'x86',
-                os_type: 'centos5',
-                total_mem: 3145728,
-                maintenance_state: 'OFF'
+                host_name: 'h1'
               },
               host_components: hostComponents[1]
             }
@@ -252,29 +216,15 @@ describe('App.AddServiceController', function() {
         expected = {
           h0: {
             name: 'h0',
-            cpu: 1,
-            memory: 4194304,
-            disk_info: [diskInfo[0]],
-            osType: 'centos6',
-            osArch: 'x86_64',
-            ip: '10.1.1.0',
             bootStatus: 'REGISTERED',
             isInstalled: true,
-            maintenance_state: 'ON',
             hostComponents: hostComponents[0],
             id: 0
           },
           h1: {
             name: 'h1',
-            cpu: 2,
-            memory: 3145728,
-            disk_info: [diskInfo[1]],
-            osType: 'centos5',
-            osArch: 'x86',
-            ip: '10.1.1.1',
             bootStatus: 'REGISTERED',
             isInstalled: true,
-            maintenance_state: 'OFF',
             hostComponents: hostComponents[1],
             id: 1
           }

http://git-wip-us.apache.org/repos/asf/ambari/blob/292db86f/ambari-web/test/controllers/wizard/step5_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/wizard/step5_test.js b/ambari-web/test/controllers/wizard/step5_test.js
index 3c9048d..57c33b2 100644
--- a/ambari-web/test/controllers/wizard/step5_test.js
+++ b/ambari-web/test/controllers/wizard/step5_test.js
@@ -94,56 +94,86 @@ describe('App.WizardStep5Controller', function () {
 
   });
 
-  describe('#renderHostInfo', function () {
+  describe('#loadWizardHostsSuccessCallback', function () {
 
     var tests = Em.A([
       {
-        hosts: {
-          h1: {memory: 4, cpu: 1, name: 'host1', bootStatus: 'INIT'},
-          h2: {memory: 3, cpu: 1, name: 'host2', bootStatus: 'INIT'},
-          h3: {memory: 2, cpu: 1, name: 'host3', bootStatus: 'INIT'},
-          h4: {memory: 1, cpu: 1, name: 'host4', bootStatus: 'INIT'}
+        dbHosts: {
+          host1: {bootStatus: 'INIT'},
+          host2: {bootStatus: 'INIT'},
+          host3: {bootStatus: 'INIT'},
+          host4: {bootStatus: 'INIT'}
         },
+        hosts: [
+          {Hosts: {total_mem: 4, cpu_count: 1, host_name: 'host1', bootStatus: 'INIT'}},
+          {Hosts: {total_mem: 3, cpu_count: 1, host_name: 'host2', bootStatus: 'INIT'}},
+          {Hosts: {total_mem: 2, cpu_count: 1, host_name: 'host3', bootStatus: 'INIT'}},
+          {Hosts: {total_mem: 1, cpu_count: 1, host_name: 'host4', bootStatus: 'INIT'}}
+        ],
         m: 'no one host is REGISTERED',
         e: []
       },
       {
-        hosts: {
-          h1: {memory: 4, cpu: 1, name: 'host1', bootStatus: 'REGISTERED'},
-          h2: {memory: 3, cpu: 1, name: 'host2', bootStatus: 'REGISTERED'},
-          h3: {memory: 2, cpu: 1, name: 'host3', bootStatus: 'REGISTERED'},
-          h4: {memory: 1, cpu: 1, name: 'host4', bootStatus: 'REGISTERED'}
+        dbHosts: {
+          host1: {bootStatus: 'REGISTERED'},
+          host2: {bootStatus: 'REGISTERED'},
+          host3: {bootStatus: 'REGISTERED'},
+          host4: {bootStatus: 'REGISTERED'}
         },
+        hosts: [
+          {Hosts: {total_mem: 4, cpu_count: 1, host_name: 'host1', bootStatus: 'REGISTERED'}},
+          {Hosts: {total_mem: 3, cpu_count: 1, host_name: 'host2', bootStatus: 'REGISTERED'}},
+          {Hosts: {total_mem: 2, cpu_count: 1, host_name: 'host3', bootStatus: 'REGISTERED'}},
+          {Hosts: {total_mem: 1, cpu_count: 1, host_name: 'host4', bootStatus: 'REGISTERED'}}
+        ],
         m: 'all hosts are REGISTERED, memory',
         e: ['host1', 'host2', 'host3', 'host4']
       },
       {
-        hosts: {
-          h1: {memory: 1, cpu: 4, name: 'host1', bootStatus: 'REGISTERED'},
-          h2: {memory: 1, cpu: 3, name: 'host2', bootStatus: 'REGISTERED'},
-          h3: {memory: 1, cpu: 2, name: 'host3', bootStatus: 'REGISTERED'},
-          h4: {memory: 1, cpu: 1, name: 'host4', bootStatus: 'REGISTERED'}
+        dbHosts: {
+          host1: {bootStatus: 'REGISTERED'},
+          host2: {bootStatus: 'REGISTERED'},
+          host3: {bootStatus: 'REGISTERED'},
+          host4: {bootStatus: 'REGISTERED'}
         },
+        hosts: [
+          {Hosts: {total_mem: 1, cpu_count: 4, host_name: 'host1', bootStatus: 'REGISTERED'}},
+          {Hosts: {total_mem: 1, cpu_count: 3, host_name: 'host2', bootStatus: 'REGISTERED'}},
+          {Hosts: {total_mem: 1, cpu_count: 2, host_name: 'host3', bootStatus: 'REGISTERED'}},
+          {Hosts: {total_mem: 1, cpu_count: 1, host_name: 'host4', bootStatus: 'REGISTERED'}}
+        ],
         m: 'all hosts are REGISTERED, cpu',
         e: ['host1', 'host2', 'host3', 'host4']
       },
       {
-        hosts: {
-          h1: {memory: 1, cpu: 1, name: 'host4', bootStatus: 'REGISTERED'},
-          h2: {memory: 1, cpu: 1, name: 'host2', bootStatus: 'REGISTERED'},
-          h3: {memory: 1, cpu: 1, name: 'host3', bootStatus: 'REGISTERED'},
-          h4: {memory: 1, cpu: 1, name: 'host1', bootStatus: 'REGISTERED'}
+        dbHosts: {
+          host1: {bootStatus: 'REGISTERED'},
+          host2: {bootStatus: 'REGISTERED'},
+          host3: {bootStatus: 'REGISTERED'},
+          host4: {bootStatus: 'REGISTERED'}
         },
+        hosts: [
+          {Hosts: {total_mem: 1, cpu_count: 1, host_name: 'host4', bootStatus: 'REGISTERED'}},
+          {Hosts: {total_mem: 1, cpu_count: 1, host_name: 'host2', bootStatus: 'REGISTERED'}},
+          {Hosts: {total_mem: 1, cpu_count: 1, host_name: 'host3', bootStatus: 'REGISTERED'}},
+          {Hosts: {total_mem: 1, cpu_count: 1, host_name: 'host1', bootStatus: 'REGISTERED'}}
+        ],
         m: 'all hosts are REGISTERED, host_name',
         e: ['host1', 'host2', 'host3', 'host4']
       },
       {
-        hosts: {
-          h1: {memory: 2, cpu: 1, name: 'host1', bootStatus: 'REGISTERED'},
-          h2: {memory: 1, cpu: 2, name: 'host3', bootStatus: 'INIT'},
-          h3: {memory: 1, cpu: 1, name: 'host4', bootStatus: 'REGISTERED'},
-          h4: {memory: 1, cpu: 1, name: 'host2', bootStatus: 'INIT'}
+        dbHosts: {
+          host1: {bootStatus: 'REGISTERED'},
+          host2: {bootStatus: 'INIT'},
+          host3: {bootStatus: 'INIT'},
+          host4: {bootStatus: 'REGISTERED'}
         },
+        hosts: [
+          {Hosts: {total_mem: 2, cpu_count: 1, host_name: 'host1', bootStatus: 'REGISTERED'}},
+          {Hosts: {total_mem: 1, cpu_count: 2, host_name: 'host3', bootStatus: 'INIT'}},
+          {Hosts: {total_mem: 1, cpu_count: 1, host_name: 'host4', bootStatus: 'REGISTERED'}},
+          {Hosts: {total_mem: 1, cpu_count: 1, host_name: 'host2', bootStatus: 'INIT'}}
+        ],
         m: 'mix',
         e: ['host1', 'host4']
       }
@@ -151,8 +181,8 @@ describe('App.WizardStep5Controller', function () {
 
     tests.forEach(function (test) {
       it(test.m, function () {
-        controller.set('content', {hosts: test.hosts});
-        controller.renderHostInfo();
+        controller.set('content', {hosts: test.dbHosts});
+        controller.loadWizardHostsSuccessCallback({items: test.hosts});
         var r = controller.get('hosts');
         expect(Em.A(r).mapProperty('host_name')).to.eql(test.e);
       });

http://git-wip-us.apache.org/repos/asf/ambari/blob/292db86f/ambari-web/test/controllers/wizard_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/wizard_test.js b/ambari-web/test/controllers/wizard_test.js
index 078f8ae..1ce96ed 100644
--- a/ambari-web/test/controllers/wizard_test.js
+++ b/ambari-web/test/controllers/wizard_test.js
@@ -1257,8 +1257,8 @@ describe('App.WizardController', function () {
 
     it('should return all hosts', function () {
       var hosts = {
-        'h1': {hostComponents: ['c1', 'c2'], disk_info: [{size: 2, available: 1}]},
-        'h2': {hostComponents: ['c3', 'c4'], disk_info: [{size: 2, available: 1}]}
+        'h1': {hostComponents: ['c1', 'c2']},
+        'h2': {hostComponents: ['c3', 'c4']}
       };
 
       var content = Em.Object.create({
@@ -1271,19 +1271,6 @@ describe('App.WizardController', function () {
         {
           "id": "h1",
           "hostName": "h1",
-          "publicHostName": "h1",
-          "diskInfo": [
-            {
-              "size": 2,
-              "available": 1
-            }
-          ],
-          "diskTotal": 0.0000019073486328125,
-          "diskFree": 9.5367431640625e-7,
-          "disksMounted": 1,
-          "osType": 0,
-          "osArch": 0,
-          "ip": 0,
           "hostComponents": [
             {
               "componentName": "c1",
@@ -1298,19 +1285,6 @@ describe('App.WizardController', function () {
         {
           "id": "h2",
           "hostName": "h2",
-          "publicHostName": "h2",
-          "diskInfo": [
-            {
-              "size": 2,
-              "available": 1
-            }
-          ],
-          "diskTotal": 0.0000019073486328125,
-          "diskFree": 9.5367431640625e-7,
-          "disksMounted": 1,
-          "osType": 0,
-          "osArch": 0,
-          "ip": 0,
           "hostComponents": [
             {
               "componentName": "c3",

http://git-wip-us.apache.org/repos/asf/ambari/blob/292db86f/ambari-web/test/views/main/admin/highAvailability/resourceManager/wizard_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/admin/highAvailability/resourceManager/wizard_view_test.js b/ambari-web/test/views/main/admin/highAvailability/resourceManager/wizard_view_test.js
index d30be42..9f3b6a6 100644
--- a/ambari-web/test/views/main/admin/highAvailability/resourceManager/wizard_view_test.js
+++ b/ambari-web/test/views/main/admin/highAvailability/resourceManager/wizard_view_test.js
@@ -85,11 +85,7 @@ describe('App.RMHighAvailabilityWizardView', function () {
       items: [
         {
           Hosts: {
-            host_name: 'host1',
-            cpu_count: 1,
-            total_mem: 1,
-            disk_info: {},
-            maintenance_state: 'OFF'
+            host_name: 'host1'
           }
         }
       ]
@@ -108,23 +104,15 @@ describe('App.RMHighAvailabilityWizardView', function () {
       expect(view.get('controller.content.hosts')).to.be.eql({
         "host1": {
           "name": "host1",
-          "cpu": 1,
-          "memory": 1,
-          "disk_info": {},
           "bootStatus": "REGISTERED",
-          "isInstalled": true,
-          "maintenance_state": "OFF"
+          "isInstalled": true
         }
       });
       expect(App.db.setHosts.calledWith({
         "host1": {
           "name": "host1",
-          "cpu": 1,
-          "memory": 1,
-          "disk_info": {},
           "bootStatus": "REGISTERED",
-          "isInstalled": true,
-          "maintenance_state": "OFF"
+          "isInstalled": true
         }
       })).to.be.true;
     });

http://git-wip-us.apache.org/repos/asf/ambari/blob/292db86f/ambari-web/test/views/main/service/reassign_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/service/reassign_view_test.js b/ambari-web/test/views/main/service/reassign_view_test.js
index f6ae81e..4de4aad 100644
--- a/ambari-web/test/views/main/service/reassign_view_test.js
+++ b/ambari-web/test/views/main/service/reassign_view_test.js
@@ -69,10 +69,6 @@ describe('App.ReassignMasterView', function () {
         {
           Hosts: {
             host_name: 'host1',
-            cpu_count: 1,
-            total_mem: 1024,
-            disk_info: {},
-            maintenance_state: 'ON'
           }
         }
       ]
@@ -90,11 +86,7 @@ describe('App.ReassignMasterView', function () {
         {
           "host1": {
             "bootStatus": "REGISTERED",
-            "cpu": 1,
-            "disk_info": {},
             "isInstalled": true,
-            "maintenance_state": "ON",
-            "memory": 1024,
             "name": "host1"
           }
         }
@@ -104,11 +96,7 @@ describe('App.ReassignMasterView', function () {
       expect(view.get('controller.content.hosts')).to.be.eql({
         "host1": {
           "bootStatus": "REGISTERED",
-          "cpu": 1,
-          "disk_info": {},
           "isInstalled": true,
-          "maintenance_state": "ON",
-          "memory": 1024,
           "name": "host1"
         }
       });


[14/50] [abbrv] ambari git commit: AMBARI-21048. HDP 3.0 TP - create service definition for Storm with configs, kerberos, widgets, etc. (vbrodetsky)

Posted by ad...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/config.yaml.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/config.yaml.j2 b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/config.yaml.j2
new file mode 100644
index 0000000..67b89c4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/config.yaml.j2
@@ -0,0 +1,75 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+nimbusHost: {{nimbus_host}}
+nimbusPort: {{nimbus_port}}
+
+# HTTP-specific options.
+http:
+
+  # The port on which the HTTP server listens for service requests.
+  port: {{rest_api_port}}
+
+  # The port on which the HTTP server listens for administrative requests.
+  adminPort: {{rest_api_admin_port}}
+
+{% if ganglia_installed %}
+enableGanglia: {{ganglia_installed}}
+
+# ganglia configuration (necessary if ganglia reporting is enabled)
+ganglia:
+
+  # how often to report to ganglia metrics (in seconds)
+  reportInterval: {{ganglia_report_interval}}
+
+  # the hostname of the gmond server where storm cluster metrics will be sent
+  host: "{{ganglia_server}}"
+
+  # address mode
+  # default is MULTICAST
+  addressMode: "UNICAST"
+
+  # an <IP>:<HOSTNAME> pair to spoof
+  # this allows us to simulate storm cluster metrics coming from a specific host
+  #spoof: "192.168.1.1:storm"
+{% endif %}
+
+{% if has_metric_collector and stack_supports_storm_ams %}
+enableGanglia: False
+
+ganglia:
+  reportInterval: {{metric_collector_report_interval}}
+
+enableMetricsSink: True
+
+metrics_collector:
+
+  reportInterval: {{metric_collector_report_interval}}
+  collector.hosts: "{{ams_collector_hosts}}"
+  protocol: "{{metric_collector_protocol}}"
+  port: "{{metric_collector_port}}"
+  appId: "{{metric_collector_app_id}}"
+  host_in_memory_aggregation: {{host_in_memory_aggregation}}
+  host_in_memory_aggregation_port: {{host_in_memory_aggregation_port}}
+
+  # HTTPS settings
+  truststore.path : "{{metric_truststore_path}}"
+  truststore.type : "{{metric_truststore_type}}"
+  truststore.password : "{{metric_truststore_password}}"
+
+  instanceId: {{cluster_name}}
+  set.instanceId: {{set_instanceId}}
+
+{% endif %}
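
When both a Ganglia server and a Metrics Collector are present, the rendered
file contains enableGanglia twice; with YAML loaders that let the last
duplicate key win, the AMS block's False takes effect and metrics go to the
sink instead of Ganglia. With a collector present, the tail of the rendered
file looks roughly like this (host and interval values are hypothetical):

  enableGanglia: False

  ganglia:
    reportInterval: 60

  enableMetricsSink: True

  metrics_collector:
    reportInterval: 60
    collector.hosts: "c6401.ambari.apache.org"
    protocol: "http"
    port: "6188"
    appId: "nimbus"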

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/input.config-storm.json.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/input.config-storm.json.j2 b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/input.config-storm.json.j2
new file mode 100644
index 0000000..a2a4841
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/input.config-storm.json.j2
@@ -0,0 +1,78 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"storm_drpc",
+      "rowtype":"service",
+      "path":"{{default('/configurations/storm-env/storm_log_dir', '/var/log/storm')}}/drpc.log"
+    },
+    {
+      "type":"storm_logviewer",
+      "rowtype":"service",
+      "path":"{{default('/configurations/storm-env/storm_log_dir', '/var/log/storm')}}/logviewer.log"
+    },
+    {
+      "type":"storm_nimbus",
+      "rowtype":"service",
+      "path":"{{default('/configurations/storm-env/storm_log_dir', '/var/log/storm')}}/nimbus.log"
+    },
+    {
+      "type":"storm_supervisor",
+      "rowtype":"service",
+      "path":"{{default('/configurations/storm-env/storm_log_dir', '/var/log/storm')}}/supervisor.log"
+    },
+    {
+      "type":"storm_ui",
+      "rowtype":"service",
+      "path":"{{default('/configurations/storm-env/storm_log_dir', '/var/log/storm')}}/ui.log"
+    },
+    {
+      "type":"storm_worker",
+      "rowtype":"service",
+      "path":"{{default('/configurations/storm-env/storm_log_dir', '/var/log/storm')}}/*worker*.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "storm_drpc",
+            "storm_logviewer",
+            "storm_nimbus",
+            "storm_supervisor",
+            "storm_ui",
+            "storm_worker"
+          ]
+        }
+      },
+      "log4j_format":"",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\[%{LOGLEVEL:level}\\]%{SPACE}%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss.SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
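
The grok filter assumes Storm's default log4j layout: an ISO-8601 timestamp,
the abbreviated logger class, the level in brackets, then the message. A log
line it would match (the content itself is hypothetical):

  2017-05-17 13:57:49.123 o.a.s.d.nimbus [INFO] Starting Nimbus with conf ...

The multiline_pattern treats any line that does not start with such a
timestamp (a stack trace, for example) as a continuation of the previous
event, and map_date parses the captured logtime with the
"yyyy-MM-dd HH:mm:ss.SSS" pattern.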

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm-metrics2.properties.j2 b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm-metrics2.properties.j2
new file mode 100644
index 0000000..1dedffc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm-metrics2.properties.j2
@@ -0,0 +1,34 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+collector.hosts={{ams_collector_hosts}}
+protocol={{metric_collector_protocol}}
+port={{metric_collector_port}}
+zookeeper.quorum={{zookeeper_quorum}}
+maxRowCacheSize=10000
+sendInterval={{metrics_report_interval}}000
+clusterReporterAppId=nimbus
+instanceId={{cluster_name}}
+set.instanceId={{set_instanceId}}
+host_in_memory_aggregation = {{host_in_memory_aggregation}}
+host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
+
+# HTTPS properties
+truststore.path = {{metric_truststore_path}}
+truststore.type = {{metric_truststore_type}}
+truststore.password = {{metric_truststore_password}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm.conf.j2 b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm.conf.j2
new file mode 100644
index 0000000..82a26fe
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{storm_user}}   - nofile   {{storm_user_nofile_limit}}
+{{storm_user}}   - nproc    {{storm_user_nproc_limit}}
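
Rendered, this produces a limits.conf-style fragment that raises the Storm
user's open-file and process caps; with storm_user set to "storm" and
illustrative limit values it comes out as:

  storm   - nofile   128000
  storm   - nproc    65536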

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm_jaas.conf.j2
new file mode 100644
index 0000000..c22cb51
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm_jaas.conf.j2
@@ -0,0 +1,65 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+{% if stack_supports_storm_kerberos %}
+StormServer {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="{{nimbus_keytab_path}}"
+   storeKey=true
+   useTicketCache=false
+   principal="{{nimbus_jaas_principal}}";
+};
+StormClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="{{storm_keytab_path}}"
+   storeKey=true
+   useTicketCache=false
+   serviceName="{{nimbus_bare_jaas_principal}}"
+   principal="{{storm_jaas_principal}}";
+};
+RegistryClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="{{storm_keytab_path}}"
+   storeKey=true
+   useTicketCache=false
+   principal="{{storm_jaas_principal}}";
+};
+{% endif %}
+Client {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="{{storm_keytab_path}}"
+   storeKey=true
+   useTicketCache=false
+   serviceName="zookeeper"
+   principal="{{storm_jaas_principal}}";
+};
+
+{% if kafka_bare_jaas_principal %}
+KafkaClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="{{storm_keytab_path}}"
+   storeKey=true
+   useTicketCache=false
+   serviceName="{{kafka_bare_jaas_principal}}"
+   principal="{{storm_jaas_principal}}";
+};
+{% endif %}

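A JVM consumes the rendered file through the standard java.security.auth.login.config system property, and a client selects one of the sections above by name. A minimal sketch using the plain JAAS API (the file path is an assumed install location, not something the template mandates):

  import javax.security.auth.login.LoginContext;

  public class StormJaasLoginSketch {
    public static void main(String[] args) throws Exception {
      // Assumed location of the rendered template.
      System.setProperty("java.security.auth.login.config",
          "/etc/storm/conf/storm_jaas.conf");
      // "StormClient" names the matching section of the JAAS file;
      // Krb5LoginModule then logs in with the configured keytab and principal.
      LoginContext context = new LoginContext("StormClient");
      context.login();
    }
  }
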
http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/worker-launcher.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/worker-launcher.cfg.j2 b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/worker-launcher.cfg.j2
new file mode 100644
index 0000000..2228601
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/worker-launcher.cfg.j2
@@ -0,0 +1,19 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+storm.worker-launcher.group={{user_group}}
+min.user.id={{min_user_ruid}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/quicklinks/quicklinks.json
new file mode 100644
index 0000000..d45f337
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/quicklinks/quicklinks.json
@@ -0,0 +1,45 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"https",
+      "checks":[
+        {
+          "property":"ui.https.keystore.path",
+          "desired":"EXIST",
+          "site":"storm-site"
+        },
+        {
+          "property":"ui.https.key.password",
+          "desired":"EXIST",
+          "site":"storm-site"
+        },
+        {
+          "property":"ui.https.port",
+          "desired":"EXIST",
+          "site":"storm-site"
+        }
+      ]
+    },
+
+    "links": [
+      {
+        "name": "storm_ui",
+        "label": "Storm UI",
+        "requires_user_name": "false",
+        "component_name": "STORM_UI_SERVER",
+        "url":"%@://%@:%@/",
+        "port":{
+          "http_property": "ui.port",
+          "http_default_port": "8744",
+          "https_property": "ui.https.port",
+          "https_default_port": "8740",
+          "regex": "^(\\d+)$",
+          "site": "storm-site"
+        }
+      }
+    ]
+  }
+}

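The protocol block above amounts to: use https for the Storm UI link only when all three listed storm-site properties exist, otherwise fall back to http with ui.port. A hedged sketch of that resolution logic (the helper names are hypothetical, not Ambari API):

  import java.util.Map;

  public class QuickLinkProtocolSketch {
    // siteConfig stands in for the resolved storm-site properties.
    static String resolveProtocol(Map<String, String> siteConfig) {
      boolean httpsReady = siteConfig.containsKey("ui.https.keystore.path")
          && siteConfig.containsKey("ui.https.key.password")
          && siteConfig.containsKey("ui.https.port");
      return httpsReady ? "https" : "http";
    }

    static String resolvePort(Map<String, String> siteConfig) {
      return "https".equals(resolveProtocol(siteConfig))
          ? siteConfig.getOrDefault("ui.https.port", "8740")
          : siteConfig.getOrDefault("ui.port", "8744");
    }
  }
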
http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/role_command_order.json b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/role_command_order.json
new file mode 100644
index 0000000..c8dfd8b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/role_command_order.json
@@ -0,0 +1,13 @@
+{
+  "general_deps" : {
+    "_comment" : "dependencies for STORM",
+    "NIMBUS-START" : ["ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START", "NAMENODE-START"],
+    "SUPERVISOR-START" : ["NIMBUS-START"],
+    "STORM_UI_SERVER-START" : ["NIMBUS-START", "NAMENODE-START"],
+    "DRPC_SERVER-START" : ["NIMBUS-START"],
+    "STORM_REST_API-START" : ["NIMBUS-START", "STORM_UI_SERVER-START", "SUPERVISOR-START", "DRPC_SERVER-START"],
+    "STORM_SERVICE_CHECK-SERVICE_CHECK": ["NIMBUS-START", "SUPERVISOR-START", "STORM_UI_SERVER-START",
+      "DRPC_SERVER-START"],
+    "NIMBUS-STOP" : ["SUPERVISOR-STOP", "STORM_UI_SERVER-STOP", "DRPC_SERVER-STOP"]
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/widgets.json b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/widgets.json
new file mode 100644
index 0000000..d22a1ed
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/widgets.json
@@ -0,0 +1,127 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_storm_dashboard",
+      "display_name": "Standard Storm Dashboard",
+      "section_name": "STORM_SUMMARY",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Number of Slots",
+          "description": "Number of Slots",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Used Slots",
+              "metric_path": "metrics/storm/nimbus/usedslots",
+              "service_name": "STORM",
+              "component_name": "NIMBUS"
+            },
+            {
+              "name": "Free Slots",
+              "metric_path": "metrics/storm/nimbus/freeslots",
+              "service_name": "STORM",
+              "component_name": "NIMBUS"
+            },
+            {
+              "name": "Total Slots",
+              "metric_path": "metrics/storm/nimbus/totalslots",
+              "service_name": "STORM",
+              "component_name": "NIMBUS"
+            }
+          ],
+          "values": [
+            {
+              "name": "Used slots",
+              "value": "${Used Slots}"
+            },
+            {
+              "name": "Free slots",
+              "value": "${Free Slots}"
+            },
+            {
+              "name": "Total slots",
+              "value": "${Total Slots}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Number of executors",
+          "description": "Number of executors",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Total Executors",
+              "metric_path": "metrics/storm/nimbus/totalexecutors",
+              "service_name": "STORM",
+              "component_name": "NIMBUS"
+            }
+          ],
+          "values": [
+            {
+              "name": "Total executors",
+              "value": "${Total Executors}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Number of topologies",
+          "description": "Number of topologies",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Topologies",
+              "metric_path": "metrics/storm/nimbus/topologies",
+              "service_name": "STORM",
+              "component_name": "NIMBUS"
+            }
+          ],
+          "values": [
+            {
+              "name": "Total topologies",
+              "value": "${Topologies}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Number of tasks",
+          "description": "Number of tasks",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Total Tasks",
+              "metric_path": "metrics/storm/nimbus/totaltasks",
+              "service_name": "STORM",
+              "component_name": "NIMBUS"
+            }
+          ],
+          "values": [
+            {
+              "name": "Total tasks",
+              "value": "${Total Tasks}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ab4d28a/ambari-server/src/main/resources/stacks/HDP/3.0/services/STORM/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/STORM/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/STORM/metainfo.xml
new file mode 100644
index 0000000..1833c6f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/STORM/metainfo.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>STORM</name>
+            <version>1.0.1.3.0</version>
+            <extends>common-services/STORM/1.0.1.3.0</extends>
+        </service>
+    </services>
+</metainfo>


[38/50] [abbrv] ambari git commit: AMBARI-21033 Log Search use POJOs for input configuration (mgergely)

Posted by ad...@apache.org.
AMBARI-21033 Log Search use POJOs for input configuration (mgergely)

Change-Id: Ibf28c16309cf3ced0f0eea69d832ecd8accd2d62


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/fd4a7a46
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/fd4a7a46
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/fd4a7a46

Branch: refs/heads/ambari-rest-api-explorer
Commit: fd4a7a46a2db9869dca28294660ca40e693504ea
Parents: cbb1e90
Author: Miklos Gergely <mg...@hortonworks.com>
Authored: Mon May 22 12:49:50 2017 +0200
Committer: Miklos Gergely <mg...@hortonworks.com>
Committed: Mon May 22 12:49:50 2017 +0200

----------------------------------------------------------------------
 .../ambari-logsearch-config-api/pom.xml         |  14 +-
 .../config/api/InputConfigMonitor.java          |  13 +-
 .../logsearch/config/api/LogSearchConfig.java   |   3 +-
 .../config/api/LogSearchConfigFactory.java      |  10 +-
 .../api/model/inputconfig/Conditions.java       |  24 +++
 .../config/api/model/inputconfig/Fields.java    |  26 +++
 .../api/model/inputconfig/FilterDescriptor.java |  39 ++++
 .../model/inputconfig/FilterGrokDescriptor.java |  28 +++
 .../model/inputconfig/FilterJsonDescriptor.java |  23 +++
 .../inputconfig/FilterKeyValueDescriptor.java   |  28 +++
 .../api/model/inputconfig/InputConfig.java      |  28 +++
 .../api/model/inputconfig/InputDescriptor.java  |  54 +++++
 .../inputconfig/InputFileBaseDescriptor.java    |  28 +++
 .../model/inputconfig/InputFileDescriptor.java  |  23 +++
 .../inputconfig/InputS3FileDescriptor.java      |  26 +++
 .../model/inputconfig/MapDateDescriptor.java    |  26 +++
 .../inputconfig/MapFieldCopyDescriptor.java     |  24 +++
 .../model/inputconfig/MapFieldDescriptor.java   |  24 +++
 .../inputconfig/MapFieldNameDescriptor.java     |  24 +++
 .../inputconfig/MapFieldValueDescriptor.java    |  26 +++
 .../api/model/inputconfig/PostMapValues.java    |  26 +++
 .../config/api/LogSearchConfigClass1.java       |   3 +-
 .../config/api/LogSearchConfigClass2.java       |   3 +-
 .../ambari-logsearch-config-zookeeper/pom.xml   |   6 +
 .../config/zookeeper/LogSearchConfigZK.java     |  72 ++++++-
 .../model/inputconfig/impl/ConditionsImpl.java  |  37 ++++
 .../model/inputconfig/impl/FieldsImpl.java      |  39 ++++
 .../model/inputconfig/impl/FilterAdapter.java   |  42 ++++
 .../inputconfig/impl/FilterDescriptorImpl.java  | 113 ++++++++++
 .../impl/FilterGrokDescriptorImpl.java          |  66 ++++++
 .../impl/FilterJsonDescriptorImpl.java          |  25 +++
 .../impl/FilterKeyValueDescriptorImpl.java      |  63 ++++++
 .../model/inputconfig/impl/InputAdapter.java    |  58 ++++++
 .../model/inputconfig/impl/InputConfigGson.java |  46 +++++
 .../model/inputconfig/impl/InputConfigImpl.java |  54 +++++
 .../inputconfig/impl/InputDescriptorImpl.java   | 204 +++++++++++++++++++
 .../impl/InputFileBaseDescriptorImpl.java       |  66 ++++++
 .../impl/InputFileDescriptorImpl.java           |  25 +++
 .../impl/InputS3FileDescriptorImpl.java         |  53 +++++
 .../inputconfig/impl/MapDateDescriptorImpl.java |  58 ++++++
 .../impl/MapFieldCopyDescriptorImpl.java        |  45 ++++
 .../impl/MapFieldNameDescriptorImpl.java        |  45 ++++
 .../impl/MapFieldValueDescriptorImpl.java       |  58 ++++++
 .../inputconfig/impl/PostMapValuesAdapter.java  |  99 +++++++++
 .../inputconfig/impl/PostMapValuesImpl.java     |  40 ++++
 .../org/apache/ambari/logfeeder/LogFeeder.java  |   2 +-
 .../ambari/logfeeder/common/ConfigBlock.java    | 107 +---------
 .../ambari/logfeeder/common/ConfigHandler.java  | 126 ++++++------
 .../ambari/logfeeder/common/ConfigItem.java     |  97 +++++++++
 .../apache/ambari/logfeeder/filter/Filter.java  |  53 +++--
 .../ambari/logfeeder/filter/FilterGrok.java     |  11 +-
 .../ambari/logfeeder/filter/FilterJSON.java     |   3 -
 .../ambari/logfeeder/filter/FilterKeyValue.java |  12 +-
 .../logfeeder/input/AbstractInputFile.java      |  16 +-
 .../apache/ambari/logfeeder/input/Input.java    | 112 ++++++----
 .../ambari/logfeeder/input/InputFile.java       |   6 +-
 .../ambari/logfeeder/input/InputS3File.java     |   5 +-
 .../ambari/logfeeder/input/InputSimulate.java   |  23 +--
 .../logfeeder/loglevelfilter/FilterLogData.java |   2 +-
 .../apache/ambari/logfeeder/mapper/Mapper.java  |   4 +-
 .../ambari/logfeeder/mapper/MapperDate.java     |  15 +-
 .../logfeeder/mapper/MapperFieldCopy.java       |  13 +-
 .../logfeeder/mapper/MapperFieldName.java       |  14 +-
 .../logfeeder/mapper/MapperFieldValue.java      |  14 +-
 .../apache/ambari/logfeeder/output/Output.java  |   3 -
 .../logfeeder/output/OutputLineFilter.java      |   2 +-
 .../ambari/logfeeder/output/OutputManager.java  |   8 +-
 .../ambari/logfeeder/output/OutputS3File.java   |  96 ++++-----
 .../ambari/logfeeder/util/LogFeederUtil.java    |  51 -----
 .../ambari/logfeeder/filter/FilterGrokTest.java |  37 ++--
 .../ambari/logfeeder/filter/FilterJSONTest.java |  14 +-
 .../logfeeder/filter/FilterKeyValueTest.java    |  41 ++--
 .../ambari/logfeeder/input/InputFileTest.java   |  22 +-
 .../logconfig/LogConfigHandlerTest.java         |  18 +-
 .../ambari/logfeeder/mapper/MapperDateTest.java |  44 ++--
 .../logfeeder/mapper/MapperFieldCopyTest.java   |  19 +-
 .../logfeeder/mapper/MapperFieldNameTest.java   |  19 +-
 .../logfeeder/mapper/MapperFieldValueTest.java  |  29 +--
 .../logfeeder/output/OutputLineFilterTest.java  |  22 +-
 .../logfeeder/output/OutputManagerTest.java     |  10 +-
 .../logfeeder/output/OutputS3FileTest.java      |  17 +-
 .../logsearch/manager/ShipperConfigManager.java |   7 +-
 .../model/common/LSServerConditions.java        |  41 ++++
 .../logsearch/model/common/LSServerFields.java  |  43 ++++
 .../logsearch/model/common/LSServerFilter.java  | 130 ++++++++++++
 .../model/common/LSServerFilterGrok.java        |  73 +++++++
 .../model/common/LSServerFilterJson.java        |  31 +++
 .../model/common/LSServerFilterKeyValue.java    |  71 +++++++
 .../logsearch/model/common/LSServerInput.java   | 149 ++++++++++++++
 .../model/common/LSServerInputConfig.java       |  87 ++++++++
 .../model/common/LSServerInputFile.java         |  31 +++
 .../model/common/LSServerInputFileBase.java     |  72 +++++++
 .../model/common/LSServerInputS3File.java       |  59 ++++++
 .../logsearch/model/common/LSServerMapDate.java |  61 ++++++
 .../model/common/LSServerMapField.java          |  30 +++
 .../model/common/LSServerMapFieldCopy.java      |  49 +++++
 .../model/common/LSServerMapFieldName.java      |  49 +++++
 .../model/common/LSServerMapFieldValue.java     |  61 ++++++
 .../model/common/LSServerPostMapValues.java     |  63 ++++++
 .../common/LSServerPostMapValuesSerializer.java |  39 ++++
 .../logsearch/rest/ShipperConfigResource.java   |   6 +-
 .../test-config/logfeeder/logfeeder.properties  |   1 +
 .../test-config/logsearch/logsearch.properties  |   4 -
 103 files changed, 3460 insertions(+), 621 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-api/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/pom.xml b/ambari-logsearch/ambari-logsearch-config-api/pom.xml
index 72fcc80..5355906 100644
--- a/ambari-logsearch/ambari-logsearch-config-api/pom.xml
+++ b/ambari-logsearch/ambari-logsearch-config-api/pom.xml
@@ -41,17 +41,9 @@
       <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.commons</groupId>
-      <artifactId>commons-lang3</artifactId>
-      <version>3.4</version>
-    </dependency>
-    <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>log4j</artifactId>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+      <version>1.7.7</version>
     </dependency>
   </dependencies>
 </project>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/InputConfigMonitor.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/InputConfigMonitor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/InputConfigMonitor.java
index 29a82a6..746c14c 100644
--- a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/InputConfigMonitor.java
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/InputConfigMonitor.java
@@ -19,19 +19,28 @@
 
 package org.apache.ambari.logsearch.config.api;
 
+import java.util.List;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;
+
 /**
  * Monitors input configuration changes.
  */
 public interface InputConfigMonitor {
   /**
+   * @return The list of JSON strings of all global configurations.
+   */
+  List<String> getGlobalConfigJsons();
+  
+  /**
    * Notification of a new input configuration.
    * 
    * @param serviceName The name of the service for which the input configuration was created.
    * @param inputConfig The input configuration.
    * @throws Exception
    */
-  void loadInputConfigs(String serviceName, String inputConfig) throws Exception;
-
+  void loadInputConfigs(String serviceName, InputConfig inputConfig) throws Exception;
+  
   /**
    * Notification of the removal of an input configuration.
    * 

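With this change a monitor implementation receives a typed InputConfig instead of a raw JSON string. A minimal sketch of an implementation (the class is illustrative; the removal callback's signature is assumed from context, since the excerpt cuts off before it):

  import java.util.Collections;
  import java.util.List;

  import org.apache.ambari.logsearch.config.api.InputConfigMonitor;
  import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;
  import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;

  public class LoggingInputConfigMonitor implements InputConfigMonitor {
    @Override
    public List<String> getGlobalConfigJsons() {
      return Collections.emptyList(); // no global configuration in this sketch
    }

    @Override
    public void loadInputConfigs(String serviceName, InputConfig inputConfig) throws Exception {
      // Descriptors are typed now; no JSON parsing is needed here.
      for (InputDescriptor input : inputConfig.getInput()) {
        System.out.println(serviceName + " input " + input.getType()
            + " reads " + input.getPath());
      }
    }

    @Override
    public void removeInputs(String serviceName) { // signature assumed from context
      System.out.println("inputs removed for " + serviceName);
    }
  }
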
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogSearchConfig.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogSearchConfig.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogSearchConfig.java
index 07921d0..4cbf21f 100644
--- a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogSearchConfig.java
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogSearchConfig.java
@@ -25,6 +25,7 @@ import java.util.Map;
 
 import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
 import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilterMap;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;
 
 /**
  * Log Search Configuration, which uploads and retrieves configurations and monitors their changes.
@@ -71,7 +72,7 @@ public interface LogSearchConfig extends Closeable {
    * @param serviceName The name of the service looked for.
    * @return The input configuration for the service if it exists, null otherwise.
    */
-  String getInputConfig(String clusterName, String serviceName);
+  InputConfig getInputConfig(String clusterName, String serviceName);
 
   /**
    * Uploads the input configuration for a service in a cluster.

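On the consumer side, callers that previously parsed the returned string can now walk the typed model directly. A brief sketch (cluster and service names are illustrative):

  import org.apache.ambari.logsearch.config.api.LogSearchConfig;
  import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;

  public class InputConfigConsumerSketch {
    static void printFilters(LogSearchConfig config) {
      InputConfig inputConfig = config.getInputConfig("cl1", "ambari_server");
      if (inputConfig != null) { // null when the service has no input config
        inputConfig.getFilter().forEach(f ->
            System.out.println("filter type: " + f.getFilter()));
      }
    }
  }
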
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogSearchConfigFactory.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogSearchConfigFactory.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogSearchConfigFactory.java
index 6ef4b90..947e7e7 100644
--- a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogSearchConfigFactory.java
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogSearchConfigFactory.java
@@ -22,14 +22,14 @@ package org.apache.ambari.logsearch.config.api;
 import java.util.Map;
 
 import org.apache.ambari.logsearch.config.api.LogSearchConfig.Component;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.log4j.Logger;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Factory class for LogSearchConfig.
  */
 public class LogSearchConfigFactory {
-  private static final Logger LOG = Logger.getLogger(LogSearchConfigFactory.class);
+  private static final Logger LOG = LoggerFactory.getLogger(LogSearchConfigFactory.class);
 
   /**
    * Creates a Log Search Configuration instance that implements {@link org.apache.ambari.logsearch.config.api.LogSearchConfig}.
@@ -47,7 +47,7 @@ public class LogSearchConfigFactory {
     try {
       LogSearchConfig logSearchConfig = null;
       String configClassName = properties.get("logsearch.config.class");
-      if (!StringUtils.isBlank(configClassName)) {
+      if (configClassName != null && !"".equals(configClassName.trim())) {
         Class<?> clazz = Class.forName(configClassName);
         if (LogSearchConfig.class.isAssignableFrom(clazz)) {
           logSearchConfig = (LogSearchConfig) clazz.newInstance();
@@ -61,7 +61,7 @@ public class LogSearchConfigFactory {
       logSearchConfig.init(component, properties);
       return logSearchConfig;
     } catch (Exception e) {
-      LOG.fatal("Could not initialize logsearch config.", e);
+      LOG.error("Could not initialize logsearch config.", e);
       throw e;
     }
   }

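The selection logic in the hunk above boils down to: instantiate the class named by logsearch.config.class when it is set and compatible, otherwise fall back to a default. A standalone sketch of just that reflective step (defaultClass stands in for the fallback handling outside the quoted hunk):

  import java.util.Map;

  import org.apache.ambari.logsearch.config.api.LogSearchConfig;

  public class ConfigSelectionSketch {
    static LogSearchConfig select(Map<String, String> properties,
        Class<? extends LogSearchConfig> defaultClass) throws Exception {
      String configClassName = properties.get("logsearch.config.class");
      // Same blank check the patch introduces in place of StringUtils.isBlank.
      if (configClassName != null && !"".equals(configClassName.trim())) {
        Class<?> clazz = Class.forName(configClassName);
        if (LogSearchConfig.class.isAssignableFrom(clazz)) {
          return (LogSearchConfig) clazz.newInstance();
        }
      }
      return defaultClass.newInstance();
    }
  }
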
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/Conditions.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/Conditions.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/Conditions.java
new file mode 100644
index 0000000..4da400a
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/Conditions.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+public interface Conditions {
+  Fields getFields();
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/Fields.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/Fields.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/Fields.java
new file mode 100644
index 0000000..5d34b1e
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/Fields.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+import java.util.Set;
+
+public interface Fields {
+  Set<String> getType();
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterDescriptor.java
new file mode 100644
index 0000000..632c6cb
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterDescriptor.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+import java.util.List;
+import java.util.Map;
+
+public interface FilterDescriptor {
+  String getFilter();
+
+  Conditions getConditions();
+
+  Integer getSortOrder();
+
+  String getSourceField();
+
+  Boolean isRemoveSourceField();
+
+  Map<String, ? extends List<? extends PostMapValues>> getPostMapValues();
+
+  Boolean isEnabled();
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterGrokDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterGrokDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterGrokDescriptor.java
new file mode 100644
index 0000000..e85ce97
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterGrokDescriptor.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+public interface FilterGrokDescriptor extends FilterDescriptor {
+  String getLog4jFormat();
+
+  String getMultilinePattern();
+
+  String getMessagePattern();
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterJsonDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterJsonDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterJsonDescriptor.java
new file mode 100644
index 0000000..08f1893
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterJsonDescriptor.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+public interface FilterJsonDescriptor extends FilterDescriptor {
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterKeyValueDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterKeyValueDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterKeyValueDescriptor.java
new file mode 100644
index 0000000..6edd140
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterKeyValueDescriptor.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+public interface FilterKeyValueDescriptor extends FilterDescriptor {
+  String getFieldSplit();
+
+  String getValueSplit();
+
+  String getValueBorders();
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputConfig.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputConfig.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputConfig.java
new file mode 100644
index 0000000..8126ac9
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputConfig.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+import java.util.List;
+
+public interface InputConfig {
+  List<? extends InputDescriptor> getInput();
+
+  List<? extends FilterDescriptor> getFilter();
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputDescriptor.java
new file mode 100644
index 0000000..c41da93
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputDescriptor.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+import java.util.Map;
+
+public interface InputDescriptor {
+  String getType();
+
+  String getRowtype();
+
+  String getPath();
+
+  Map<String, String> getAddFields();
+
+  String getSource();
+
+  Boolean isTail();
+
+  Boolean isGenEventMd5();
+
+  Boolean isUseEventMd5AsId();
+
+  String getStartPosition();
+
+  Boolean isCacheEnabled();
+
+  String getCacheKeyField();
+
+  Boolean getCacheLastDedupEnabled();
+
+  Integer getCacheSize();
+
+  Long getCacheDedupInterval();
+
+  Boolean isEnabled();
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputFileBaseDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputFileBaseDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputFileBaseDescriptor.java
new file mode 100644
index 0000000..a393dc7
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputFileBaseDescriptor.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+public interface InputFileBaseDescriptor extends InputDescriptor {
+  Boolean getProcessFile();
+
+  Boolean getCopyFile();
+
+  Integer getCheckpointIntervalMs();
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputFileDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputFileDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputFileDescriptor.java
new file mode 100644
index 0000000..0070ad9
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputFileDescriptor.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+public interface InputFileDescriptor extends InputFileBaseDescriptor {
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputS3FileDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputS3FileDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputS3FileDescriptor.java
new file mode 100644
index 0000000..b075629
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputS3FileDescriptor.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+public interface InputS3FileDescriptor extends InputFileBaseDescriptor {
+  String getS3AccessKey();
+
+  String getS3SecretKey();
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapDateDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapDateDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapDateDescriptor.java
new file mode 100644
index 0000000..f88435f
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapDateDescriptor.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+public interface MapDateDescriptor extends MapFieldDescriptor {
+  String getSourceDatePattern();
+
+  String getTargetDatePattern();
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldCopyDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldCopyDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldCopyDescriptor.java
new file mode 100644
index 0000000..596c173
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldCopyDescriptor.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+public interface MapFieldCopyDescriptor extends MapFieldDescriptor {
+  String getCopyName();
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldDescriptor.java
new file mode 100644
index 0000000..db086c5
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldDescriptor.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+public interface MapFieldDescriptor {
+  String getJsonName();
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldNameDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldNameDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldNameDescriptor.java
new file mode 100644
index 0000000..da8cd0d
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldNameDescriptor.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+public interface MapFieldNameDescriptor extends MapFieldDescriptor {
+  String getNewFieldName();
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldValueDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldValueDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldValueDescriptor.java
new file mode 100644
index 0000000..cf37e62
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldValueDescriptor.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+public interface MapFieldValueDescriptor extends MapFieldDescriptor {
+  String getPreValue();
+
+  String getPostValue();
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/PostMapValues.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/PostMapValues.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/PostMapValues.java
new file mode 100644
index 0000000..5be7287
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/PostMapValues.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+import java.util.List;
+
+public interface PostMapValues {
+  List<MapFieldDescriptor> getMappers();
+}

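The Map*Descriptor interfaces above form a small polymorphic family, and consumers dispatch on the concrete subtype. A hedged sketch of such a dispatch (the printing is illustrative; the real logfeeder mappers rewrite fields instead):

  import org.apache.ambari.logsearch.config.api.model.inputconfig.MapDateDescriptor;
  import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldCopyDescriptor;
  import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldDescriptor;
  import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldNameDescriptor;
  import org.apache.ambari.logsearch.config.api.model.inputconfig.PostMapValues;

  public class PostMapValuesSketch {
    static void describe(PostMapValues pmv) {
      for (MapFieldDescriptor mapper : pmv.getMappers()) {
        if (mapper instanceof MapDateDescriptor) {
          MapDateDescriptor d = (MapDateDescriptor) mapper;
          System.out.println("map date " + d.getSourceDatePattern()
              + " -> " + d.getTargetDatePattern());
        } else if (mapper instanceof MapFieldNameDescriptor) {
          System.out.println("rename to "
              + ((MapFieldNameDescriptor) mapper).getNewFieldName());
        } else if (mapper instanceof MapFieldCopyDescriptor) {
          System.out.println("copy to "
              + ((MapFieldCopyDescriptor) mapper).getCopyName());
        }
      }
    }
  }
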
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass1.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass1.java b/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass1.java
index fc3fe5b..d7e3c0a 100644
--- a/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass1.java
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass1.java
@@ -26,6 +26,7 @@ import org.apache.ambari.logsearch.config.api.InputConfigMonitor;
 import org.apache.ambari.logsearch.config.api.LogSearchConfig;
 import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
 import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilterMap;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;
 
 public class LogSearchConfigClass1 implements LogSearchConfig {
   @Override
@@ -52,7 +53,7 @@ public class LogSearchConfigClass1 implements LogSearchConfig {
   }
 
   @Override
-  public String getInputConfig(String clusterName, String serviceName) {
+  public InputConfig getInputConfig(String clusterName, String serviceName) {
     return null;
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass2.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass2.java b/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass2.java
index 346edb3..198c133 100644
--- a/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass2.java
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass2.java
@@ -26,6 +26,7 @@ import org.apache.ambari.logsearch.config.api.InputConfigMonitor;
 import org.apache.ambari.logsearch.config.api.LogSearchConfig;
 import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
 import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilterMap;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;
 
 public class LogSearchConfigClass2 implements LogSearchConfig {
   @Override
@@ -52,7 +53,7 @@ public class LogSearchConfigClass2 implements LogSearchConfig {
   }
 
   @Override
-  public String getInputConfig(String clusterName, String serviceName) {
+  public InputConfig getInputConfig(String clusterName, String serviceName) {
     return null;
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-zookeeper/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/pom.xml b/ambari-logsearch/ambari-logsearch-config-zookeeper/pom.xml
index 2c59a4a..7ecda60 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/pom.xml
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/pom.xml
@@ -71,8 +71,14 @@
       <version>2.12.0</version>
     </dependency>
     <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+      <version>1.7.7</version>
+    </dependency>
+    <dependency>
       <groupId>com.google.code.gson</groupId>
       <artifactId>gson</artifactId>
+      <version>2.6.2</version>
     </dependency>
   </dependencies>
 </project>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/LogSearchConfigZK.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/LogSearchConfigZK.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/LogSearchConfigZK.java
index 5e22374..4d10a5b 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/LogSearchConfigZK.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/LogSearchConfigZK.java
@@ -27,6 +27,10 @@ import java.util.TreeMap;
 import org.apache.ambari.logsearch.config.api.LogSearchConfig;
 import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
 import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilterMap;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputAdapter;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputConfigGson;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputConfigImpl;
 import org.apache.ambari.logsearch.config.api.InputConfigMonitor;
 import org.apache.ambari.logsearch.config.api.LogLevelFilterMonitor;
 import org.apache.commons.collections.MapUtils;
@@ -40,18 +44,23 @@ import org.apache.curator.framework.recipes.cache.TreeCacheEvent.Type;
 import org.apache.curator.framework.recipes.cache.TreeCacheListener;
 import org.apache.curator.retry.ExponentialBackoffRetry;
 import org.apache.curator.utils.ZKPaths;
-import org.apache.log4j.Logger;
 import org.apache.zookeeper.KeeperException.NodeExistsException;
 import org.apache.zookeeper.ZooDefs;
 import org.apache.zookeeper.data.ACL;
 import org.apache.zookeeper.data.Id;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Splitter;
 import com.google.gson.Gson;
 import com.google.gson.GsonBuilder;
+import com.google.gson.JsonArray;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParser;
 
 public class LogSearchConfigZK implements LogSearchConfig {
-  private static final Logger LOG = Logger.getLogger(LogSearchConfigZK.class);
+  private static final Logger LOG = LoggerFactory.getLogger(LogSearchConfigZK.class);
 
   private static final int SESSION_TIMEOUT = 15000;
   private static final int CONNECTION_TIMEOUT = 30000;
@@ -129,7 +138,16 @@ public class LogSearchConfigZK implements LogSearchConfig {
 
   @Override
   public void monitorInputConfigChanges(final InputConfigMonitor inputConfigMonitor,
-      final LogLevelFilterMonitor logLevelFilterMonitor ) throws Exception {
+      final LogLevelFilterMonitor logLevelFilterMonitor) throws Exception {
+    final JsonParser parser = new JsonParser();
+    final JsonArray globalConfigNode = new JsonArray();
+    for (String globalConfigJsonString : inputConfigMonitor.getGlobalConfigJsons()) {
+      JsonElement globalConfigJson = parser.parse(globalConfigJsonString);
+      globalConfigNode.add(globalConfigJson.getAsJsonObject().get("global"));
+    }
+    
+    createGlobalConfigNode(globalConfigNode);
+    
     TreeCacheListener listener = new TreeCacheListener() {
       public void childEvent(CuratorFramework client, TreeCacheEvent event) throws Exception {
         String nodeName = ZKPaths.getNodeFromPath(event.getData().getPath());
@@ -171,7 +189,16 @@ public class LogSearchConfigZK implements LogSearchConfig {
 
       private void addInputs(String serviceName, String inputConfig) {
         try {
-          inputConfigMonitor.loadInputConfigs(serviceName, inputConfig);
+          JsonElement inputConfigJson = parser.parse(inputConfig);
+          for (Map.Entry<String, JsonElement> typeEntry : inputConfigJson.getAsJsonObject().entrySet()) {
+            for (JsonElement e : typeEntry.getValue().getAsJsonArray()) {
+              for (JsonElement globalConfig : globalConfigNode) {
+                merge(globalConfig.getAsJsonObject(), e.getAsJsonObject());
+              }
+            }
+          }
+          
+          inputConfigMonitor.loadInputConfigs(serviceName, InputConfigGson.gson.fromJson(inputConfigJson, InputConfigImpl.class));
         } catch (Exception e) {
           LOG.error("Could not load input configuration for service " + serviceName + ":\n" + inputConfig, e);
         }
@@ -193,11 +220,39 @@ public class LogSearchConfigZK implements LogSearchConfig {
             break;
         }
       }
+
+      private void merge(JsonObject source, JsonObject target) {
+        for (Map.Entry<String, JsonElement> e : source.entrySet()) {
+          if (!target.has(e.getKey())) {
+            target.add(e.getKey(), e.getValue());
+          } else {
+            if (e.getValue().isJsonObject()) {
+              JsonObject valueJson = (JsonObject)e.getValue();
+              merge(valueJson, target.get(e.getKey()).getAsJsonObject());
+            }
+          }
+        }
+      }
     };
     cache.getListenable().addListener(listener);
     cache.start();
   }
 
+  private void createGlobalConfigNode(JsonArray globalConfigNode) {
+    String globalConfigNodePath = String.format("%s/%s/global", root, properties.get(CLUSTER_NAME_PROPERTY));
+    String data = InputConfigGson.gson.toJson(globalConfigNode);
+    
+    try {
+      if (cache.getCurrentData(globalConfigNodePath) != null) {
+        client.setData().forPath(globalConfigNodePath, data.getBytes());
+      } else {
+        client.create().creatingParentContainersIfNeeded().withACL(getAcls()).forPath(globalConfigNodePath, data.getBytes());
+      }
+    } catch (Exception e) {
+      LOG.warn("Exception during global config node creation/update", e);
+    }
+  }
+
   @Override
   public List<String> getServices(String clusterName) {
     String parentPath = String.format("%s/%s/input", root, clusterName);
@@ -206,9 +261,14 @@ public class LogSearchConfigZK implements LogSearchConfig {
   }
 
   @Override
-  public String getInputConfig(String clusterName, String serviceName) {
+  public InputConfig getInputConfig(String clusterName, String serviceName) {
+    String globalConfigNodePath = String.format("%s/%s/global", root, clusterName);
+    ChildData globalConfigData = cache.getCurrentData(globalConfigNodePath);
+    if (globalConfigData != null) {
+      InputAdapter.setGlobalConfigs((JsonArray) new JsonParser().parse(new String(globalConfigData.getData())));
+    }
+    
     ChildData childData = cache.getCurrentData(String.format("%s/%s/input/%s", root, clusterName, serviceName));
-    return childData == null ? null : new String(childData.getData());
+    return childData == null ? null : InputConfigGson.gson.fromJson(new String(childData.getData()), InputConfigImpl.class);
   }
 
   @Override

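The merge() helper above is the heart of the global config support: a key from
the global config is copied into an input descriptor only when the descriptor
does not already define it, and nested objects are merged recursively, so
service-specific values always win. A self-contained sketch of the same
semantics (assumes only Gson on the classpath; the sample JSON is made up):

  import java.util.Map;

  import com.google.gson.JsonElement;
  import com.google.gson.JsonObject;
  import com.google.gson.JsonParser;

  public class JsonMergeDemo {
    // Same semantics as LogSearchConfigZK.merge(): fill in missing keys from
    // source, recurse into objects that exist on both sides.
    static void merge(JsonObject source, JsonObject target) {
      for (Map.Entry<String, JsonElement> e : source.entrySet()) {
        if (!target.has(e.getKey())) {
          target.add(e.getKey(), e.getValue());
        } else if (e.getValue().isJsonObject()) {
          merge(e.getValue().getAsJsonObject(), target.get(e.getKey()).getAsJsonObject());
        }
      }
    }

    public static void main(String[] args) {
      JsonParser parser = new JsonParser();
      JsonObject global = parser.parse("{\"source\":\"file\",\"tail\":true}").getAsJsonObject();
      JsonObject input = parser.parse("{\"type\":\"ambari_server\",\"tail\":false}").getAsJsonObject();
      merge(global, input);
      // "source" is inherited from the global config; the service-specific
      // "tail" value is kept: {"type":"ambari_server","tail":false,"source":"file"}
      System.out.println(input);
    }
  }
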
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/ConditionsImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/ConditionsImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/ConditionsImpl.java
new file mode 100644
index 0000000..8bbff8f
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/ConditionsImpl.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.Conditions;
+
+import com.google.gson.annotations.Expose;
+
+public class ConditionsImpl implements Conditions {
+  @Expose
+  private FieldsImpl fields;
+
+  public FieldsImpl getFields() {
+    return fields;
+  }
+
+  public void setFields(FieldsImpl fields) {
+    this.fields = fields;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FieldsImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FieldsImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FieldsImpl.java
new file mode 100644
index 0000000..68cd0e2
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FieldsImpl.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import java.util.Set;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.Fields;
+
+import com.google.gson.annotations.Expose;
+
+public class FieldsImpl implements Fields {
+  @Expose
+  private Set<String> type;
+
+  public Set<String> getType() {
+    return type;
+  }
+
+  public void setType(Set<String> type) {
+    this.type = type;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterAdapter.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterAdapter.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterAdapter.java
new file mode 100644
index 0000000..b84403b
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterAdapter.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import java.lang.reflect.Type;
+
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+
+public class FilterAdapter implements JsonDeserializer<FilterDescriptorImpl> {
+  @Override
+  public FilterDescriptorImpl deserialize(JsonElement json, Type typeOfT, JsonDeserializationContext context) {
+    String filterType = json.getAsJsonObject().get("filter").getAsString();
+    switch (filterType) {
+      case "grok":
+        return (FilterDescriptorImpl)context.deserialize(json, FilterGrokDescriptorImpl.class);
+      case "keyvalue":
+        return (FilterDescriptorImpl)context.deserialize(json, FilterKeyValueDescriptorImpl.class);
+      case "json":
+        return (FilterDescriptorImpl)context.deserialize(json, FilterJsonDescriptorImpl.class);
+      default:
+        throw new IllegalArgumentException("Unknown filter type: " + filterType);
+    }
+  }
+}

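This adapter is what lets a single FilterDescriptorImpl target type fan out to
the concrete grok/keyvalue/json descriptor classes. A usage sketch, assuming
the impl classes from this commit are on the classpath (same package):

  import com.google.gson.Gson;
  import com.google.gson.GsonBuilder;

  public class FilterAdapterDemo {
    public static void main(String[] args) {
      Gson gson = new GsonBuilder()
          .registerTypeAdapter(FilterDescriptorImpl.class, new FilterAdapter())
          .excludeFieldsWithoutExposeAnnotation()
          .create();
      // "filter":"grok" routes deserialization to FilterGrokDescriptorImpl.
      FilterDescriptorImpl descriptor = gson.fromJson(
          "{\"filter\":\"grok\",\"message_pattern\":\"%{LOGLEVEL:level}\"}",
          FilterDescriptorImpl.class);
      System.out.println(descriptor.getClass().getSimpleName());
    }
  }

In practice this registration is done once in InputConfigGson (below), so
callers never need to build their own Gson instance.
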
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterDescriptorImpl.java
new file mode 100644
index 0000000..4e11715
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterDescriptorImpl.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.PostMapValues;
+
+import com.google.gson.annotations.Expose;
+import com.google.gson.annotations.SerializedName;
+
+public abstract class FilterDescriptorImpl implements FilterDescriptor {
+  @Expose
+  private String filter;
+
+  @Expose
+  private ConditionsImpl conditions;
+
+  @Expose
+  @SerializedName("sort_order")
+  private Integer sortOrder;
+
+  @Expose
+  @SerializedName("source_field")
+  private String sourceField;
+
+  @Expose
+  @SerializedName("remove_source_field")
+  private Boolean removeSourceField;
+
+  @Expose
+  @SerializedName("post_map_values")
+  private Map<String, List<PostMapValuesImpl>> postMapValues;
+
+  @Expose
+  @SerializedName("is_enabled")
+  private Boolean isEnabled;
+
+  public String getFilter() {
+    return filter;
+  }
+
+  public void setFilter(String filter) {
+    this.filter = filter;
+  }
+
+  public ConditionsImpl getConditions() {
+    return conditions;
+  }
+
+  public void setConditions(ConditionsImpl conditions) {
+    this.conditions = conditions;
+  }
+
+  public Integer getSortOrder() {
+    return sortOrder;
+  }
+
+  public void setSortOrder(Integer sortOrder) {
+    this.sortOrder = sortOrder;
+  }
+
+  public String getSourceField() {
+    return sourceField;
+  }
+
+  public void setSourceField(String sourceField) {
+    this.sourceField = sourceField;
+  }
+
+  public Boolean isRemoveSourceField() {
+    return removeSourceField;
+  }
+
+  public void setRemoveSourceField(Boolean removeSourceField) {
+    this.removeSourceField = removeSourceField;
+  }
+
+  public Map<String, ? extends List<? extends PostMapValues>> getPostMapValues() {
+    return postMapValues;
+  }
+
+  public void setPostMapValues(Map<String, List<PostMapValuesImpl>> postMapValues) {
+    this.postMapValues = postMapValues;
+  }
+
+  public Boolean isEnabled() {
+    return isEnabled;
+  }
+
+  public void setIsEnabled(Boolean isEnabled) {
+    this.isEnabled = isEnabled;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterGrokDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterGrokDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterGrokDescriptorImpl.java
new file mode 100644
index 0000000..7f40b7f
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterGrokDescriptorImpl.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterGrokDescriptor;
+
+import com.google.gson.annotations.Expose;
+import com.google.gson.annotations.SerializedName;
+
+public class FilterGrokDescriptorImpl extends FilterDescriptorImpl implements FilterGrokDescriptor {
+  @Expose
+  @SerializedName("log4j_format")
+  private String log4jFormat;
+
+  @Expose
+  @SerializedName("multiline_pattern")
+  private String multilinePattern;
+
+  @Expose
+  @SerializedName("message_pattern")
+  private String messagePattern;
+
+  @Override
+  public String getLog4jFormat() {
+    return log4jFormat;
+  }
+
+  public void setLog4jFormat(String log4jFormat) {
+    this.log4jFormat = log4jFormat;
+  }
+
+  @Override
+  public String getMultilinePattern() {
+    return multilinePattern;
+  }
+
+  public void setMultilinePattern(String multilinePattern) {
+    this.multilinePattern = multilinePattern;
+  }
+
+  @Override
+  public String getMessagePattern() {
+    return messagePattern;
+  }
+
+  public void setMessagePattern(String messagePattern) {
+    this.messagePattern = messagePattern;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterJsonDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterJsonDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterJsonDescriptorImpl.java
new file mode 100644
index 0000000..9bf1a2b
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterJsonDescriptorImpl.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterJsonDescriptor;
+
+public class FilterJsonDescriptorImpl extends FilterDescriptorImpl implements FilterJsonDescriptor {
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterKeyValueDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterKeyValueDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterKeyValueDescriptorImpl.java
new file mode 100644
index 0000000..8e89990
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterKeyValueDescriptorImpl.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterKeyValueDescriptor;
+
+import com.google.gson.annotations.Expose;
+import com.google.gson.annotations.SerializedName;
+
+public class FilterKeyValueDescriptorImpl extends FilterDescriptorImpl implements FilterKeyValueDescriptor {
+  @Expose
+  @SerializedName("field_split")
+  private String fieldSplit;
+
+  @Expose
+  @SerializedName("value_split")
+  private String valueSplit;
+
+  @Expose
+  @SerializedName("value_borders")
+  private String valueBorders;
+
+  public String getFieldSplit() {
+    return fieldSplit;
+  }
+
+  public void setFieldSplit(String fieldSplit) {
+    this.fieldSplit = fieldSplit;
+  }
+
+  public String getValueSplit() {
+    return valueSplit;
+  }
+
+  public void setValueSplit(String valueSplit) {
+    this.valueSplit = valueSplit;
+  }
+
+  public String getValueBorders() {
+    return valueBorders;
+  }
+
+  public void setValueBorders(String valueBorders) {
+    this.valueBorders = valueBorders;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputAdapter.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputAdapter.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputAdapter.java
new file mode 100644
index 0000000..86741c6
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputAdapter.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import java.lang.reflect.Type;
+
+import com.google.gson.JsonArray;
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+
+public class InputAdapter implements JsonDeserializer<InputDescriptorImpl> {
+  private static JsonArray globalConfigs;
+
+  public static void setGlobalConfigs(JsonArray globalConfigs) {
+    InputAdapter.globalConfigs = globalConfigs;
+  }
+  
+  @Override
+  public InputDescriptorImpl deserialize(JsonElement json, Type typeOfT, JsonDeserializationContext context) {
+    String source = null;
+    if (json.getAsJsonObject().has("source")) {
+      source = json.getAsJsonObject().get("source").getAsString();
+    } else {
+      for (JsonElement e : globalConfigs) {
+        if (e.getAsJsonObject().has("source")) {
+          source = e.getAsJsonObject().get("source").getAsString();
+          break;
+        }
+      }
+    }
+
+    if (source == null) {
+      throw new IllegalArgumentException("No input source found in the input config or the global configs");
+    }
+
+    switch (source) {
+      case "file":
+        return (InputDescriptorImpl)context.deserialize(json, InputFileDescriptorImpl.class);
+      case "s3_file":
+        return (InputDescriptorImpl)context.deserialize(json, InputS3FileDescriptorImpl.class);
+      default:
+        throw new IllegalArgumentException("Unknown input type: " + source);
+    }
+  }
+}

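Unlike FilterAdapter, InputAdapter has a fallback: if an input block carries no
"source" of its own, the discriminator is looked up in the global configs that
getInputConfig() installs via setGlobalConfigs(). A sketch under the same
assumptions (the JSON is illustrative):

  import com.google.gson.Gson;
  import com.google.gson.GsonBuilder;
  import com.google.gson.JsonArray;
  import com.google.gson.JsonParser;

  public class InputAdapterDemo {
    public static void main(String[] args) {
      // The global config supplies "source":"file" for inputs that omit it.
      JsonArray globals = (JsonArray) new JsonParser().parse("[{\"source\":\"file\"}]");
      InputAdapter.setGlobalConfigs(globals);

      Gson gson = new GsonBuilder()
          .registerTypeAdapter(InputDescriptorImpl.class, new InputAdapter())
          .excludeFieldsWithoutExposeAnnotation()
          .create();
      // No "source" in the block itself, so the global fallback selects
      // InputFileDescriptorImpl.
      InputDescriptorImpl input = gson.fromJson(
          "{\"type\":\"ambari_audit\",\"rowtype\":\"audit\"}", InputDescriptorImpl.class);
      System.out.println(input.getClass().getSimpleName());
    }
  }
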
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigGson.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigGson.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigGson.java
new file mode 100644
index 0000000..3b78aff
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigGson.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import java.lang.reflect.Type;
+import java.util.List;
+
+import com.google.common.reflect.TypeToken;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+
+/**
+ * Helper class to convert between a JSON string and the InputConfig classes.
+ */
+public class InputConfigGson {
+  public static Gson gson;
+  static {
+    Type inputType = new TypeToken<InputDescriptorImpl>() {}.getType();
+    Type filterType = new TypeToken<FilterDescriptorImpl>() {}.getType();
+    Type postMapValuesType = new TypeToken<List<PostMapValuesImpl>>() {}.getType();
+    gson = new GsonBuilder()
+        .registerTypeAdapter(inputType, new InputAdapter())
+        .registerTypeAdapter(filterType, new FilterAdapter())
+        .registerTypeAdapter(postMapValuesType, new PostMapValuesAdapter())
+        .setPrettyPrinting()
+        .excludeFieldsWithoutExposeAnnotation()
+        .create();
+  }
+}

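InputConfigGson is the single entry point the rest of the commit relies on
(see loadInputConfigs() and getInputConfig() in LogSearchConfigZK above). A
short round-trip sketch with a made-up minimal input config:

  import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;

  public class InputConfigGsonDemo {
    public static void main(String[] args) {
      String json = "{\"input\":[{\"type\":\"ambari_server\",\"source\":\"file\","
          + "\"path\":\"/var/log/ambari-server/ambari-server.log\"}],"
          + "\"filter\":[{\"filter\":\"json\",\"conditions\":"
          + "{\"fields\":{\"type\":[\"ambari_server\"]}}}]}";
      // "source":"file" selects InputFileDescriptorImpl and "filter":"json"
      // selects FilterJsonDescriptorImpl via the registered adapters.
      InputConfig config = InputConfigGson.gson.fromJson(json, InputConfigImpl.class);
      System.out.println(InputConfigGson.gson.toJson(config));  // pretty-printed
    }
  }
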
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigImpl.java
new file mode 100644
index 0000000..a4eba8e
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigImpl.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import java.util.List;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
+
+import com.google.gson.annotations.Expose;
+
+public class InputConfigImpl implements InputConfig {
+  @Expose
+  private List<InputDescriptorImpl> input;
+
+  @Expose
+  private List<FilterDescriptorImpl> filter;
+
+  @Override
+  public List<? extends InputDescriptor> getInput() {
+    return input;
+  }
+
+  public void setInput(List<InputDescriptorImpl> input) {
+    this.input = input;
+  }
+
+  @Override
+  public List<? extends FilterDescriptor> getFilter() {
+    return filter;
+  }
+
+  public void setFilter(List<FilterDescriptorImpl> filter) {
+    this.filter = filter;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/fd4a7a46/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputDescriptorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputDescriptorImpl.java
new file mode 100644
index 0000000..94dcc2a
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputDescriptorImpl.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import java.util.Map;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
+
+import com.google.gson.annotations.Expose;
+import com.google.gson.annotations.SerializedName;
+
+public abstract class InputDescriptorImpl implements InputDescriptor {
+  @Expose
+  private String type;
+
+  @Expose
+  private String rowtype;
+
+  @Expose
+  private String path;
+
+  @Expose
+  @SerializedName("add_fields")
+  private Map<String, String> addFields;
+  
+  @Expose
+  private String source;
+  
+  @Expose
+  private Boolean tail;
+  
+  @Expose
+  @SerializedName("gen_event_md5")
+  private Boolean genEventMd5;
+  
+  @Expose
+  @SerializedName("use_event_md5_as_id")
+  private Boolean useEventMd5AsId;
+  
+  @Expose
+  @SerializedName("start_position")
+  private String startPosition;
+
+  @Expose
+  @SerializedName("cache_enabled")
+  private Boolean cacheEnabled;
+
+  @Expose
+  @SerializedName("cache_key_field")
+  private String cacheKeyField;
+
+  @Expose
+  @SerializedName("cache_last_dedup_enabled")
+  private Boolean cacheLastDedupEnabled;
+
+  @Expose
+  @SerializedName("cache_size")
+  private Integer cacheSize;
+
+  @Expose
+  @SerializedName("cache_dedup_interval")
+  private Long cacheDedupInterval;
+
+  @Expose
+  @SerializedName("is_enabled")
+  private Boolean isEnabled;
+
+  public String getType() {
+    return type;
+  }
+
+  public void setType(String type) {
+    this.type = type;
+  }
+
+  public String getRowtype() {
+    return rowtype;
+  }
+
+  public void setRowtype(String rowType) {
+    this.rowtype = rowType;
+  }
+
+  public String getPath() {
+    return path;
+  }
+
+  public void setPath(String path) {
+    this.path = path;
+  }
+
+  public Map<String, String> getAddFields() {
+    return addFields;
+  }
+
+  public void setAddFields(Map<String, String> addFields) {
+    this.addFields = addFields;
+  }
+
+  public String getSource() {
+    return source;
+  }
+
+  public void setSource(String source) {
+    this.source = source;
+  }
+
+  public Boolean isTail() {
+    return tail;
+  }
+
+  public void setTail(Boolean tail) {
+    this.tail = tail;
+  }
+
+  public Boolean isGenEventMd5() {
+    return genEventMd5;
+  }
+
+  public void setGenEventMd5(Boolean genEventMd5) {
+    this.genEventMd5 = genEventMd5;
+  }
+
+  public Boolean isUseEventMd5AsId() {
+    return useEventMd5AsId;
+  }
+
+  public void setUseEventMd5AsId(Boolean useEventMd5AsId) {
+    this.useEventMd5AsId = useEventMd5AsId;
+  }
+
+  public String getStartPosition() {
+    return startPosition;
+  }
+
+  public void setStartPosition(String startPosition) {
+    this.startPosition = startPosition;
+  }
+
+  public Boolean isCacheEnabled() {
+    return cacheEnabled;
+  }
+
+  public void setCacheEnabled(Boolean cacheEnabled) {
+    this.cacheEnabled = cacheEnabled;
+  }
+
+  public String getCacheKeyField() {
+    return cacheKeyField;
+  }
+
+  public void setCacheKeyField(String cacheKeyField) {
+    this.cacheKeyField = cacheKeyField;
+  }
+
+  public Boolean getCacheLastDedupEnabled() {
+    return cacheLastDedupEnabled;
+  }
+
+  public void setCacheLastDedupEnabled(Boolean cacheLastDedupEnabled) {
+    this.cacheLastDedupEnabled = cacheLastDedupEnabled;
+  }
+
+  public Integer getCacheSize() {
+    return cacheSize;
+  }
+
+  public void setCacheSize(Integer cacheSize) {
+    this.cacheSize = cacheSize;
+  }
+
+  public Long getCacheDedupInterval() {
+    return cacheDedupInterval;
+  }
+
+  public void setCacheDedupInterval(Long cacheDedupInterval) {
+    this.cacheDedupInterval = cacheDedupInterval;
+  }
+
+  public Boolean isEnabled() {
+    return isEnabled;
+  }
+
+  public void setIsEnabled(Boolean isEnabled) {
+    this.isEnabled = isEnabled;
+  }
+}


[28/50] [abbrv] ambari git commit: Revert "ADDENDUM. AMBARI-21011. Upgrade Code. Append PATH to YARN config 'yarn.nodemanager.admin-env' for HDP 2.6."

Posted by ad...@apache.org.
Revert "ADDENDUM. AMBARI-21011. Upgrade Code. Append PATH to YARN config 'yarn.nodemanager.admin-env' for HDP 2.6."

This reverts commit d0a5cd4a6b22f0c8e02bb7ceb2d5de11314f542a.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0a61f985
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0a61f985
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0a61f985

Branch: refs/heads/ambari-rest-api-explorer
Commit: 0a61f9857ec95162801fc2c8aae05fef67fbbd52
Parents: d740384
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Sat May 20 00:28:27 2017 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Sat May 20 00:29:40 2017 -0700

----------------------------------------------------------------------
 .../stacks/HDP/2.3/upgrades/config-upgrade.xml         |  8 --------
 .../stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml |  6 ------
 .../resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml  |  1 -
 .../stacks/HDP/2.4/upgrades/config-upgrade.xml         |  6 +-----
 .../stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml |  6 ------
 .../resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml  |  1 -
 .../stacks/HDP/2.5/upgrades/config-upgrade.xml         |  8 --------
 .../stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml |  6 ------
 .../resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml  |  4 ----
 .../stacks/HDP/2.6/upgrades/config-upgrade.xml         | 13 -------------
 .../stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml |  7 -------
 .../resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml  |  1 -
 12 files changed, 1 insertion(+), 66 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0a61f985/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
index 98bb056..8b5c07d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
@@ -546,14 +546,6 @@
           </definition>
         </changes>
       </component>
-      <component name="NODEMANAGER">
-        <changes>
-          <definition xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env">
-            <type>yarn-site</type>
-            <insert key="yarn.nodemanager.admin-env" value=",PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:$PATH" insert-type="append" newline-before="false" newline-after="false" />
-          </definition>
-        </changes>
-      </component>
     </service>
 
     <service name="MAPREDUCE2">

http://git-wip-us.apache.org/repos/asf/ambari/blob/0a61f985/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
index 4d2b3ec..5aa08c5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
@@ -353,12 +353,6 @@
         </task>
       </execute-stage>
 
-      <execute-stage service="YARN" component="NODEMANAGER" title="Apply config changes for YARN NM admin env">
-        <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env">
-          <summary>Updating YARN NodeManager admin env config</summary>
-        </task>
-      </execute-stage>
-
       <!--Yarn Apptimeline server-->
       <execute-stage service="YARN" component="APP_TIMELINE_SERVER" title="Apply config changes for App timeline server">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FixYarnWebServiceUrl">

http://git-wip-us.apache.org/repos/asf/ambari/blob/0a61f985/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
index f1dd943..d98bb53 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
@@ -789,7 +789,6 @@
       <component name="NODEMANAGER">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_5_0_0_add_spark2_yarn_shuffle"/>
-          <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env"/>
         </pre-upgrade>
 
         <pre-downgrade/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0a61f985/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
index b448a2d..b3d19d4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
@@ -332,11 +332,7 @@
             <set key="yarn.nodemanager.aux-services" value="mapreduce_shuffle,spark_shuffle,spark2_shuffle"/>
             <!-- Ideally we need to append spark2_shuffle to the existing value -->
           </definition>
-          <definition xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env">
-            <type>yarn-site</type>
-            <insert key="yarn.nodemanager.admin-env" value=",PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:$PATH" insert-type="append" newline-before="false" newline-after="false" />
-          </definition>
-      </changes>
+        </changes>
       </component>
     </service>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/0a61f985/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
index 4920f12..4a2a502 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
@@ -331,12 +331,6 @@
         </task>
       </execute-stage>
 
-      <execute-stage service="YARN" component="NODEMANAGER" title="Apply config changes for YARN NM admin env">
-        <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env">
-          <summary>Updating YARN NodeManager admin env config</summary>
-        </task>
-      </execute-stage>
-
       <execute-stage service="MAPREDUCE2" component="MAPREDUCE2_CLIENT" title="Apply config changes for Mapreduce2 client">
         <task xsi:type="configure" id="hdp_2_6_0_0_mapreduce_job_queuename">
           <summary>Adding queue customization property</summary>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0a61f985/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
index 6acedc9..1eb9836 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
@@ -794,7 +794,6 @@
       <component name="NODEMANAGER">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_5_0_0_add_spark2_yarn_shuffle"/>
-          <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env"/>
         </pre-upgrade>
 
         <pre-downgrade/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0a61f985/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
index 61bd581..88b8a35 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
@@ -206,14 +206,6 @@
           </definition>
         </changes>
       </component>
-      <component name="NODEMANAGER">
-        <changes>
-          <definition xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env">
-            <type>yarn-site</type>
-            <insert key="yarn.nodemanager.admin-env" value=",PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:$PATH" insert-type="append" newline-before="false" newline-after="false" />
-          </definition>
-        </changes>
-      </component>
     </service>
 
     <service name="MAPREDUCE2">

http://git-wip-us.apache.org/repos/asf/ambari/blob/0a61f985/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
index d617a31..8c659ee 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
@@ -345,12 +345,6 @@
         </task>
       </execute-stage>
 
-      <execute-stage service="YARN" component="NODEMANAGER" title="Apply config changes for YARN NM admin env">
-        <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env">
-          <summary>Updating YARN NM admin env config</summary>
-        </task>
-      </execute-stage>
-
       <execute-stage>
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FixCapacitySchedulerOrderingPolicy">
           <summary>Validate Root Queue Ordering Policy</summary>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0a61f985/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
index fb854b9..3054ca3 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
@@ -721,10 +721,6 @@
       </component>
 
       <component name="NODEMANAGER">
-        <pre-upgrade>
-          <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env"/>
-        </pre-upgrade>
-        <pre-downgrade />
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0a61f985/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
index a8ac1bc..a6b7523 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
@@ -117,19 +117,6 @@
           </definition>
         </changes>
       </component>
-      <component name="NODEMANAGER">
-        <changes>
-          <definition xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env">
-            <type>yarn-site</type>
-            <insert key="yarn.nodemanager.admin-env" value=",PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:$PATH" insert-type="append" newline-before="false" newline-after="false" />
-          </definition>
-          <definition xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_llap_mem">
-            <type>yarn-site</type>
-            <set key="yarn.nodemanager.kill-escape.launch-command-line" value="slider-agent,LLAP"/>
-            <set key="yarn.nodemanager.kill-escape.user" value="hive"/>
-          </definition>
-        </changes>
-      </component>
     </service>
 
     <service name="KAFKA">

http://git-wip-us.apache.org/repos/asf/ambari/blob/0a61f985/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
index ae7ffc5..1cdd184 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
@@ -328,13 +328,6 @@
       </execute-stage>
 
       <!-- YARN -->
-      <execute-stage service="YARN" component="NODEMANAGER" title="Apply config changes for YARN NM admin env">
-        <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env">
-          <summary>Updating YARN NodeManager admin env config</summary>
-        </task>
-      </execute-stage>
-
-      <!-- YARN -->
       <execute-stage service="YARN" component="NODEMANAGER" title="Apply config changes for YARN NM">
         <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_llap_mem">
           <summary>Updating YARN NodeManager config for LLAP</summary>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0a61f985/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
index c2ae825..3e7e3d7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
@@ -696,7 +696,6 @@
 
       <component name="NODEMANAGER">
         <pre-upgrade>
-          <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env"/>
           <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_llap_mem"/>
         </pre-upgrade>
         <pre-downgrade/> <!--  no-op to prevent config changes on downgrade -->
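
For context on what was dropped here: the removed configure task used insert-type="append" to tack a PATH fragment onto 'yarn.nodemanager.admin-env' (see the config-upgrade.xml hunk above). The append semantics can be sketched in plain JavaScript; the helper function and the starting value below are illustrative, not Ambari API:

// Illustrative sketch only: models what the removed configure task
// (insert-type="append", no newline before or after) did to yarn-site.
var yarnSite = {
  // starting value chosen for illustration
  'yarn.nodemanager.admin-env': 'MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX'
};
function appendInsert(config, key, value) {
  // append semantics: concatenate onto the existing value as-is
  config[key] = (config[key] || '') + value;
}
appendInsert(yarnSite, 'yarn.nodemanager.admin-env',
  ',PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:$PATH');
console.log(yarnSite['yarn.nodemanager.admin-env']);
// MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX,PATH=/usr/local/sbin:...:$PATH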


[10/50] [abbrv] ambari git commit: AMBARI-21001: Hive 1.5.0 view does not load in Internet Explorer 11 (sangeetar)

Posted by ad...@apache.org.
AMBARI-21001: Hive 1.5.0 view does not load in Internet Explorer 11 (sangeetar)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/dad74757
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/dad74757
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/dad74757

Branch: refs/heads/ambari-rest-api-explorer
Commit: dad74757d7ab067d5ecfb00665a496332da4ece5
Parents: 292db86
Author: Sangeeta Ravindran <sa...@apache.org>
Authored: Wed May 17 22:46:00 2017 -0700
Committer: Sangeeta Ravindran <sa...@apache.org>
Committed: Wed May 17 22:46:00 2017 -0700

----------------------------------------------------------------------
 .../hive-next/src/main/resources/ui/hive-web/app/routes/splash.js  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/dad74757/contrib/views/hive-next/src/main/resources/ui/hive-web/app/routes/splash.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive-next/src/main/resources/ui/hive-web/app/routes/splash.js b/contrib/views/hive-next/src/main/resources/ui/hive-web/app/routes/splash.js
index 087bab3..34379d2 100644
--- a/contrib/views/hive-next/src/main/resources/ui/hive-web/app/routes/splash.js
+++ b/contrib/views/hive-next/src/main/resources/ui/hive-web/app/routes/splash.js
@@ -98,7 +98,7 @@ export default Ember.Route.extend({
     }
 
     this.fetchServiceCheckPolicy()
-      .then((data) => {
+      .then (function(data) {
         var numberOfChecks = 0;
         var serviceCheckPolicy = data.serviceCheckPolicy;
         for (var serviceCheck in serviceCheckPolicy) {
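
The one-line fix replaces an ES2015 arrow function with a classic function expression: IE11 never learned ES2015 syntax, so an untranspiled arrow literal is a hard parse error that keeps the whole splash route from loading. One caveat worth noting: arrows inherit the enclosing 'this', while 'function' gets its own, so a callback body that referenced 'this' would also need 'var self = this;' or '.bind(this)'. A minimal sketch, with a placeholder method name:

// Parse error on IE11 if shipped untranspiled:
//   this.fetchServiceCheckPolicy().then((data) => { ... });

// ES5-safe equivalent; `self` preserves the route's `this` for the callback.
var self = this;
this.fetchServiceCheckPolicy().then(function (data) {
  // use `self`, not `this`, to reach the route from inside the callback
  self.processPolicy(data); // processPolicy is a placeholder name
});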


[45/50] [abbrv] ambari git commit: AMBARI-21082 - Ambari 3.0: Outstanding wizard issues (rzang)

Posted by ad...@apache.org.
AMBARI-21082 - Ambari 3.0: Outstanding wizard issues (rzang)

Change-Id: Ia6e54d7dbd529cce59d3e2cdcc8fd6ed6a04ce1b


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9415478d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9415478d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9415478d

Branch: refs/heads/ambari-rest-api-explorer
Commit: 9415478dca03caf7f55ba21e8f00d954dc117757
Parents: 0626b78
Author: Richard Zang <rz...@apache.org>
Authored: Mon May 22 19:29:03 2017 -0700
Committer: Richard Zang <rz...@apache.org>
Committed: Mon May 22 19:29:03 2017 -0700

----------------------------------------------------------------------
 ambari-web/app/styles/application.less    | 11 ++++++++++-
 ambari-web/app/templates/wizard/step4.hbs | 16 ++++++++--------
 ambari-web/app/views/wizard/step4_view.js |  7 ++++++-
 3 files changed, 24 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9415478d/ambari-web/app/styles/application.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/application.less b/ambari-web/app/styles/application.less
index 2fc1cf2..95990b8 100644
--- a/ambari-web/app/styles/application.less
+++ b/ambari-web/app/styles/application.less
@@ -2671,4 +2671,13 @@ a.abort-icon:hover {
   &.overlay-visible {
     display: block;
   }
-}
\ No newline at end of file
+}
+
+.step-marker {
+  .step-index {
+    display: block;
+    margin-top: -1px;
+    margin-left: 0.3px;
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/9415478d/ambari-web/app/templates/wizard/step4.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/wizard/step4.hbs b/ambari-web/app/templates/wizard/step4.hbs
index 9f1d7df..5a08250 100644
--- a/ambari-web/app/templates/wizard/step4.hbs
+++ b/ambari-web/app/templates/wizard/step4.hbs
@@ -34,17 +34,17 @@
         </tr>
         </thead>
         <tbody>
-        {{#each controller}}
+        {{#each service in controller}}
           {{#unless isHiddenOnSelectServicePage}}
-            <tr {{QAAttr "service-row"}} {{bindAttr class="isSelected:active isSelected:service-selected"}}>
-              <td {{QAAttr "service-name"}}>{{displayNameOnSelectServicePage}}</td>
-              <td {{QAAttr "service-version"}}>{{serviceVersionDisplay}}</td>
-              <td {{QAAttr "service-description"}}>{{{comments}}}</td>
+            <tr {{QAAttr "service-row"}} {{bindAttr class="service.isSelected:active service.isSelected:service-selected"}} {{action toggleCheckBox service target="view"}}>
+              <td {{QAAttr "service-name"}}>{{service.displayNameOnSelectServicePage}}</td>
+              <td {{QAAttr "service-version"}}>{{service.serviceVersionDisplay}}</td>
+              <td {{QAAttr "service-description"}}>{{{service.comments}}}</td>
               <td>
                 <div class="checkbox">
-                  {{view App.CheckboxView checkboxClassNamesBinding="serviceName" data-qa="toggle-service"
-                  disabledBinding="isDisabled"
-                  checkedBinding="isSelected"
+                  {{view App.CheckboxView checkboxClassNamesBinding="service.serviceName" data-qa="toggle-service"
+                  disabledBinding="service.isDisabled"
+                  checkedBinding="service.isSelected"
                   }}
                 </div>
               </td>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9415478d/ambari-web/app/views/wizard/step4_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/wizard/step4_view.js b/ambari-web/app/views/wizard/step4_view.js
index 137b9f5..928d46c 100644
--- a/ambari-web/app/views/wizard/step4_view.js
+++ b/ambari-web/app/views/wizard/step4_view.js
@@ -21,6 +21,11 @@ var App = require('app');
 
 App.WizardStep4View = Em.View.extend({
 
-  templateName: require('templates/wizard/step4')
+  templateName: require('templates/wizard/step4'),
 
+  toggleCheckBox: function(event) {
+    if (event.context.get('isDisabled')) { return; }
+    var isSelected = event.context.get('isSelected');
+    event.context.set('isSelected', !isSelected);
+  }
 });
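
Net effect of the three changes: the template loop now names its item ('{{#each service in controller}}'), every binding is qualified with 'service.', and clicking anywhere in a row fires 'toggleCheckBox' on the view with that row's service as 'event.context'. A hedged usage sketch of the new handler; the objects below are stand-ins, not real wizard models:

// Illustrative only: exercising the new view handler directly.
var view = App.WizardStep4View.create();
var service = Em.Object.create({ isDisabled: false, isSelected: false });

view.toggleCheckBox({ context: service });
console.log(service.get('isSelected')); // true: a row click toggles selection

service.set('isDisabled', true);
view.toggleCheckBox({ context: service });
console.log(service.get('isSelected')); // still true: disabled services are left alone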


[26/50] [abbrv] ambari git commit: AMBARI-21068 : Kafka broker goes down after Ambari upgrade from 2.5.0 to 2.5.1 due to missing 'kafka.timeline.metrics.instanceId' property. (dsen via avijayan)

Posted by ad...@apache.org.
AMBARI-21068 : Kafka broker goes down after Ambari upgrade from 2.5.0 to 2.5.1 due to missing 'kafka.timeline.metrics.instanceId' property. (dsen via avijayan)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c9f705de
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c9f705de
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c9f705de

Branch: refs/heads/ambari-rest-api-explorer
Commit: c9f705de801aac16f7da4550bcd180ff56b8c685
Parents: ae40bed
Author: Aravindan Vijayan <av...@hortonworks.com>
Authored: Fri May 19 16:39:55 2017 -0700
Committer: Aravindan Vijayan <av...@hortonworks.com>
Committed: Fri May 19 16:39:55 2017 -0700

----------------------------------------------------------------------
 .../org/apache/ambari/server/upgrade/UpgradeCatalog251.java     | 1 +
 .../org/apache/ambari/server/upgrade/UpgradeCatalog251Test.java | 5 +++++
 2 files changed, 6 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c9f705de/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
index 5ed33a8..5e2eb16 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
@@ -104,6 +104,7 @@ public class UpgradeCatalog251 extends AbstractUpgradeCatalog {
    */
   @Override
   protected void executeDMLUpdates() throws AmbariException, SQLException {
+    addNewConfigurationsFromXml();
     updateKAFKAConfigs();
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9f705de/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog251Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog251Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog251Test.java
index d725ec4..fda5f0e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog251Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog251Test.java
@@ -179,11 +179,16 @@ public class UpgradeCatalog251Test {
   @Test
   public void testExecuteDMLUpdates() throws Exception {
     Method updateKAFKAConfigs = UpgradeCatalog251.class.getDeclaredMethod("updateKAFKAConfigs");
+    Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
 
     UpgradeCatalog251 upgradeCatalog251 = createMockBuilder(UpgradeCatalog251.class)
         .addMockedMethod(updateKAFKAConfigs)
+        .addMockedMethod(addNewConfigurationsFromXml)
         .createMock();
 
+    upgradeCatalog251.addNewConfigurationsFromXml();
+    expectLastCall().once();
+
     Field field = AbstractUpgradeCatalog.class.getDeclaredField("dbAccessor");
     field.set(upgradeCatalog251, dbAccessor);
 


[11/50] [abbrv] ambari git commit: AMBARI-20758 Aggregate local metrics for minute aggregation time window (additional patch) (dsen)

Posted by ad...@apache.org.
AMBARI-20758 Aggregate local metrics for minute aggregation time window (additional patch) (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2e27f661
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2e27f661
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2e27f661

Branch: refs/heads/ambari-rest-api-explorer
Commit: 2e27f661947cb72f4004eb17903caf1f76012468
Parents: dad7475
Author: Dmytro Sen <ds...@apache.org>
Authored: Thu May 18 13:47:53 2017 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Thu May 18 13:47:53 2017 +0300

----------------------------------------------------------------------
 ambari-metrics/ambari-metrics-host-aggregator/pom.xml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2e27f661/ambari-metrics/ambari-metrics-host-aggregator/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/pom.xml b/ambari-metrics/ambari-metrics-host-aggregator/pom.xml
index c2c7897..0598bef 100644
--- a/ambari-metrics/ambari-metrics-host-aggregator/pom.xml
+++ b/ambari-metrics/ambari-metrics-host-aggregator/pom.xml
@@ -29,7 +29,7 @@
     <artifactId>ambari-metrics-host-aggregator</artifactId>
     <packaging>jar</packaging>
 
-    <name>ambari-metrics-host-aggregator</name>
+    <name>Ambari Metrics Host Aggregator</name>
     <url>http://maven.apache.org</url>
 
     <properties>
@@ -92,7 +92,7 @@
                 <artifactId>maven-shade-plugin</artifactId>
                 <version>1.6</version>
                 <configuration>
-                    <createDependencyReducedPom>true</createDependencyReducedPom>
+                    <createDependencyReducedPom>false</createDependencyReducedPom>
                     <filters>
                         <filter>
                             <artifact>*:*</artifact>


[47/50] [abbrv] ambari git commit: AMBARI-20812. Hive view 1.5 does not work in Safari 8 and IE11 (pallavkul)

Posted by ad...@apache.org.
AMBARI-20812. Hive view 1.5 does not work in Safari 8 and IE11 (pallavkul)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/32501f68
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/32501f68
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/32501f68

Branch: refs/heads/ambari-rest-api-explorer
Commit: 32501f68e6ecda58bf3a6eef4be10264d0bd0669
Parents: 201677b
Author: pallavkul <pa...@gmail.com>
Authored: Tue May 23 12:27:34 2017 +0530
Committer: pallavkul <pa...@gmail.com>
Committed: Tue May 23 12:27:34 2017 +0530

----------------------------------------------------------------------
 .../src/main/resources/ui/hive-web/Brocfile.js  |   1 +
 .../ui/hive-web/vendor/browser-pollyfills.js    | 213 +++++++++++++++++++
 2 files changed, 214 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/32501f68/contrib/views/hive-next/src/main/resources/ui/hive-web/Brocfile.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive-next/src/main/resources/ui/hive-web/Brocfile.js b/contrib/views/hive-next/src/main/resources/ui/hive-web/Brocfile.js
index 318d1f8..791c88d 100644
--- a/contrib/views/hive-next/src/main/resources/ui/hive-web/Brocfile.js
+++ b/contrib/views/hive-next/src/main/resources/ui/hive-web/Brocfile.js
@@ -50,5 +50,6 @@ app.import('vendor/codemirror/show-hint.js');
 app.import('vendor/codemirror/codemirror.css');
 app.import('vendor/codemirror/show-hint.css');
 app.import('vendor/dagre.min.js');
+app.import('vendor/browser-pollyfills.js');
 
 module.exports = app.toTree();

http://git-wip-us.apache.org/repos/asf/ambari/blob/32501f68/contrib/views/hive-next/src/main/resources/ui/hive-web/vendor/browser-pollyfills.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive-next/src/main/resources/ui/hive-web/vendor/browser-pollyfills.js b/contrib/views/hive-next/src/main/resources/ui/hive-web/vendor/browser-pollyfills.js
new file mode 100644
index 0000000..88a59c1
--- /dev/null
+++ b/contrib/views/hive-next/src/main/resources/ui/hive-web/vendor/browser-pollyfills.js
@@ -0,0 +1,213 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+if (!String.prototype.startsWith) {
+  String.prototype.startsWith = function (searchString, position) {
+    position = position || 0;
+    return this.substr(position, searchString.length) === searchString;
+  };
+}
+
+if (!String.prototype.endsWith) {
+  String.prototype.endsWith = function (searchString, position) {
+    var subjectString = this.toString();
+    if (typeof position !== 'number' || !isFinite(position) || Math.floor(position) !== position || position > subjectString.length) {
+      position = subjectString.length;
+    }
+    position -= searchString.length;
+    var lastIndex = subjectString.lastIndexOf(searchString, position);
+    return lastIndex !== -1 && lastIndex === position;
+  };
+}
+
+if (typeof Object.assign != 'function') {
+  Object.assign = function (target, varArgs) { // .length of function is 2
+    'use strict';
+    if (target == null) { // TypeError if undefined or null
+      throw new TypeError('Cannot convert undefined or null to object');
+    }
+
+    var to = Object(target);
+
+    for (var index = 1; index < arguments.length; index++) {
+      var nextSource = arguments[index];
+
+      if (nextSource != null) { // Skip over if undefined or null
+        for (var nextKey in nextSource) {
+          // Avoid bugs when hasOwnProperty is shadowed
+          if (Object.prototype.hasOwnProperty.call(nextSource, nextKey)) {
+            to[nextKey] = nextSource[nextKey];
+          }
+        }
+      }
+    }
+    return to;
+  };
+}
+
+
+if (!Array.from) {
+  Array.from = (function () {
+    var toStr = Object.prototype.toString;
+    var isCallable = function (fn) {
+      return typeof fn === 'function' || toStr.call(fn) === '[object Function]';
+    };
+    var toInteger = function (value) {
+      var number = Number(value);
+      if (isNaN(number)) {
+        return 0;
+      }
+      if (number === 0 || !isFinite(number)) {
+        return number;
+      }
+      return (number > 0 ? 1 : -1) * Math.floor(Math.abs(number));
+    };
+    var maxSafeInteger = Math.pow(2, 53) - 1;
+    var toLength = function (value) {
+      var len = toInteger(value);
+      return Math.min(Math.max(len, 0), maxSafeInteger);
+    };
+
+    // The length property of the from method is 1.
+    return function from(arrayLike/*, mapFn, thisArg */) {
+      // 1. Let C be the this value.
+      var C = this;
+
+      // 2. Let items be ToObject(arrayLike).
+      var items = Object(arrayLike);
+
+      // 3. ReturnIfAbrupt(items).
+      if (arrayLike == null) {
+        throw new TypeError('Array.from requires an array-like object - not null or undefined');
+      }
+
+      // 4. If mapfn is undefined, then let mapping be false.
+      var mapFn = arguments.length > 1 ? arguments[1] : void undefined;
+      var T;
+      if (typeof mapFn !== 'undefined') {
+        // 5. else
+        // 5. a If IsCallable(mapfn) is false, throw a TypeError exception.
+        if (!isCallable(mapFn)) {
+          throw new TypeError('Array.from: when provided, the second argument must be a function');
+        }
+
+        // 5. b. If thisArg was supplied, let T be thisArg; else let T be undefined.
+        if (arguments.length > 2) {
+          T = arguments[2];
+        }
+      }
+
+      // 10. Let lenValue be Get(items, "length").
+      // 11. Let len be ToLength(lenValue).
+      var len = toLength(items.length);
+
+      // 13. If IsConstructor(C) is true, then
+      // 13. a. Let A be the result of calling the [[Construct]] internal method
+      // of C with an argument list containing the single item len.
+      // 14. a. Else, Let A be ArrayCreate(len).
+      var A = isCallable(C) ? Object(new C(len)) : new Array(len);
+
+      // 16. Let k be 0.
+      var k = 0;
+      // 17. Repeat, while k < len… (also steps a - h)
+      var kValue;
+      while (k < len) {
+        kValue = items[k];
+        if (mapFn) {
+          A[k] = typeof T === 'undefined' ? mapFn(kValue, k) : mapFn.call(T, kValue, k);
+        } else {
+          A[k] = kValue;
+        }
+        k += 1;
+      }
+      // 18. Let putStatus be Put(A, "length", len, true).
+      A.length = len;
+      // 20. Return A.
+      return A;
+    };
+  }());
+}
+
+Number.isNaN = Number.isNaN || function (value) {
+    return typeof value === 'number' && isNaN(value);
+  }
+
+
+if (!String.fromCodePoint) {
+  (function () {
+    var defineProperty = (function () {
+      // IE 8 only supports `Object.defineProperty` on DOM elements
+      try {
+        var object = {};
+        var $defineProperty = Object.defineProperty;
+        var result = $defineProperty(object, object, object) && $defineProperty;
+      } catch (error) {
+      }
+      return result;
+    }());
+    var stringFromCharCode = String.fromCharCode;
+    var floor = Math.floor;
+    var fromCodePoint = function () {
+      var MAX_SIZE = 0x4000;
+      var codeUnits = [];
+      var highSurrogate;
+      var lowSurrogate;
+      var index = -1;
+      var length = arguments.length;
+      if (!length) {
+        return '';
+      }
+      var result = '';
+      while (++index < length) {
+        var codePoint = Number(arguments[index]);
+        if (
+          !isFinite(codePoint) ||       // `NaN`, `+Infinity`, or `-Infinity`
+          codePoint < 0 ||              // not a valid Unicode code point
+          codePoint > 0x10FFFF ||       // not a valid Unicode code point
+          floor(codePoint) != codePoint // not an integer
+        ) {
+          throw RangeError('Invalid code point: ' + codePoint);
+        }
+        if (codePoint <= 0xFFFF) { // BMP code point
+          codeUnits.push(codePoint);
+        } else { // Astral code point; split in surrogate halves
+          // http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
+          codePoint -= 0x10000;
+          highSurrogate = (codePoint >> 10) + 0xD800;
+          lowSurrogate = (codePoint % 0x400) + 0xDC00;
+          codeUnits.push(highSurrogate, lowSurrogate);
+        }
+        if (index + 1 == length || codeUnits.length > MAX_SIZE) {
+          result += stringFromCharCode.apply(null, codeUnits);
+          codeUnits.length = 0;
+        }
+      }
+      return result;
+    };
+    if (defineProperty) {
+      defineProperty(String, 'fromCodePoint', {
+        'value': fromCodePoint,
+        'configurable': true,
+        'writable': true
+      });
+    } else {
+      String.fromCodePoint = fromCodePoint;
+    }
+  }());
+}
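
With the Brocfile import in place, the shims above give the Hive view the handful of ES2015-era APIs it relies on, even on IE11 and Safari 8. A quick smoke test of what each polyfill provides (illustrative values only; on browsers without the native APIs, each call falls back to the shim):

console.log('hive-view'.startsWith('hive'));            // true
console.log('splash.js'.endsWith('.js'));               // true
console.log(Object.assign({}, {a: 1}, {b: 2}));         // {a: 1, b: 2}
console.log(Array.from('abc', function (c) { return c.toUpperCase(); })); // ['A', 'B', 'C']
console.log(Number.isNaN(NaN), Number.isNaN('NaN'));    // true false
console.log(String.fromCodePoint(0x1F600));             // "\uD83D\uDE00" (surrogate pair)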


[41/50] [abbrv] ambari git commit: AMBARI-21060. HDP 3.0 TP - create service definition for Oozie with configs, kerberos, widgets, etc. (vbrodetskyi)

Posted by ad...@apache.org.
AMBARI-21060. HDP 3.0 TP - create service definition for Oozie with configs, kerberos, widgets, etc. (vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/cdc18ecb
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/cdc18ecb
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/cdc18ecb

Branch: refs/heads/ambari-rest-api-explorer
Commit: cdc18ecb9d06ef6810962fb3742d75781ff8e831
Parents: 74972de
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Mon May 22 15:41:25 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Mon May 22 15:42:16 2017 +0300

----------------------------------------------------------------------
 .../common-services/OOZIE/4.2.0.3.0/alerts.json |  45 ++
 .../OOZIE/4.2.0.3.0/configuration/oozie-env.xml | 255 +++++++++
 .../4.2.0.3.0/configuration/oozie-log4j.xml     | 149 ++++++
 .../4.2.0.3.0/configuration/oozie-site.xml      | 254 +++++++++
 .../OOZIE/4.2.0.3.0/kerberos.json               |  70 +++
 .../OOZIE/4.2.0.3.0/metainfo.xml                | 203 ++++++++
 .../package/alerts/alert_check_oozie_server.py  | 244 +++++++++
 .../4.2.0.3.0/package/files/oozieSmoke2.sh      |  84 +++
 .../files/prepareOozieHdfsDirectories.sh        |  42 ++
 .../4.2.0.3.0/package/files/wrap_ooziedb.sh     |  31 ++
 .../scripts/check_oozie_server_status.py        |  38 ++
 .../OOZIE/4.2.0.3.0/package/scripts/oozie.py    | 516 +++++++++++++++++++
 .../4.2.0.3.0/package/scripts/oozie_client.py   |  78 +++
 .../4.2.0.3.0/package/scripts/oozie_server.py   | 163 ++++++
 .../package/scripts/oozie_server_upgrade.py     | 237 +++++++++
 .../4.2.0.3.0/package/scripts/oozie_service.py  | 188 +++++++
 .../OOZIE/4.2.0.3.0/package/scripts/params.py   |  39 ++
 .../4.2.0.3.0/package/scripts/params_linux.py   | 374 ++++++++++++++
 .../4.2.0.3.0/package/scripts/params_windows.py |  34 ++
 .../4.2.0.3.0/package/scripts/service_check.py  | 140 +++++
 .../4.2.0.3.0/package/scripts/status_params.py  |  65 +++
 .../package/templates/adminusers.txt.j2         |  28 +
 .../templates/input.config-oozie.json.j2        |  48 ++
 .../package/templates/oozie-log4j.properties.j2 |  93 ++++
 .../4.2.0.3.0/package/templates/oozie.conf.j2   |  35 ++
 .../package/templates/zkmigrator_jaas.conf.j2   |  26 +
 .../OOZIE/4.2.0.3.0/quicklinks/quicklinks.json  |  45 ++
 .../OOZIE/4.2.0.3.0/role_command_order.json     |   9 +
 .../OOZIE/4.2.0.3.0/themes/theme.json           | 116 +++++
 .../stacks/HDP/3.0/services/OOZIE/metainfo.xml  |  27 +
 30 files changed, 3676 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/alerts.json b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/alerts.json
new file mode 100644
index 0000000..a1d267f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/alerts.json
@@ -0,0 +1,45 @@
+{
+  "OOZIE": {
+    "service": [],
+    "OOZIE_SERVER": [
+      {
+        "name": "oozie_server_webui",
+        "label": "Oozie Server Web UI",
+        "description": "This host-level alert is triggered if the Oozie server Web UI is unreachable.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{oozie-site/oozie.base.url}}/?user.name={{oozie-env/oozie_user}}",
+            "kerberos_keytab": "{{cluster-env/smokeuser_keytab}}",
+            "kerberos_principal": "{{cluster-env/smokeuser_principal_name}}",
+            "connection_timeout": 5.0
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning":{
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      },
+      {
+        "name": "oozie_server_status",
+        "label": "Oozie Server Status",
+        "description": "This host-level alert is triggered if the Oozie server cannot be determined to be up and responding to client requests.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "SCRIPT",
+          "path": "OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py"
+        }
+      }
+    ]
+  }
+}
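
The '{{config-type/property}}' tokens in the WEB alert's uri are resolved against the cluster's current configs before the check runs. The substitution itself is simple enough to sketch (the real resolution happens inside Ambari's alert framework; the helper below is purely illustrative):

function resolveTokens(template, configs) {
  // Replace each {{type/property}} token with its configured value.
  return template.replace(/\{\{([^}]+)\}\}/g, function (match, key) {
    return configs.hasOwnProperty(key) ? configs[key] : match;
  });
}
var configs = {
  'oozie-site/oozie.base.url': 'http://oozie-host:11000/oozie', // example value
  'oozie-env/oozie_user': 'oozie'
};
console.log(resolveTokens(
  '{{oozie-site/oozie.base.url}}/?user.name={{oozie-env/oozie_user}}', configs));
// http://oozie-host:11000/oozie/?user.name=oozie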

http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/configuration/oozie-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/configuration/oozie-env.xml b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/configuration/oozie-env.xml
new file mode 100644
index 0000000..0f67356
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/configuration/oozie-env.xml
@@ -0,0 +1,255 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>oozie_user</name>
+    <display-name>Oozie User</display-name>
+    <value>oozie</value>
+    <property-type>USER</property-type>
+    <description>Oozie User.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie_admin_users</name>
+    <value>{oozie_user}, oozie-admin</value>
+    <description>Oozie admin users.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>oozie_data_dir</name>
+    <value>/hadoop/oozie/data</value>
+    <display-name>Oozie Data Dir</display-name>
+    <description>Data directory in which the Oozie DB exists</description>
+    <value-attributes>
+      <type>directory</type>
+      <empty-value-valid>true</empty-value-valid>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie_log_dir</name>
+    <value>/var/log/oozie</value>
+    <display-name>Oozie Log Dir</display-name>
+    <description>Directory for oozie logs</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie_tmp_dir</name>
+    <value>/var/tmp/oozie</value>
+    <display-name>Oozie Tmp Dir</display-name>
+    <description>Directory for oozie temporary files</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie_pid_dir</name>
+    <value>/var/run/oozie</value>
+    <display-name>Oozie PID Dir</display-name>
+    <description>Directory in which the pid files for oozie reside.</description>
+    <value-attributes>
+      <type>directory</type>
+      <editable-only-at-install>true</editable-only-at-install>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie_admin_port</name>
+    <value>11001</value>
+    <display-name>Oozie Server Admin Port</display-name>
+    <description>The admin port Oozie server runs.</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie_heapsize</name>
+    <value>2048</value>
+    <description>Oozie heap size.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie_permsize</name>
+    <value>256</value>
+    <description>Oozie permanent generation size.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie_user_nofile_limit</name>
+    <value>32000</value>
+    <description>Max open files limit setting for OOZIE user.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie_user_nproc_limit</name>
+    <value>16000</value>
+    <description>Max number of processes limit setting for OOZIE user.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <!-- oozie-env.sh -->
+
+  <property>
+    <name>service_check_job_name</name>
+    <value>no-op</value>
+    <description>
+      Job name from Oozie examples that will be executed at each Oozie service check action.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+
+
+  <property>
+    <name>content</name>
+    <display-name>oozie-env template</display-name>
+    <description>This is the jinja template for oozie-env.sh file</description>
+    <value>
+#!/bin/bash
+
+if [ -d "/usr/lib/bigtop-tomcat" ]; then
+  export OOZIE_CONFIG=${OOZIE_CONFIG:-{{conf_dir}}}
+  export CATALINA_BASE=${CATALINA_BASE:-{{oozie_server_dir}}}
+  export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}
+  export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat
+fi
+
+#Set JAVA HOME
+export JAVA_HOME={{java_home}}
+
+export JRE_HOME=${JAVA_HOME}
+
+# Set Oozie specific environment variables here.
+
+# Settings for the Embedded Tomcat that runs Oozie
+# Java System properties for Oozie should be specified in this variable
+#
+{% if java_version &lt; 8 %}
+export CATALINA_OPTS="$CATALINA_OPTS -Xmx{{oozie_heapsize}} -XX:MaxPermSize={{oozie_permsize}}"
+{% else %}
+export CATALINA_OPTS="$CATALINA_OPTS -Xmx{{oozie_heapsize}}"
+{% endif %}
+# Oozie configuration file to load from Oozie configuration directory
+#
+# export OOZIE_CONFIG_FILE=oozie-site.xml
+
+# Oozie logs directory
+#
+export OOZIE_LOG={{oozie_log_dir}}
+
+# Oozie pid directory
+#
+export CATALINA_PID={{pid_file}}
+
+#Location of the data for oozie
+export OOZIE_DATA={{oozie_data_dir}}
+
+# Oozie Log4J configuration file to load from Oozie configuration directory
+#
+# export OOZIE_LOG4J_FILE=oozie-log4j.properties
+
+# Reload interval of the Log4J configuration file, in seconds
+#
+# export OOZIE_LOG4J_RELOAD=10
+
+# The port Oozie server runs
+#
+export OOZIE_HTTP_PORT={{oozie_server_port}}
+
+# The admin port Oozie server runs
+#
+export OOZIE_ADMIN_PORT={{oozie_server_admin_port}}
+
+# The host name Oozie server runs on
+#
+# export OOZIE_HTTP_HOSTNAME=`hostname -f`
+
+# The base URL for callback URLs to Oozie
+#
+# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
+export JAVA_LIBRARY_PATH={{hadoop_lib_home}}/native/Linux-{{architecture}}-64
+
+# At least 1 minute of retry time to account for server downtime during
+# upgrade/downgrade
+export OOZIE_CLIENT_OPTS="${OOZIE_CLIENT_OPTS} -Doozie.connection.retry.count=5 "
+
+{% if sqla_db_used or lib_dir_available %}
+export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{{jdbc_libs_dir}}"
+export JAVA_LIBRARY_PATH="$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}"
+{% endif %}
+
+# Set Hadoop-related properties
+export HADOOP_OPTS="-Dhdp.version=${HDP_VERSION} ${HADOOP_OPTS}"
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie_database</name>
+    <value>New Derby Database</value>
+    <display-name>Oozie Database</display-name>
+    <description>Oozie Server Database.</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>New Derby Database</value>
+          <label>New Derby</label>
+        </entry>
+        <entry>
+          <value>Existing MySQL / MariaDB Database</value>
+          <label>Existing MySQL / MariaDB</label>
+        </entry>
+        <entry>
+          <value>Existing PostgreSQL Database</value>
+          <label>Existing PostgreSQL</label>
+        </entry>
+        <entry>
+          <value>Existing Oracle Database</value>
+          <label>Existing Oracle</label>
+        </entry>
+        <entry>
+          <value>Existing SQL Anywhere Database</value>
+          <label>Existing SQL Anywhere</label>
+        </entry>
+      </entries>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/configuration/oozie-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/configuration/oozie-log4j.xml b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/configuration/oozie-log4j.xml
new file mode 100644
index 0000000..005cc0e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/configuration/oozie-log4j.xml
@@ -0,0 +1,149 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="false">
+
+  <property>
+    <name>oozie_log_maxhistory</name>
+    <value>720</value>
+    <description>The number of hours for which log files will be retained</description>
+    <display-name>Oozie Log: # Hours of Log Rentention</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>oozie-log4j template</display-name>
+    <description>Custom log4j.properties</description>
+    <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. See accompanying LICENSE file.
+#
+
+# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
+# XLogService sets its value to '${oozie.home}/logs'
+
+# The appender that Oozie uses must be named 'oozie' (i.e. log4j.appender.oozie)
+
+# Using the RollingFileAppender with the OozieRollingPolicy will roll the log file every hour and retain up to MaxHistory number of
+# log files. If FileNamePattern ends with ".gz" it will create gzip files.
+log4j.appender.oozie=org.apache.log4j.rolling.RollingFileAppender
+log4j.appender.oozie.RollingPolicy=org.apache.oozie.util.OozieRollingPolicy
+log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
+log4j.appender.oozie.Append=true
+log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n
+# The FileNamePattern must end with "-%d{yyyy-MM-dd-HH}.gz" or "-%d{yyyy-MM-dd-HH}" and also start with the
+# value of log4j.appender.oozie.File
+log4j.appender.oozie.RollingPolicy.FileNamePattern=${log4j.appender.oozie.File}-%d{yyyy-MM-dd-HH}
+# The MaxHistory controls how many log files will be retained (720 hours / 24 hours per day = 30 days); -1 to disable
+log4j.appender.oozie.RollingPolicy.MaxHistory={{oozie_log_maxhistory}}
+
+
+
+log4j.appender.oozieError=org.apache.log4j.rolling.RollingFileAppender
+log4j.appender.oozieError.RollingPolicy=org.apache.oozie.util.OozieRollingPolicy
+log4j.appender.oozieError.File=${oozie.log.dir}/oozie-error.log
+log4j.appender.oozieError.Append=true
+log4j.appender.oozieError.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieError.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n
+# The FileNamePattern must end with "-%d{yyyy-MM-dd-HH}.gz" or "-%d{yyyy-MM-dd-HH}" and also start with the
+# value of log4j.appender.oozieError.File
+log4j.appender.oozieError.RollingPolicy.FileNamePattern=${log4j.appender.oozieError.File}-%d{yyyy-MM-dd-HH}
+# The MaxHistory controls how many log files will be retained (720 hours / 24 hours per day = 30 days); -1 to disable
+log4j.appender.oozieError.RollingPolicy.MaxHistory=720
+log4j.appender.oozieError.filter.1 = org.apache.log4j.varia.LevelMatchFilter
+log4j.appender.oozieError.filter.1.levelToMatch = WARN
+log4j.appender.oozieError.filter.2 = org.apache.log4j.varia.LevelMatchFilter
+log4j.appender.oozieError.filter.2.levelToMatch = ERROR
+log4j.appender.oozieError.filter.3 = org.apache.log4j.varia.LevelMatchFilter
+log4j.appender.oozieError.filter.3.levelToMatch = FATAL
+log4j.appender.oozieError.filter.4 = org.apache.log4j.varia.DenyAllFilter
+
+
+
+# Uncomment the below two lines to use the DailyRollingFileAppender instead
+# The DatePattern must end with either "dd" or "HH"
+#log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH
+
+log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
+log4j.appender.oozieops.Append=true
+log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
+log4j.appender.oozieinstrumentation.Append=true
+log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
+log4j.appender.oozieaudit.Append=true
+log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
+log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
+log4j.appender.openjpa.Append=true
+log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
+log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.logger.openjpa=INFO, openjpa
+log4j.logger.oozieops=INFO, oozieops
+log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
+log4j.logger.oozieaudit=ALL, oozieaudit
+log4j.logger.org.apache.oozie=INFO, oozie, oozieError
+log4j.logger.org.apache.hadoop=WARN, oozie
+log4j.logger.org.mortbay=WARN, oozie
+log4j.logger.org.hsqldb=WARN, oozie
+log4j.logger.org.apache.hadoop.security.authentication.server=WARN, oozie
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/configuration/oozie-site.xml b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/configuration/oozie-site.xml
new file mode 100644
index 0000000..f68369a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/configuration/oozie-site.xml
@@ -0,0 +1,254 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+        
+       http://www.apache.org/licenses/LICENSE-2.0
+  
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration supports_final="true">
+  <!--
+      Refer to the oozie-default.xml file for the complete list of
+      Oozie configuration properties and their default values.
+  -->
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+  <property>
+    <name>oozie.authentication.simple.anonymous.allowed</name>
+    <value>true</value>
+    <description>
+      Indicates if anonymous requests are allowed.
+      This setting is meaningful only when using 'simple' authentication.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>oozie.authentication.kerberos.name.rules</name>
+    <description>The mapping from kerberos principal names to local OS user names.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.authentication.type</name>
+    <value>simple</value>
+    <description>
+      Authentication used for Oozie HTTP endpoint, the supported values are: simple | kerberos |
+      #AUTHENTICATION_HANDLER_CLASSNAME#.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.base.url</name>
+    <value>http://localhost:11000/oozie</value>
+    <description>Base Oozie URL.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.credentials.credentialclasses</name>
+    <value>hcat=org.apache.oozie.action.hadoop.HCatCredentials,hive2=org.apache.oozie.action.hadoop.Hive2Credentials</value>
+    <description>
+      Credential Class to be used for HCat.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
+    <value>*={{hadoop_conf_dir}}</value>
+    <description>
+      Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
+      the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
+      used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
+      the relevant Hadoop *-site.xml files. If the path is relative is looked within
+      the Oozie configuration directory; though the path can be absolute (i.e. to point
+      to Hadoop client conf/ directories in the local filesystem.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.service.HadoopAccessorService.kerberos.enabled</name>
+    <value>false</value>
+    <description>
+      Indicates if Oozie is configured to use Kerberos.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.service.URIHandlerService.uri.handlers</name>
+    <value>org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler
+    </value>
+    <description>
+      Enlist the different uri handlers supported for data availability checks.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.services.ext</name>
+    <value>
+      org.apache.oozie.service.JMSAccessorService,org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService
+    </value>
+    <description>
+      To add/replace services defined in 'oozie.services' with custom implementations.
+      Class names must be separated by commas.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.db.schema.name</name>
+    <value>oozie</value>
+    <display-name>Database Name</display-name>
+    <description>
+      Oozie DataBase Name
+    </description>
+    <value-attributes>
+      <type>database</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.jdbc.username</name>
+    <value>oozie</value>
+    <display-name>Database Username</display-name>
+    <description>
+      Database user name to use to connect to the database
+    </description>
+    <value-attributes>
+      <type>db_user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property require-input="true">
+    <name>oozie.service.JPAService.jdbc.password</name>
+    <value/>
+    <display-name>Database Password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>
+      DB user password.
+
+      IMPORTANT: if password is emtpy leave a 1 space string, the service trims the value,
+      if empty Configuration assumes it is NULL.
+    </description>
+    <value-attributes>
+      <type>password</type>
+      <overridable>false</overridable>
+      <keystore>true</keystore>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.jdbc.driver</name>
+    <value>org.apache.derby.jdbc.EmbeddedDriver</value>
+    <display-name>JDBC Driver Class</display-name>
+    <description>
+      JDBC driver class.
+    </description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>oozie-env</type>
+        <name>oozie_database</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.jdbc.url</name>
+    <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
+    <display-name>Database URL</display-name>
+    <description>
+      JDBC URL.
+    </description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>oozie-env</type>
+        <name>oozie_database</name>
+      </property>
+      <property>
+        <type>oozie-site</type>
+        <name>oozie.db.schema.name</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.service.AuthorizationService.security.enabled</name>
+    <value>true</value>
+    <description>
+      Specifies whether security (user name/admin role) is enabled or not.
+      If disabled any user can manage Oozie system and manage any job.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.authentication.simple.anonymous.allowed</name>
+    <value>true</value>
+    <description>
+      Indicates if anonymous requests are allowed when using 'simple' authentication.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.service.SparkConfigurationService.spark.configurations</name>
+    <value>*={{spark_conf_dir}}</value>
+    <description>
+      Comma separated AUTHORITY=SPARK_CONF_DIR, where AUTHORITY is the
+      HOST:PORT of the ResourceManager of a YARN cluster. The wildcard '*'
+      configuration is used when there is no exact match for an authority.
+      The SPARK_CONF_DIR contains the relevant spark-defaults.conf properties
+      file. If the path is relative is looked within the Oozie configuration
+      directory; though the path can be absolute.  This is only used when the
+      Spark master is set to either "yarn-client" or "yarn-cluster".
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.action.retry.interval</name>
+    <value>30</value>
+    <description>
+      The interval between retries of an action in case of failure
+    </description>
+    <value-attributes>
+      <type>custom</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/kerberos.json b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/kerberos.json
new file mode 100644
index 0000000..f1092f5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/kerberos.json
@@ -0,0 +1,70 @@
+{
+  "services": [
+    {
+      "name": "OOZIE",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "auth_to_local_properties" : [
+        "oozie-site/oozie.authentication.kerberos.name.rules"
+      ],
+      "configurations": [
+        {
+          "oozie-site": {
+            "oozie.authentication.type": "kerberos",
+            "oozie.service.AuthorizationService.authorization.enabled": "true",
+            "oozie.service.HadoopAccessorService.kerberos.enabled": "true",
+            "local.realm": "${realm}",
+            "oozie.credentials.credentialclasses": "hcat=org.apache.oozie.action.hadoop.HCatCredentials,hive2=org.apache.oozie.action.hadoop.Hive2Credentials",
+            "oozie.zookeeper.secure" : "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "OOZIE_SERVER",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "oozie_server",
+              "principal": {
+                "value": "oozie/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "oozie-site/oozie.service.HadoopAccessorService.kerberos.principal",
+                "local_username" : "${oozie-env/oozie_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/oozie.service.keytab",
+                "owner": {
+                  "name": "${oozie-env/oozie_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "oozie-site/oozie.service.HadoopAccessorService.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "oozie-site/oozie.authentication.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "oozie-site/oozie.authentication.kerberos.keytab"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/metainfo.xml
new file mode 100644
index 0000000..d351cbe
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/metainfo.xml
@@ -0,0 +1,203 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>OOZIE</name>
+      <credential-store>
+        <supported>true</supported>
+        <enabled>true</enabled>
+      </credential-store>
+      <displayName>Oozie</displayName>
+      <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/legal/open-source-faq/"&gt;ExtJS&lt;/a&gt; Library.
+      </comment>
+      <version>4.2.0.3.0</version>
+      <components>
+        <component>
+          <name>OOZIE_SERVER</name>
+          <displayName>Oozie Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <reassignAllowed>true</reassignAllowed>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/oozie_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1800</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>oozie_app</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+
+        <component>
+          <name>OOZIE_CLIENT</name>
+          <displayName>Oozie Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/oozie_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>oozie-site.xml</fileName>
+              <dictionaryName>oozie-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>oozie-env.sh</fileName>
+              <dictionaryName>oozie-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>oozie-log4j.properties</fileName>
+              <dictionaryName>oozie-log4j</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>zip</name>
+            </package>
+            <package>
+              <name>mysql-connector-java</name>
+              <skipUpgrade>true</skipUpgrade>
+              <condition>should_install_mysql_connector</condition>
+            </package>
+            <package>
+              <name>extjs</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat5,amazon2015,redhat6,redhat7,suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>oozie_${stack_version}</name>
+            </package>
+            <package>
+              <name>falcon_${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>oozie-${stack_version}</name>
+            </package>
+            <package>
+              <name>falcon-${stack_version}</name>
+            </package>
+            <package>
+              <name>extjs</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+
+      <requiredServices>
+        <service>YARN</service>
+      </requiredServices>
+
+       <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+      <configuration-dependencies>
+        <config-type>oozie-site</config-type>
+        <config-type>oozie-env</config-type>
+        <config-type>oozie-log4j</config-type>
+        <config-type>yarn-site</config-type>
+        <config-type>hive-site</config-type>
+        <config-type>tez-site</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>core-site</config-type>
+        <config-type>application-properties</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

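A note on the ${stack_version} token in the osSpecific package names above: at install time Ambari replaces it with the underscored stack version so the matching OS package is selected. A minimal sketch with a hypothetical build number:

  # Hypothetical underscored version; the real value is derived from the stack being installed.
  stack_version = "3_0_0_0_1234"

  for template in ("oozie_${stack_version}", "falcon_${stack_version}"):
      print(template.replace("${stack_version}", stack_version))
  # oozie_3_0_0_0_1234
  # falcon_3_0_0_0_1234
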
http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/alerts/alert_check_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/alerts/alert_check_oozie_server.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/alerts/alert_check_oozie_server.py
new file mode 100644
index 0000000..0e9fe74
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/alerts/alert_check_oozie_server.py
@@ -0,0 +1,244 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+import os
+import re
+
+from resource_management.core import global_lock
+from resource_management.core.environment import Environment
+from resource_management.core.resources import Execute
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import get_klist_path
+from ambari_commons.os_check import OSConst, OSCheck
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from urlparse import urlparse
+
+STACK_ROOT_PATTERN = "{{ stack_root }}"
+RESULT_CODE_OK = 'OK'
+RESULT_CODE_CRITICAL = 'CRITICAL'
+RESULT_CODE_UNKNOWN = 'UNKNOWN'
+
+if OSCheck.is_windows_family():
+  OOZIE_ENV_HTTPS_RE = r"set\s+OOZIE_HTTPS_PORT=(\d+)"
+else:
+  OOZIE_ENV_HTTPS_RE = r"export\s+OOZIE_HTTPS_PORT=(\d+)"
+
+# The configured Kerberos executable search paths, if any
+KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
+
+OOZIE_URL_KEY = '{{oozie-site/oozie.base.url}}'
+SECURITY_ENABLED = '{{cluster-env/security_enabled}}'
+OOZIE_USER = '{{oozie-env/oozie_user}}'
+OOZIE_CONF_DIR = "{0}/current/oozie-server/conf".format(STACK_ROOT_PATTERN)
+OOZIE_CONF_DIR_LEGACY = '/etc/oozie/conf'
+OOZIE_HTTPS_PORT = '{{oozie-site/oozie.https.port}}'
+OOZIE_ENV_CONTENT = '{{oozie-env/content}}'
+
+USER_KEYTAB_KEY = '{{oozie-site/oozie.service.HadoopAccessorService.keytab.file}}'
+USER_PRINCIPAL_KEY = '{{oozie-site/oozie.service.HadoopAccessorService.kerberos.principal}}'
+USER_KEY = '{{oozie-env/oozie_user}}'
+
+# default keytab location
+USER_KEYTAB_SCRIPT_PARAM_KEY = 'default.oozie.keytab'
+USER_KEYTAB_DEFAULT = '/etc/security/keytabs/oozie.headless.keytab'
+
+# default user principal
+USER_PRINCIPAL_SCRIPT_PARAM_KEY = 'default.oozie.principal'
+USER_PRINCIPAL_DEFAULT = 'oozie@EXAMPLE.COM'
+
+# default user
+USER_DEFAULT = 'oozie'
+
+STACK_ROOT_KEY = '{{cluster-env/stack_root}}'
+STACK_ROOT_DEFAULT = '/usr/hdp'
+
+class KerberosPropertiesNotFound(Exception): pass
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (OOZIE_URL_KEY,)
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (OOZIE_URL_KEY, USER_PRINCIPAL_KEY, SECURITY_ENABLED, USER_KEYTAB_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
+          USER_KEY, OOZIE_HTTPS_PORT, OOZIE_ENV_CONTENT, STACK_ROOT_KEY)
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def get_check_command(oozie_url, host_name, configurations):
+  from resource_management.libraries.functions import reload_windows_env
+  reload_windows_env()
+  oozie_home = os.environ['OOZIE_HOME']
+  oozie_cmd = os.path.join(oozie_home, 'bin', 'oozie.cmd')
+  command = format("cmd /c {oozie_cmd} admin -oozie {oozie_url} -status")
+  return (command, None, None)
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def get_check_command(oozie_url, host_name, configurations, parameters, only_kinit):
+  kerberos_env = None
+
+  user = USER_DEFAULT
+  if USER_KEY in configurations:
+    user = configurations[USER_KEY]
+
+  if is_security_enabled(configurations):
+    # defaults
+    user_keytab = USER_KEYTAB_DEFAULT
+    user_principal = USER_PRINCIPAL_DEFAULT
+
+    # check script params
+    if USER_PRINCIPAL_SCRIPT_PARAM_KEY in parameters:
+      user_principal = parameters[USER_PRINCIPAL_SCRIPT_PARAM_KEY]
+      user_principal = user_principal.replace('_HOST', host_name.lower())
+    if USER_KEYTAB_SCRIPT_PARAM_KEY in parameters:
+      user_keytab = parameters[USER_KEYTAB_SCRIPT_PARAM_KEY]
+
+    # check configurations last as they should always take precedence
+    if USER_PRINCIPAL_KEY in configurations:
+      user_principal = configurations[USER_PRINCIPAL_KEY]
+      user_principal = user_principal.replace('_HOST', host_name.lower())
+    if USER_KEYTAB_KEY in configurations:
+      user_keytab = configurations[USER_KEYTAB_KEY]
+
+    # Create the kerberos credentials cache (ccache) file and set it in the environment to use
+    # when executing curl
+    env = Environment.get_instance()
+    ccache_file = "{0}{1}oozie_alert_cc_{2}".format(env.tmp_dir, os.sep, os.getpid())
+    kerberos_env = {'KRB5CCNAME': ccache_file}
+
+    # Get the configured Kerberos executable search paths, if any
+    kerberos_executable_search_paths = None
+    if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
+      kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
+
+    klist_path_local = get_klist_path(kerberos_executable_search_paths)
+    kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
+    kinit_part_command = format("{kinit_path_local} -l 5m20s -c {ccache_file} -kt {user_keytab} {user_principal}; ")
+
+    # Determine if we need to kinit by testing to see if the relevant cache exists and has
+    # non-expired tickets.  Tickets are marked to expire after 5 minutes to help reduce the number
+    # of kinits we do but recover quickly when keytabs are regenerated
+
+    if only_kinit:
+      kinit_command = kinit_part_command
+    else:
+      kinit_command = "{0} -s {1} || ".format(klist_path_local, ccache_file) + kinit_part_command
+
+    # prevent concurrent kinit
+    kinit_lock = global_lock.get_lock(global_lock.LOCK_TYPE_KERBEROS)
+    kinit_lock.acquire()
+    try:
+      Execute(kinit_command, environment=kerberos_env, user=user)
+    finally:
+      kinit_lock.release()
+
+  # Configure stack root
+  stack_root = STACK_ROOT_DEFAULT
+  if STACK_ROOT_KEY in configurations:
+    stack_root = configurations[STACK_ROOT_KEY].lower()
+
+  # oozie configuration directory using a symlink
+  oozie_config_directory = OOZIE_CONF_DIR.replace(STACK_ROOT_PATTERN, stack_root)
+  if not os.path.exists(oozie_config_directory):
+    oozie_config_directory = OOZIE_CONF_DIR_LEGACY
+
+  command = "source {0}/oozie-env.sh ; oozie admin -oozie {1} -status".format(
+    oozie_config_directory, oozie_url)
+
+  return (command, kerberos_env, user)
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  if configurations is None:
+    return (RESULT_CODE_UNKNOWN, ['There were no configurations supplied to the script.'])
+
+  if OOZIE_URL_KEY not in configurations:
+    return (RESULT_CODE_UNKNOWN, ['The Oozie URL is a required parameter.'])
+
+  https_port = None
+  # try to get https port from oozie-env content
+  if OOZIE_ENV_CONTENT in configurations:
+    for line in configurations[OOZIE_ENV_CONTENT].splitlines():
+      result = re.match(OOZIE_ENV_HTTPS_RE, line)
+
+      if result is not None:
+        https_port = result.group(1)
+  # or from oozie-site.xml
+  if https_port is None and OOZIE_HTTPS_PORT in configurations:
+    https_port = configurations[OOZIE_HTTPS_PORT]
+
+  oozie_url = configurations[OOZIE_URL_KEY]
+
+  # construct proper url for https
+  if https_port is not None:
+    parsed_url = urlparse(oozie_url)
+    oozie_url = oozie_url.replace(parsed_url.scheme, "https")
+    if parsed_url.port is None:
+      oozie_url = oozie_url.replace(parsed_url.hostname, ":".join([parsed_url.hostname, str(https_port)]))
+    else:
+      oozie_url = oozie_url.replace(str(parsed_url.port), str(https_port))
+
+  # https will not work with a localhost address, we need to put the FQDN
+  if https_port is None:
+    oozie_url = oozie_url.replace(urlparse(oozie_url).hostname, host_name)
+
+  (code, msg) = get_check_result(oozie_url, host_name, configurations, parameters, False)
+
+  # sometimes the real ticket lifetime is less than what we have set (5m20s as of now),
+  # so re-run the check with a forced kinit to be sure that it's not a problem with ticket lifetime
+  if is_security_enabled(configurations) and code == RESULT_CODE_CRITICAL:
+    (code, msg) = get_check_result(oozie_url, host_name, configurations, parameters, True)
+
+  return (code, msg)
+
+
+def get_check_result(oozie_url, host_name, configurations, parameters, only_kinit):
+  try:
+    command, env, user = get_check_command(oozie_url, host_name, configurations, parameters, only_kinit)
+    # execute the command
+    Execute(command, environment=env, user=user)
+
+    return (RESULT_CODE_OK, ["Successful connection to {0}".format(oozie_url)])
+  except KerberosPropertiesNotFound, ex:
+    return (RESULT_CODE_UNKNOWN, [str(ex)])
+  except Exception, ex:
+    return (RESULT_CODE_CRITICAL, [str(ex)])
+
+def is_security_enabled(configurations):
+  security_enabled = False
+  if SECURITY_ENABLED in configurations:
+    security_enabled = str(configurations[SECURITY_ENABLED]).upper() == 'TRUE'
+
+  return security_enabled

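Taken together: the agent resolves each {{site/property}} token returned by get_tokens() into a configurations dictionary and then calls execute(). A minimal sketch of a non-Kerberos invocation, with hypothetical values (this runs inside the agent's alert runtime, which provides the Execute environment):

  # Hypothetical values; keys mirror the token constants defined in the script above.
  configurations = {
      '{{oozie-site/oozie.base.url}}': 'http://oozie-host.example.com:11000/oozie',
      '{{cluster-env/security_enabled}}': 'false',
      '{{oozie-env/oozie_user}}': 'oozie',
      '{{cluster-env/stack_root}}': '/usr/hdp',
  }

  # Returns (result_code, [label]), e.g.
  #   ('OK', ['Successful connection to http://oozie-host.example.com:11000/oozie'])
  # or ('CRITICAL', [...]) when `oozie admin -status` fails.
  code, messages = execute(configurations=configurations, host_name='oozie-host.example.com')
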
http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/files/oozieSmoke2.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/files/oozieSmoke2.sh b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/files/oozieSmoke2.sh
new file mode 100644
index 0000000..60716ae
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/files/oozieSmoke2.sh
@@ -0,0 +1,84 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export os_family=$1
+export oozie_lib_dir=$2
+export oozie_conf_dir=$3
+export oozie_bin_dir=$4
+export oozie_server_url=$5
+export oozie_examples_dir=$6
+export hadoop_conf_dir=$7
+export hadoop_bin_dir=$8
+export smoke_test_user=$9
+export job_name=${10}
+export security_enabled=${11}
+export smoke_user_keytab=${12}
+export kinit_path_local=${13}
+export smokeuser_principal=${14}
+
+function checkOozieJobStatus {
+  local job_id=$1
+  local num_of_tries=$2
+  #default num_of_tries to 10 if not present
+  num_of_tries=${num_of_tries:-10}
+  local i=0
+  local rc=1
+  local cmd="source ${oozie_conf_dir}/oozie-env.sh ; ${oozie_bin_dir}/oozie job -oozie ${OOZIE_SERVER} -info $job_id"
+  /var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"
+  while [ $i -lt $num_of_tries ] ; do
+    cmd_output=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
+    (IFS='';echo $cmd_output)
+    act_status=$(IFS='';echo $cmd_output | grep ^Status | cut -d':' -f2 | sed 's| ||g')
+    echo "workflow_status=$act_status"
+    if [ "RUNNING" == "$act_status" ]; then
+      #increment the counter and get the status again after waiting for 15 secs
+      sleep 15
+      (( i++ ))
+      elif [ "SUCCEEDED" == "$act_status" ]; then
+        rc=0;
+        break;
+      else
+        rc=1
+        break;
+      fi
+    done
+    return $rc
+}
+
+export OOZIE_EXIT_CODE=0
+export OOZIE_SERVER=$oozie_server_url
+
+cd $oozie_examples_dir
+
+if [[ $security_enabled == "True" ]]; then
+  kinitcmd="${kinit_path_local} -kt ${smoke_user_keytab} ${smokeuser_principal}; "
+else 
+  kinitcmd=""
+fi
+
+cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; ${oozie_bin_dir}/oozie -Doozie.auth.token.cache=false job -oozie $OOZIE_SERVER -config $oozie_examples_dir/examples/apps/${job_name}/job.properties  -run"
+echo $cmd
+job_info=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd" | grep "job:"`
+job_id="`echo $job_info | cut -d':' -f2`"
+checkOozieJobStatus "$job_id" 15
+OOZIE_EXIT_CODE="$?"
+exit $OOZIE_EXIT_CODE

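The smoke script expects fourteen positional arguments in the order exported at the top of the file. A hedged sketch of an invocation for the unsecured case; every path and hostname here is hypothetical (the real values come from the service check's params):

  import subprocess

  subprocess.check_call([
      "/bin/bash", "oozieSmoke2.sh",
      "redhat",                                     # os_family
      "/usr/hdp/current/oozie-server",              # oozie_lib_dir
      "/etc/oozie/conf",                            # oozie_conf_dir
      "/usr/hdp/current/oozie-server/bin",          # oozie_bin_dir
      "http://oozie-host.example.com:11000/oozie",  # oozie_server_url
      "/usr/hdp/current/oozie-server/doc",          # oozie_examples_dir
      "/etc/hadoop/conf",                           # hadoop_conf_dir
      "/usr/hdp/current/hadoop-client/bin",         # hadoop_bin_dir
      "ambari-qa",                                  # smoke_test_user
      "map-reduce",                                 # job_name -> examples/apps/map-reduce
      "False",                                      # security_enabled
      "",                                           # smoke_user_keytab (unused here)
      "/usr/bin/kinit",                             # kinit_path_local
      "",                                           # smokeuser_principal (unused here)
  ])
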
http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/files/prepareOozieHdfsDirectories.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/files/prepareOozieHdfsDirectories.sh b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/files/prepareOozieHdfsDirectories.sh
new file mode 100644
index 0000000..f2bee2d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/files/prepareOozieHdfsDirectories.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export oozie_conf_dir=$1
+export oozie_examples_dir=$2
+export hadoop_conf_dir=$3
+export JOBTRACKER=$4
+export NAMENODE=$5
+export QUEUE=$6
+export JOB_NAME=$7
+
+cd $oozie_examples_dir
+
+/var/lib/ambari-agent/ambari-sudo.sh tar -zxf oozie-examples.tar.gz
+/var/lib/ambari-agent/ambari-sudo.sh chmod -R o+rx examples
+
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|nameNode=hdfs://localhost:8020|nameNode=$NAMENODE|g"  examples/apps/$JOB_NAME/job.properties
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|nameNode=hdfs://localhost:9000|nameNode=$NAMENODE|g"  examples/apps/$JOB_NAME/job.properties
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|jobTracker=localhost:8021|jobTracker=$JOBTRACKER|g" examples/apps/$JOB_NAME/job.properties
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|jobTracker=localhost:9001|jobTracker=$JOBTRACKER|g" examples/apps/$JOB_NAME/job.properties
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|jobTracker=localhost:8032|jobTracker=$JOBTRACKER|g" examples/apps/$JOB_NAME/job.properties
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|queueName=default|queueName=$QUEUE|g" examples/apps/$JOB_NAME/job.properties
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|oozie.wf.application.path=hdfs://localhost:9000|oozie.wf.application.path=$NAMENODE|g" examples/apps/$JOB_NAME/job.properties

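Each sed call above is a fixed-string substitution anchored on the defaults that ship in the example job.properties. A minimal Python equivalent over sample lines, with hypothetical cluster endpoints standing in for $NAMENODE, $JOBTRACKER, and $QUEUE:

  # Hypothetical endpoints; at runtime these arrive as the script's positional arguments.
  replacements = {
      "nameNode=hdfs://localhost:8020": "nameNode=hdfs://nn.example.com:8020",
      "jobTracker=localhost:8021": "jobTracker=rm.example.com:8050",
      "queueName=default": "queueName=default",
  }

  lines = ["nameNode=hdfs://localhost:8020", "jobTracker=localhost:8021", "queueName=default"]
  print([replacements.get(line, line) for line in lines])
  # ['nameNode=hdfs://nn.example.com:8020', 'jobTracker=rm.example.com:8050', 'queueName=default']
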
http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/files/wrap_ooziedb.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/files/wrap_ooziedb.sh b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/files/wrap_ooziedb.sh
new file mode 100644
index 0000000..36576b5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/files/wrap_ooziedb.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+OUT=`cd /var/tmp/oozie && /usr/lib/oozie/bin/ooziedb.sh "$@" 2>&1`
+EC=$?
+echo $OUT
+GRVAR=`echo ${OUT} | grep -o "java.lang.Exception: DB schema exists"`
+if [ ${EC} -ne 0 ] && [ -n "$GRVAR" ]
+then
+  exit 0
+else
+  exit $EC
+fi  

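The wrapper exists so that re-running database initialization is idempotent: a failure whose output contains "java.lang.Exception: DB schema exists" is rewritten to exit code 0. A hedged usage sketch (create/-sqlfile/-run are standard ooziedb.sh options; everything is forwarded via "$@"):

  import subprocess

  # Exits 0 on a fresh create and also when ooziedb.sh reports the schema already exists.
  rc = subprocess.call(["/bin/bash", "wrap_ooziedb.sh", "create", "-sqlfile", "oozie.sql", "-run"])
  print(rc)
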
http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/check_oozie_server_status.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/check_oozie_server_status.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/check_oozie_server_status.py
new file mode 100644
index 0000000..7c69779
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/check_oozie_server_status.py
@@ -0,0 +1,38 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def check_oozie_server_status():
+  import status_params
+  from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
+
+  check_windows_service_status(status_params.oozie_server_win_service_name)
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def check_oozie_server_status():
+  import status_params
+  from resource_management.libraries.functions.check_process_status import check_process_status
+
+  check_process_status(status_params.pid_file)
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
new file mode 100644
index 0000000..def0545
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
@@ -0,0 +1,516 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+# Python Imports
+import os
+import re
+
+# Resource Management Imports
+from resource_management.core.resources.service import ServiceConfig
+from resource_management.core.resources.system import Directory, Execute, File
+from resource_management.core.source import DownloadSource
+from resource_management.core.source import InlineTemplate
+from resource_management.core.source import Template
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.oozie_prepare_war import prepare_war
+from resource_management.libraries.functions.copy_tarball import get_current_version
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.security_commons import update_credential_provider_path
+from resource_management.core.resources.packaging import Package
+from resource_management.core.shell import as_user, as_sudo, call
+from resource_management.core.exceptions import Fail
+
+from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook
+from ambari_commons.constants import SERVICE, UPGRADE_TYPE_NON_ROLLING, UPGRADE_TYPE_ROLLING
+from resource_management.libraries.functions.constants import Direction
+
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+from ambari_commons.inet_utils import download_file
+
+from resource_management.core import Logger
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def oozie(is_server=False):
+  import params
+
+  from status_params import oozie_server_win_service_name
+
+  XmlConfig("oozie-site.xml",
+            conf_dir=params.oozie_conf_dir,
+            configurations=params.config['configurations']['oozie-site'],
+            owner=params.oozie_user,
+            mode='f',
+            configuration_attributes=params.config['configuration_attributes']['oozie-site']
+  )
+
+  File(os.path.join(params.oozie_conf_dir, "oozie-env.cmd"),
+       owner=params.oozie_user,
+       content=InlineTemplate(params.oozie_env_cmd_template)
+  )
+
+  Directory(params.oozie_tmp_dir,
+            owner=params.oozie_user,
+            create_parents = True,
+  )
+
+  if is_server:
+    # Manually overriding service logon user & password set by the installation package
+    ServiceConfig(oozie_server_win_service_name,
+                  action="change_user",
+                  username = params.oozie_user,
+                  password = Script.get_password(params.oozie_user))
+
+  download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
+                      os.path.join(params.oozie_root, "extra_libs", "sqljdbc4.jar")
+  )
+  webapps_sqljdbc_path = os.path.join(params.oozie_home, "oozie-server", "webapps", "oozie", "WEB-INF", "lib", "sqljdbc4.jar")
+  if os.path.isfile(webapps_sqljdbc_path):
+    download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
+                        webapps_sqljdbc_path
+    )
+  download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
+                      os.path.join(params.oozie_home, "share", "lib", "oozie", "sqljdbc4.jar")
+  )
+  download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
+                      os.path.join(params.oozie_home, "temp", "WEB-INF", "lib", "sqljdbc4.jar")
+  )
+
+# TODO: see if we can remove this
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def oozie(is_server=False):
+  import params
+
+  if is_server:
+    params.HdfsResource(params.oozie_hdfs_user_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.oozie_user,
+                         mode=params.oozie_hdfs_user_mode
+    )
+    params.HdfsResource(None, action="execute")
+  Directory(params.conf_dir,
+             create_parents = True,
+             owner = params.oozie_user,
+             group = params.user_group
+  )
+
+  params.oozie_site = update_credential_provider_path(params.oozie_site,
+                                                      'oozie-site',
+                                                      os.path.join(params.conf_dir, 'oozie-site.jceks'),
+                                                      params.oozie_user,
+                                                      params.user_group
+                                                      )
+
+  XmlConfig("oozie-site.xml",
+    conf_dir = params.conf_dir,
+    configurations = params.oozie_site,
+    configuration_attributes=params.config['configuration_attributes']['oozie-site'],
+    owner = params.oozie_user,
+    group = params.user_group,
+    mode = 0664
+  )
+  File(format("{conf_dir}/oozie-env.sh"),
+    owner=params.oozie_user,
+    content=InlineTemplate(params.oozie_env_sh_template),
+    group=params.user_group,
+  )
+
+  # On some OSes this directory may not exist, so create it before placing files there
+  Directory(params.limits_conf_dir,
+            create_parents=True,
+            owner='root',
+            group='root'
+  )
+
+  File(os.path.join(params.limits_conf_dir, 'oozie.conf'),
+       owner='root',
+       group='root',
+       mode=0644,
+       content=Template("oozie.conf.j2")
+  )
+
+  if (params.log4j_props != None):
+    File(format("{params.conf_dir}/oozie-log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.oozie_user,
+      content=InlineTemplate(params.log4j_props)
+    )
+  elif (os.path.exists(format("{params.conf_dir}/oozie-log4j.properties"))):
+    File(format("{params.conf_dir}/oozie-log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.oozie_user
+    )
+
+  if params.stack_version_formatted and check_stack_feature(StackFeature.OOZIE_ADMIN_USER, params.stack_version_formatted):
+    File(format("{params.conf_dir}/adminusers.txt"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.oozie_user,
+      content=Template('adminusers.txt.j2', oozie_admin_users=params.oozie_admin_users)
+    )
+  else:
+    File ( format("{params.conf_dir}/adminusers.txt"),
+           owner = params.oozie_user,
+           group = params.user_group
+    )
+
+  if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
+     params.jdbc_driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver" or \
+     params.jdbc_driver_name == "org.postgresql.Driver" or \
+     params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+    File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
+      content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
+    )
+  pass
+
+  oozie_ownership()
+  
+  if is_server:      
+    oozie_server_specific()
+  
+def oozie_ownership():
+  import params
+  
+  File ( format("{conf_dir}/hadoop-config.xml"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  File ( format("{conf_dir}/oozie-default.xml"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  Directory ( format("{conf_dir}/action-conf"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  File ( format("{conf_dir}/action-conf/hive.xml"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+def oozie_server_specific():
+  import params
+  
+  no_op_test = as_user(format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.oozie_user)
+  
+  File(params.pid_file,
+    action="delete",
+    not_if=no_op_test
+  )
+  
+  oozie_server_directories = [format("{oozie_home}/{oozie_tmp_dir}"), params.oozie_pid_dir, params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir, params.oozie_lib_dir, params.oozie_webapps_dir, params.oozie_webapps_conf_dir, params.oozie_server_dir]
+  Directory( oozie_server_directories,
+    owner = params.oozie_user,
+    group = params.user_group,
+    mode = 0755,
+    create_parents = True,
+    cd_access="a",
+  )
+  
+  Directory(params.oozie_libext_dir,
+            create_parents = True,
+  )
+  
+  hashcode_file = format("{oozie_home}/.hashcode")
+  skip_recreate_sharelib = format("test -f {hashcode_file} && test -d {oozie_home}/share")
+
+  untar_sharelib = ('tar','-xvf',format('{oozie_home}/oozie-sharelib.tar.gz'),'-C',params.oozie_home)
+
+  Execute( untar_sharelib,    # time-expensive
+    not_if  = format("{no_op_test} || {skip_recreate_sharelib}"), 
+    sudo = True,
+  )
+
+  configure_cmds = []
+  configure_cmds.append(('cp', params.ext_js_path, params.oozie_libext_dir))
+  configure_cmds.append(('chown', format('{oozie_user}:{user_group}'), format('{oozie_libext_dir}/{ext_js_file}')))
+  
+  Execute( configure_cmds,
+    not_if  = no_op_test,
+    sudo = True,
+  )
+  
+  Directory(params.oozie_webapps_conf_dir,
+            owner = params.oozie_user,
+            group = params.user_group,
+            recursive_ownership = True,
+            recursion_follow_links = True,
+  )
+
+  # download the database JAR
+  download_database_library_if_needed()
+
+  #falcon el extension
+  if params.has_falcon_host:
+    Execute(format('{sudo} cp {falcon_home}/oozie/ext/falcon-oozie-el-extension-*.jar {oozie_libext_dir}'),
+      not_if  = no_op_test)
+
+    Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'),
+      not_if  = no_op_test)
+
+  if params.lzo_enabled and len(params.all_lzo_packages) > 0:
+    Package(params.all_lzo_packages,
+            retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+            retry_count=params.agent_stack_retry_count)
+    Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
+      not_if  = no_op_test,
+    )
+
+  prepare_war(params)
+
+  File(hashcode_file,
+       mode = 0644,
+  )
+
+  if params.stack_version_formatted and check_stack_feature(StackFeature.OOZIE_CREATE_HIVE_TEZ_CONFIGS, params.stack_version_formatted):
+    # Create hive-site and tez-site configs for oozie
+    Directory(params.hive_conf_dir,
+        create_parents = True,
+        owner = params.oozie_user,
+        group = params.user_group
+    )
+    if 'hive-site' in params.config['configurations']:
+      hive_site_config = update_credential_provider_path(params.config['configurations']['hive-site'],
+                                                         'hive-site',
+                                                         os.path.join(params.hive_conf_dir, 'hive-site.jceks'),
+                                                         params.oozie_user,
+                                                         params.user_group
+                                                         )
+      XmlConfig("hive-site.xml",
+        conf_dir=params.hive_conf_dir,
+        configurations=hive_site_config,
+        configuration_attributes=params.config['configuration_attributes']['hive-site'],
+        owner=params.oozie_user,
+        group=params.user_group,
+        mode=0644
+    )
+    if 'tez-site' in params.config['configurations']:
+      XmlConfig( "tez-site.xml",
+        conf_dir = params.hive_conf_dir,
+        configurations = params.config['configurations']['tez-site'],
+        configuration_attributes=params.config['configuration_attributes']['tez-site'],
+        owner = params.oozie_user,
+        group = params.user_group,
+        mode = 0664
+    )
+
+    # If Atlas is also installed, need to generate Atlas Hive hook (hive-atlas-application.properties file) in directory
+    # {stack_root}/{current_version}/atlas/hook/hive/
+    # Because this is a .properties file instead of an xml file, it will not be read automatically by Oozie.
+    # However, we should still save the file on this host so that it can be uploaded to the Oozie Sharelib in DFS.
+    if has_atlas_in_cluster():
+      atlas_hook_filepath = os.path.join(params.hive_conf_dir, params.atlas_hook_filename)
+      Logger.info("Has atlas in cluster, will save Atlas Hive hook into location %s" % str(atlas_hook_filepath))
+      setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.oozie_user, params.user_group)
+
+  Directory(params.oozie_server_dir,
+    owner = params.oozie_user,
+    group = params.user_group,
+    recursive_ownership = True,  
+  )
+  if params.security_enabled:
+    File(os.path.join(params.conf_dir, 'zkmigrator_jaas.conf'),
+         owner=params.oozie_user,
+         group=params.user_group,
+         content=Template("zkmigrator_jaas.conf.j2")
+         )
+
+def __parse_sharelib_from_output(output):
+  """
+  Return the parent directory of the first path from the output of the "oozie admin -shareliblist $comp" command.
+  Output will match a pattern like:
+
+  Potential errors
+  [Available ShareLib]
+  hive
+    hdfs://server:8020/user/oozie/share/lib/lib_20160811235630/hive/file1.jar
+    hdfs://server:8020/user/oozie/share/lib/lib_20160811235630/hive/file2.jar
+  """
+  if output is not None:
+    pattern = re.compile(r"\[Available ShareLib\]\n\S*?\n(.*share.*)", re.IGNORECASE)
+    m = pattern.search(output)
+    if m and len(m.groups()) == 1:
+      jar_path = m.group(1)
+      # Remove leading/trailing spaces and get the containing directory
+      sharelib_dir = os.path.dirname(jar_path.strip())
+      return sharelib_dir
+  return None
+
+def copy_atlas_hive_hook_to_dfs_share_lib(upgrade_type=None, upgrade_direction=None):
+  """
+   If the Atlas Hive Hook directory is present, Atlas is installed, and this is the first Oozie Server,
+  then copy the entire contents of that directory to the Oozie Sharelib in DFS, e.g.,
+  /usr/$stack/$current_version/atlas/hook/hive/ -> hdfs:///user/oozie/share/lib/lib_$timestamp/hive
+
+  :param upgrade_type: If in the middle of a stack upgrade, the type as UPGRADE_TYPE_ROLLING or UPGRADE_TYPE_NON_ROLLING
+  :param upgrade_direction: If in the middle of a stack upgrade, the direction as Direction.UPGRADE or Direction.DOWNGRADE.
+  """
+  import params
+
+  # Calculate the effective version since this code can also be called during EU/RU in the upgrade direction.
+  effective_version = params.stack_version_formatted if upgrade_type is None else format_stack_version(params.version)
+  if not check_stack_feature(StackFeature.ATLAS_HOOK_SUPPORT, effective_version):
+    return
+    
+  # Important that oozie_server_hostnames is sorted by name so that this only runs on a single Oozie server.
+  if not (len(params.oozie_server_hostnames) > 0 and params.hostname == params.oozie_server_hostnames[0]):
+    Logger.debug("Will not attempt to copy Atlas Hive hook to DFS since this is not the first Oozie Server "
+                 "sorted by hostname.")
+    return
+
+  if not has_atlas_in_cluster():
+    Logger.debug("Will not attempt to copy Atlas Hve hook to DFS since Atlas is not installed on the cluster.")
+    return
+
+  if upgrade_type is not None and upgrade_direction == Direction.DOWNGRADE:
+    Logger.debug("Will not attempt to copy Atlas Hve hook to DFS since in the middle of Rolling/Express upgrade "
+                 "and performing a Downgrade.")
+    return
+
+  current_version = get_current_version()
+  atlas_hive_hook_dir = format("{stack_root}/{current_version}/atlas/hook/hive/")
+  if not os.path.exists(atlas_hive_hook_dir):
+    Logger.error(format("ERROR. Atlas is installed in cluster but this Oozie server doesn't "
+                        "contain directory {atlas_hive_hook_dir}"))
+    return
+
+  atlas_hive_hook_impl_dir = os.path.join(atlas_hive_hook_dir, "atlas-hive-plugin-impl")
+
+  num_files = len([name for name in os.listdir(atlas_hive_hook_impl_dir) if os.path.exists(os.path.join(atlas_hive_hook_impl_dir, name))])
+  Logger.info("Found %d files/directories inside Atlas Hive hook impl directory %s"% (num_files, atlas_hive_hook_impl_dir))
+
+  # This can return over 100 files, so take the first 5 lines after "Available ShareLib"
+  # Use -oozie http(s):localhost:{oozie_server_admin_port}/oozie as oozie-env does not export OOZIE_URL
+  command = format(r'source {conf_dir}/oozie-env.sh ; oozie admin -oozie {oozie_base_url} -shareliblist hive | grep "\[Available ShareLib\]" -A 5')
+  # Execute() does not return command output; use shell.call (imported above)
+  # so the ShareLib listing can be parsed below.
+  return_code, out = call(command,
+          user=params.oozie_user,
+          tries=10,
+          try_sleep=5,
+          logoutput=True,
+  )
+
+  hive_sharelib_dir = __parse_sharelib_from_output(out)
+
+  if hive_sharelib_dir is None:
+    raise Fail("Could not parse Hive sharelib from output.")
+
+  Logger.info(format("Parsed Hive sharelib = {hive_sharelib_dir} and will attempt to copy/replace {num_files} files to it from {atlas_hive_hook_impl_dir}"))
+
+  params.HdfsResource(hive_sharelib_dir,
+                      type="directory",
+                      action="create_on_execute",
+                      source=atlas_hive_hook_impl_dir,
+                      user=params.hdfs_user,
+                      owner=params.oozie_user,
+                      group=params.hdfs_user,
+                      mode=0755,
+                      recursive_chown=True,
+                      recursive_chmod=True,
+                      replace_existing_files=True
+                      )
+
+  Logger.info("Copying Atlas Hive hook properties file to Oozie Sharelib in DFS.")
+  atlas_hook_filepath_source = os.path.join(params.hive_conf_dir, params.atlas_hook_filename)
+  atlas_hook_file_path_dest_in_dfs = os.path.join(hive_sharelib_dir, params.atlas_hook_filename)
+  params.HdfsResource(atlas_hook_file_path_dest_in_dfs,
+                      type="file",
+                      source=atlas_hook_filepath_source,
+                      action="create_on_execute",
+                      owner=params.oozie_user,
+                      group=params.hdfs_user,
+                      mode=0755,
+                      replace_existing_files=True
+                      )
+  params.HdfsResource(None, action="execute")
+
+  # Update the sharelib after making any changes
+  # Use -oozie http(s):localhost:{oozie_server_admin_port}/oozie as oozie-env does not export OOZIE_URL
+  Execute(format("source {conf_dir}/oozie-env.sh ; oozie admin -oozie {oozie_base_url} -sharelibupdate"),
+          user=params.oozie_user,
+          tries=5,
+          try_sleep=5,
+          logoutput=True,
+  )
+
+
+def download_database_library_if_needed(target_directory = None):
+  """
+  Downloads the library to use when connecting to the Oozie database, if
+  necessary. The library will be downloaded to 'params.target' unless
+  otherwise specified.
+  :param target_directory: the location where the database library will be
+  downloaded to.
+  :return:
+  """
+  import params
+  jdbc_drivers = ["com.mysql.jdbc.Driver",
+    "com.microsoft.sqlserver.jdbc.SQLServerDriver",
+    "oracle.jdbc.driver.OracleDriver","sap.jdbc4.sqlanywhere.IDriver"]
+
+  # check to see if the JDBC driver name is in the list of ones that need to
+  # be downloaded
+  if params.jdbc_driver_name not in jdbc_drivers or not params.jdbc_driver_jar:
+    return
+
+  if params.previous_jdbc_jar and os.path.isfile(params.previous_jdbc_jar):
+    File(params.previous_jdbc_jar, action='delete')
+
+  # if the target directory is not specified
+  if target_directory is None:
+    target_jar_with_directory = params.target
+  else:
+    # create the full path using the supplied target directory and the JDBC JAR
+    target_jar_with_directory = target_directory + os.path.sep + params.jdbc_driver_jar
+
+  if not os.path.exists(target_jar_with_directory):
+    File(params.downloaded_custom_connector,
+      content = DownloadSource(params.driver_curl_source))
+
+    if params.sqla_db_used:
+      untar_sqla_type2_driver = ('tar', '-xvf', params.downloaded_custom_connector, '-C', params.tmp_dir)
+
+      Execute(untar_sqla_type2_driver, sudo = True)
+
+      Execute(format("yes | {sudo} cp {jars_path_in_archive} {oozie_libext_dir}"))
+
+      Directory(params.jdbc_libs_dir,
+                create_parents = True)
+
+      Execute(format("yes | {sudo} cp {libs_path_in_archive} {jdbc_libs_dir}"))
+
+      Execute(format("{sudo} chown -R {oozie_user}:{user_group} {oozie_libext_dir}/*"))
+
+    else:
+      Execute(('cp', '--remove-destination', params.downloaded_custom_connector, target_jar_with_directory),
+        path=["/bin", "/usr/bin/"],
+        sudo = True)
+
+    File(target_jar_with_directory, owner = params.oozie_user,
+      group = params.user_group)

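The regex in __parse_sharelib_from_output can be exercised directly against the sample output from its docstring; a minimal standalone sketch:

  import os
  import re

  sample = ("Potential errors\n"
            "[Available ShareLib]\n"
            "hive\n"
            "  hdfs://server:8020/user/oozie/share/lib/lib_20160811235630/hive/file1.jar\n"
            "  hdfs://server:8020/user/oozie/share/lib/lib_20160811235630/hive/file2.jar\n")

  pattern = re.compile(r"\[Available ShareLib\]\n\S*?\n(.*share.*)", re.IGNORECASE)
  jar_path = pattern.search(sample).group(1)
  # The parent directory of the first matched jar is the hive sharelib directory.
  print(os.path.dirname(jar_path.strip()))
  # hdfs://server:8020/user/oozie/share/lib/lib_20160811235630/hive
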
http://git-wip-us.apache.org/repos/asf/ambari/blob/cdc18ecb/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_client.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_client.py
new file mode 100644
index 0000000..f98ecfd
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_client.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+
+from oozie import oozie
+from oozie_service import oozie_service
+
+
+class OozieClient(Script):
+
+  def get_component_name(self):
+    return "oozie-client"
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    oozie(is_server=False)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    # this function should not execute if the version can't be determined or
+    # the stack does not support rolling upgrade
+    if not (params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version)):
+      return
+
+    Logger.info("Executing Oozie Client Stack Upgrade pre-restart")
+    conf_select.select(params.stack_name, "oozie", params.version)
+    stack_select.select("oozie-client", params.version)
+
+  # We substitute some configs (oozie.authentication.kerberos.principal) before generation (see oozie.py and params.py).
+  # This function returns changed configs (it's used for config generation before config download)
+  def generate_configs_get_xml_file_content(self, filename, dictionary):
+    if dictionary == 'oozie-site':
+      import params
+      config = self.get_config()
+      return {'configurations': params.oozie_site,
+              'configuration_attributes': config['configuration_attributes'][dictionary]}
+    else:
+      return super(OozieClient, self).generate_configs_get_xml_file_content(filename, dictionary)
+
+if __name__ == "__main__":
+  OozieClient().execute()


[32/50] [abbrv] ambari git commit: Revert "Revert "ADDENDUM. AMBARI-21011. Upgrade Code. Append PATH to YARN config 'yarn.nodemanager.admin-env' for HDP 2.6.""

Posted by ad...@apache.org.
Revert "Revert "ADDENDUM. AMBARI-21011. Upgrade Code. Append PATH to YARN config 'yarn.nodemanager.admin-env' for HDP 2.6.""

This reverts commit 0a61f9857ec95162801fc2c8aae05fef67fbbd52.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ccd6b25e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ccd6b25e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ccd6b25e

Branch: refs/heads/ambari-rest-api-explorer
Commit: ccd6b25ed2f053a9b1b2045fd0cf41f4472e657e
Parents: 9ffef7f
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Mon May 22 01:10:15 2017 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Mon May 22 01:11:15 2017 -0700

----------------------------------------------------------------------
 .../stacks/HDP/2.3/upgrades/config-upgrade.xml         |  8 ++++++++
 .../stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml |  6 ++++++
 .../resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml  |  1 +
 .../stacks/HDP/2.4/upgrades/config-upgrade.xml         |  6 +++++-
 .../stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml |  6 ++++++
 .../resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml  |  1 +
 .../stacks/HDP/2.5/upgrades/config-upgrade.xml         |  8 ++++++++
 .../stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml |  6 ++++++
 .../resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml  |  4 ++++
 .../stacks/HDP/2.6/upgrades/config-upgrade.xml         | 13 +++++++++++++
 .../stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml |  7 +++++++
 .../resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml  |  1 +
 12 files changed, 66 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ccd6b25e/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
index 8b5c07d..98bb056 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
@@ -546,6 +546,14 @@
           </definition>
         </changes>
       </component>
+      <component name="NODEMANAGER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env">
+            <type>yarn-site</type>
+            <insert key="yarn.nodemanager.admin-env" value=",PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:$PATH" insert-type="append" newline-before="false" newline-after="false" />
+          </definition>
+        </changes>
+      </component>
     </service>
 
     <service name="MAPREDUCE2">

http://git-wip-us.apache.org/repos/asf/ambari/blob/ccd6b25e/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
index 5aa08c5..4d2b3ec 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
@@ -353,6 +353,12 @@
         </task>
       </execute-stage>
 
+      <execute-stage service="YARN" component="NODEMANAGER" title="Apply config changes for YARN NM admin env">
+        <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env">
+          <summary>Updating YARN NodeManager admin env config</summary>
+        </task>
+      </execute-stage>
+
       <!--Yarn Apptimeline server-->
       <execute-stage service="YARN" component="APP_TIMELINE_SERVER" title="Apply config changes for App timeline server">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FixYarnWebServiceUrl">

http://git-wip-us.apache.org/repos/asf/ambari/blob/ccd6b25e/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
index d98bb53..f1dd943 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
@@ -789,6 +789,7 @@
       <component name="NODEMANAGER">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_5_0_0_add_spark2_yarn_shuffle"/>
+          <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env"/>
         </pre-upgrade>
 
         <pre-downgrade/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ccd6b25e/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
index b3d19d4..b448a2d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
@@ -332,7 +332,11 @@
             <set key="yarn.nodemanager.aux-services" value="mapreduce_shuffle,spark_shuffle,spark2_shuffle"/>
             <!-- Ideally we need to append spark2_shuffle to the existing value -->
           </definition>
-        </changes>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env">
+            <type>yarn-site</type>
+            <insert key="yarn.nodemanager.admin-env" value=",PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:$PATH" insert-type="append" newline-before="false" newline-after="false" />
+          </definition>
+        </changes>
       </component>
     </service>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ccd6b25e/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
index 4a2a502..4920f12 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
@@ -331,6 +331,12 @@
         </task>
       </execute-stage>
 
+      <execute-stage service="YARN" component="NODEMANAGER" title="Apply config changes for YARN NM admin env">
+        <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env">
+          <summary>Updating YARN NodeManager admin env config</summary>
+        </task>
+      </execute-stage>
+
       <execute-stage service="MAPREDUCE2" component="MAPREDUCE2_CLIENT" title="Apply config changes for Mapreduce2 client">
         <task xsi:type="configure" id="hdp_2_6_0_0_mapreduce_job_queuename">
           <summary>Adding queue customization property</summary>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ccd6b25e/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
index 1eb9836..6acedc9 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
@@ -794,6 +794,7 @@
       <component name="NODEMANAGER">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_5_0_0_add_spark2_yarn_shuffle"/>
+          <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env"/>
         </pre-upgrade>
 
         <pre-downgrade/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ccd6b25e/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
index a29f74b..4c6cb21 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
@@ -206,6 +206,14 @@
           </definition>
         </changes>
       </component>
+      <component name="NODEMANAGER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env">
+            <type>yarn-site</type>
+            <insert key="yarn.nodemanager.admin-env" value=",PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:$PATH" insert-type="append" newline-before="false" newline-after="false" />
+          </definition>
+        </changes>
+      </component>
     </service>
 
     <service name="MAPREDUCE2">

http://git-wip-us.apache.org/repos/asf/ambari/blob/ccd6b25e/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
index 8c659ee..d617a31 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
@@ -345,6 +345,12 @@
         </task>
       </execute-stage>
 
+      <execute-stage service="YARN" component="NODEMANAGER" title="Apply config changes for YARN NM admin env">
+        <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env">
+          <summary>Updating YARN NM admin env config</summary>
+        </task>
+      </execute-stage>
+
       <execute-stage>
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FixCapacitySchedulerOrderingPolicy">
           <summary>Validate Root Queue Ordering Policy</summary>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ccd6b25e/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
index 3054ca3..fb854b9 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
@@ -721,6 +721,10 @@
       </component>
 
       <component name="NODEMANAGER">
+        <pre-upgrade>
+          <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env"/>
+        </pre-upgrade>
+        <pre-downgrade />
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>
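
The rolling-upgrade wiring above is worth a note: the configure task sits in <pre-upgrade> so it runs before the component restart, while the empty <pre-downgrade /> keeps the change from being reapplied on downgrade. A toy sketch of that ordering (the dictionary structure is assumed for illustration; the real orchestration happens on the Ambari server):

def run_component(tasks, direction):
  # Pick the pre-phase matching the direction, then run the main tasks.
  phase = 'pre-upgrade' if direction == 'upgrade' else 'pre-downgrade'
  for task_id in tasks.get(phase, []):
    print('configure before restart: %s' % task_id)
  for task_id in tasks.get('main', []):
    print('%s: %s' % (direction, task_id))

nodemanager = {
  'pre-upgrade': ['hdp_2_6_0_0_yarn_nodemanager_admin_env'],
  'pre-downgrade': [],  # deliberate no-op: config change is not reapplied on downgrade
  'main': ['restart-task'],
}
run_component(nodemanager, 'upgrade')    # configure, then restart
run_component(nodemanager, 'downgrade')  # restart only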

http://git-wip-us.apache.org/repos/asf/ambari/blob/ccd6b25e/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
index a6b7523..a8ac1bc 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
@@ -117,6 +117,19 @@
           </definition>
         </changes>
       </component>
+      <component name="NODEMANAGER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env">
+            <type>yarn-site</type>
+            <insert key="yarn.nodemanager.admin-env" value=",PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:$PATH" insert-type="append" newline-before="false" newline-after="false" />
+          </definition>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_llap_mem">
+            <type>yarn-site</type>
+            <set key="yarn.nodemanager.kill-escape.launch-command-line" value="slider-agent,LLAP"/>
+            <set key="yarn.nodemanager.kill-escape.user" value="hive"/>
+          </definition>
+        </changes>
+      </component>
     </service>
 
     <service name="KAFKA">
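
The HDP 2.6 hunk above pairs the two configure primitives used in this commit: <insert insert-type="append"> extends an existing value, while <set> overwrites it outright. A one-screen sketch of the difference (simplified; attribute handling and conditionals are omitted):

def apply_set(configs, key, value):
  configs[key] = value  # <set>: unconditional overwrite
  return configs

def apply_append(configs, key, suffix):
  configs[key] = configs.get(key, '') + suffix  # <insert insert-type="append">
  return configs

yarn_site = {'yarn.nodemanager.admin-env': 'MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX'}
apply_set(yarn_site, 'yarn.nodemanager.kill-escape.user', 'hive')
apply_append(yarn_site, 'yarn.nodemanager.admin-env',
             ',PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:$PATH')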

http://git-wip-us.apache.org/repos/asf/ambari/blob/ccd6b25e/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
index 1cdd184..ae7ffc5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
@@ -328,6 +328,13 @@
       </execute-stage>
 
       <!-- YARN -->
+      <execute-stage service="YARN" component="NODEMANAGER" title="Apply config changes for YARN NM admin env">
+        <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env">
+          <summary>Updating YARN NodeManager admin env config</summary>
+        </task>
+      </execute-stage>
+
+      <!-- YARN -->
       <execute-stage service="YARN" component="NODEMANAGER" title="Apply config changes for YARN NM">
         <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_llap_mem">
           <summary>Updating YARN NodeManager config for LLAP</summary>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ccd6b25e/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
index 3e7e3d7..c2ae825 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
@@ -696,6 +696,7 @@
 
       <component name="NODEMANAGER">
         <pre-upgrade>
+          <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_admin_env"/>
           <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_llap_mem"/>
         </pre-upgrade>
         <pre-downgrade/> <!--  no-op to prevent config changes on downgrade -->