You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by jo...@apache.org on 2016/09/28 17:27:54 UTC

[01/17] ambari git commit: AMBARI-18464. Provide Warnings When ulimit Is High To Prevent Heartbeat Lost Issues (aonishuk)

Repository: ambari
Updated Branches:
  refs/heads/branch-feature-AMBARI-18456 8192601df -> f0da4fa49


AMBARI-18464. Provide Warnings When ulimit Is High To Prevent Heartbeat Lost Issues (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ee4e63a9
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ee4e63a9
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ee4e63a9

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: ee4e63a9a75131119f3e157eeb1f8f1462a1798f
Parents: 8192601
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Mon Sep 26 19:09:52 2016 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Sep 26 19:09:52 2016 +0300

----------------------------------------------------------------------
 ambari-server/src/main/resources/alerts.json    | 31 ++++++++
 .../main/resources/host_scripts/alert_ulimit.py | 83 ++++++++++++++++++++
 .../test/python/host_scripts/TestAlertUlimit.py | 44 +++++++++++
 3 files changed, 158 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ee4e63a9/ambari-server/src/main/resources/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/alerts.json b/ambari-server/src/main/resources/alerts.json
index 9cffff5..2559b3a 100644
--- a/ambari-server/src/main/resources/alerts.json
+++ b/ambari-server/src/main/resources/alerts.json
@@ -149,7 +149,38 @@
             }
           ]
         }
+      },
+      {
+        "name": "ambari_agent_ulimit",
+        "label": "Ulimit for open files",
+        "description": "This host-level alert is triggered if value of ulimit for open files (-n) goes above specific thresholds. The default threshold values are 200000 for WARNING and 800000 for CRITICAL.",
+        "interval": 1,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "alert_ulimit.py",
+          "parameters": [
+            {
+              "name": "ulimit.warning.threshold",
+              "display_name": "Warning",
+              "value": 200000,
+              "type": "NUMERIC",
+              "description": "The threshold of ulimit for open files (-n) for WARNING alert.",
+              "threshold": "WARNING"
+            },
+            {
+              "name": "ulimit.critical.threshold",
+              "display_name": "Critical",
+              "value": 800000,
+              "type": "NUMERIC",
+              "description": "The threshold of ulimit for open files (-n) for CRITICAL alert.",
+              "threshold": "CRITICAL"
+            }
+          ]
+        }
       }
+
     ]
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/ee4e63a9/ambari-server/src/main/resources/host_scripts/alert_ulimit.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/host_scripts/alert_ulimit.py b/ambari-server/src/main/resources/host_scripts/alert_ulimit.py
new file mode 100644
index 0000000..8c57b84
--- /dev/null
+++ b/ambari-server/src/main/resources/host_scripts/alert_ulimit.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import resource
+
+WARNING_KEY = "ulimit.warning.threshold"
+CRITICAL_KEY = "ulimit.critical.threshold"
+
+DEFAULT_WARNING_KEY = 200000
+DEFAULT_CRITICAL_KEY = 800000
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return None
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Performs advanced ulimit checks under Linux.
+
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+
+  """
+
+  # try:
+  soft_ulimit, hard_ulimit = resource.getrlimit(resource.RLIMIT_NOFILE)
+  return_code, label = _get_warnings_for_partition(parameters, soft_ulimit)
+  # except Exception as e:
+  #   return 'CRITICAL', ["Unable to determine ulimit for open files (-n)"]
+
+  return return_code, [label]
+
+def _get_warnings_for_partition(parameters, soft_ulimit):
+
+  # start with hard coded defaults
+  warning_count = DEFAULT_WARNING_KEY
+  critical_count = DEFAULT_CRITICAL_KEY
+
+  if WARNING_KEY in parameters:
+    warning_count = int(parameters[WARNING_KEY])
+
+  if CRITICAL_KEY in parameters:
+    critical_count = int(parameters[CRITICAL_KEY])
+
+  if soft_ulimit is None or soft_ulimit == "":
+    return 'CRITICAL', ['Unable to determine ulimit for open files (-n)']
+
+  return_code = "OK"
+  label = "Ulimit for open files (-n) is {0}".format(soft_ulimit)
+
+  if soft_ulimit >= critical_count:
+    label = "Ulimit for open files (-n) is {0} which is higher or equal than critical value of {1}".format(soft_ulimit, critical_count)
+    return_code = 'CRITICAL'
+  elif soft_ulimit >= warning_count:
+    label = "Ulimit for open files (-n) is {0} which is higher or equal than warning value of {1}".format(soft_ulimit, warning_count)
+    return_code = 'WARNING'
+
+  return return_code, label
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/ee4e63a9/ambari-server/src/test/python/host_scripts/TestAlertUlimit.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/host_scripts/TestAlertUlimit.py b/ambari-server/src/test/python/host_scripts/TestAlertUlimit.py
new file mode 100644
index 0000000..09bf4e6
--- /dev/null
+++ b/ambari-server/src/test/python/host_scripts/TestAlertUlimit.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import alert_ulimit
+from mock.mock import patch, MagicMock
+from unittest import TestCase
+
+
+class TestAlertUlimit(TestCase):
+
+  @patch('resource.getrlimit')
+  def test_ulimits(self, ulimit_mock):
+
+    # OK
+    ulimit_mock.return_value = 1024, 1024
+    res = alert_ulimit.execute()
+    self.assertEquals(res, ('OK', ['Ulimit for open files (-n) is 1024']))
+
+    # WARNING
+    ulimit_mock.return_value = 200000, 200000
+    res = alert_ulimit.execute()
+    self.assertEquals(res, ('WARNING', ['Ulimit for open files (-n) is 200000 which is higher or equal than warning value of 200000']))
+
+    # CRITICAL
+    ulimit_mock.return_value = 1000000, 1000000
+    res = alert_ulimit.execute()
+    self.assertEquals(res, ('CRITICAL', ['Ulimit for open files (-n) is 1000000 which is higher or equal than critical value of 800000']))
\ No newline at end of file


[05/17] ambari git commit: AMBARI-18466. Component should be renamed to "Microsoft R Server Client". (Attila Doroszlai via stoader)

Posted by jo...@apache.org.
AMBARI-18466. Component should be renamed to "Microsoft R Server Client". (Attila Doroszlai via stoader)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6d352282
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6d352282
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6d352282

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 6d3522825b962e926fe5e244b706a619c74b0116
Parents: 5af6d54
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Tue Sep 27 11:20:06 2016 +0200
Committer: Toader, Sebastian <st...@hortonworks.com>
Committed: Tue Sep 27 11:20:06 2016 +0200

----------------------------------------------------------------------
 .../scripts/shared_initialization.py            |  2 +-
 .../MICROSOFT_R/8.0.0/metainfo.xml              |  4 ++--
 .../MICROSOFT_R/8.0.0/service_advisor.py        | 22 ++++++++++----------
 3 files changed, 14 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6d352282/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
index 2182fd1..397c22d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
@@ -177,7 +177,7 @@ def create_dirs():
 
 def create_microsoft_r_dir():
   import params
-  if 'MICROSOFT_R_CLIENT' in params.component_list and params.default_fs:
+  if 'MICROSOFT_R_SERVER_CLIENT' in params.component_list and params.default_fs:
     directory = '/user/RevoShare'
     try:
       params.HdfsResource(directory,

http://git-wip-us.apache.org/repos/asf/ambari/blob/6d352282/contrib/management-packs/microsoft-r_mpack/src/main/resources/common-services/MICROSOFT_R/8.0.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/contrib/management-packs/microsoft-r_mpack/src/main/resources/common-services/MICROSOFT_R/8.0.0/metainfo.xml b/contrib/management-packs/microsoft-r_mpack/src/main/resources/common-services/MICROSOFT_R/8.0.0/metainfo.xml
index 6998efc..709929e 100644
--- a/contrib/management-packs/microsoft-r_mpack/src/main/resources/common-services/MICROSOFT_R/8.0.0/metainfo.xml
+++ b/contrib/management-packs/microsoft-r_mpack/src/main/resources/common-services/MICROSOFT_R/8.0.0/metainfo.xml
@@ -27,8 +27,8 @@
 
       <components>
         <component>
-          <name>MICROSOFT_R_CLIENT</name>
-          <displayName>Microsoft R Client</displayName>
+          <name>MICROSOFT_R_SERVER_CLIENT</name>
+          <displayName>Microsoft R Server Client</displayName>
           <category>CLIENT</category>
           <cardinality>1+</cardinality>
           <commandScript>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6d352282/contrib/management-packs/microsoft-r_mpack/src/main/resources/common-services/MICROSOFT_R/8.0.0/service_advisor.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/microsoft-r_mpack/src/main/resources/common-services/MICROSOFT_R/8.0.0/service_advisor.py b/contrib/management-packs/microsoft-r_mpack/src/main/resources/common-services/MICROSOFT_R/8.0.0/service_advisor.py
index 891d2c9..58f0dbe 100644
--- a/contrib/management-packs/microsoft-r_mpack/src/main/resources/common-services/MICROSOFT_R/8.0.0/service_advisor.py
+++ b/contrib/management-packs/microsoft-r_mpack/src/main/resources/common-services/MICROSOFT_R/8.0.0/service_advisor.py
@@ -37,19 +37,19 @@ except Exception as e:
 class MICROSOFT_R800ServiceAdvisor(service_advisor.ServiceAdvisor):
 
   def colocateService(self, hostsComponentsMap, serviceComponents):
-    # colocate R_CLIENT with NODEMANAGERs and YARN_CLIENTs
-    rClientComponent = [component for component in serviceComponents if component["StackServiceComponents"]["component_name"] == "MICROSOFT_R_CLIENT"]
+    # colocate R_SERVER_CLIENT with NODEMANAGERs and YARN_CLIENTs
+    rClientComponent = [component for component in serviceComponents if component["StackServiceComponents"]["component_name"] == "MICROSOFT_R_SERVER_CLIENT"]
     traceback.print_tb(None)
     rClientComponent = rClientComponent[0]
     if not self.isComponentHostsPopulated(rClientComponent):
       for hostName in hostsComponentsMap.keys():
         hostComponents = hostsComponentsMap[hostName]
         if ({"name": "NODEMANAGER"} in hostComponents or {"name": "YARN_CLIENT"} in hostComponents) \
-            and {"name": "MICROSOFT_R_CLIENT"} not in hostComponents:
-          hostsComponentsMap[hostName].append({ "name": "MICROSOFT_R_CLIENT" })
+            and {"name": "MICROSOFT_R_SERVER_CLIENT"} not in hostComponents:
+          hostsComponentsMap[hostName].append({ "name": "MICROSOFT_R_SERVER_CLIENT" })
         if ({"name": "NODEMANAGER"} not in hostComponents and {"name": "YARN_CLIENT"} not in hostComponents) \
-            and {"name": "MICROSOFT_R_CLIENT"} in hostComponents:
-          hostsComponentsMap[hostName].remove({"name": "MICROSOFT_R_CLIENT"})
+            and {"name": "MICROSOFT_R_SERVER_CLIENT"} in hostComponents:
+          hostsComponentsMap[hostName].remove({"name": "MICROSOFT_R_SERVER_CLIENT"})
 
   def getServiceComponentLayoutValidations(self, services, hosts):
     componentsListList = [service["components"] for service in services["services"]]
@@ -57,17 +57,17 @@ class MICROSOFT_R800ServiceAdvisor(service_advisor.ServiceAdvisor):
     hostsList = [host["Hosts"]["host_name"] for host in hosts["items"]]
     hostsCount = len(hostsList)
 
-    rClientHosts = self.getHosts(componentsList, "MICROSOFT_R_CLIENT")
+    rClientHosts = self.getHosts(componentsList, "MICROSOFT_R_SERVER_CLIENT")
     expectedrClientHosts = set(self.getHosts(componentsList, "NODEMANAGER")) | set(self.getHosts(componentsList, "YARN_CLIENT"))
 
     items = []
 
-    # Generate WARNING if any R_CLIENT is not colocated with NODEMANAGER or YARN_CLIENT
+    # Generate WARNING if any R_SERVER_CLIENT is not colocated with NODEMANAGER or YARN_CLIENT
     mismatchHosts = sorted(expectedrClientHosts.symmetric_difference(set(rClientHosts)))
     if len(mismatchHosts) > 0:
       hostsString = ', '.join(mismatchHosts)
-      message = "Microsoft-R Client must be installed on NodeManagers and YARN Clients. " \
+      message = "Microsoft R Server Client must be installed on NodeManagers and YARN Clients. " \
                 "The following {0} host(s) do not satisfy the colocation recommendation: {1}".format(len(mismatchHosts), hostsString)
-      items.append( { "type": 'host-component', "level": 'WARN', "message": message, "component-name": 'MICROSOFT_R_CLIENT' } )
+      items.append( { "type": 'host-component', "level": 'WARN', "message": message, "component-name": 'MICROSOFT_R_SERVER_CLIENT' } )
 
-    return items
\ No newline at end of file
+    return items


[11/17] ambari git commit: AMBARI-18471. Refactor yarn() function in YARN service. Part 1. (aonishuk)

Posted by jo...@apache.org.
AMBARI-18471. Refactor yarn() function in YARN service. Part 1. (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6fb1ceef
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6fb1ceef
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6fb1ceef

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 6fb1ceef66b8d63c66603eebf1cd9e6d91c815aa
Parents: c265ae6
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Wed Sep 28 08:44:44 2016 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Wed Sep 28 08:44:44 2016 +0300

----------------------------------------------------------------------
 .../2.1.0.2.0/package/scripts/params_linux.py   |   7 +
 .../YARN/2.1.0.2.0/package/scripts/yarn.py      | 548 +++++++++----------
 .../stacks/2.0.6/YARN/test_historyserver.py     |   1 -
 .../stacks/2.0.6/YARN/test_resourcemanager.py   |  54 +-
 .../stacks/2.1/YARN/test_apptimelineserver.py   |  40 +-
 .../test/python/stacks/2.3/YARN/test_ats_1_5.py | 188 +++----
 6 files changed, 421 insertions(+), 417 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6fb1ceef/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
index 4d42861..0d46069 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
@@ -193,6 +193,13 @@ else:
   rm_webui_address = format("{rm_host}:{rm_port}")
   rm_webui_https_address = format("{rm_host}:{rm_https_port}")
 
+if security_enabled:
+  tc_mode = 0644
+  tc_owner = "root"
+else:
+  tc_mode = None
+  tc_owner = hdfs_user
+
 nm_webui_address = config['configurations']['yarn-site']['yarn.nodemanager.webapp.address']
 hs_webui_address = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address']
 nm_address = config['configurations']['yarn-site']['yarn.nodemanager.address']  # still contains 0.0.0.0

http://git-wip-us.apache.org/repos/asf/ambari/blob/6fb1ceef/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
index a3a3a06..70ed5b3 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
@@ -37,63 +37,6 @@ from ambari_commons import OSConst
 
 from resource_management.libraries.functions.mounted_dirs_helper import handle_mounted_dirs
 
-# Local Imports
-
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def yarn(name = None):
-  import params
-  XmlConfig("mapred-site.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['mapred-site'],
-            owner=params.yarn_user,
-            mode='f'
-  )
-  XmlConfig("yarn-site.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['yarn-site'],
-            owner=params.yarn_user,
-            mode='f',
-            configuration_attributes=params.config['configuration_attributes']['yarn-site']
-  )
-  XmlConfig("capacity-scheduler.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['capacity-scheduler'],
-            owner=params.yarn_user,
-            mode='f'
-  )
-
-  if params.service_map.has_key(name):
-    service_name = params.service_map[name]
-
-    ServiceConfig(service_name,
-                  action="change_user",
-                  username = params.yarn_user,
-                  password = Script.get_password(params.yarn_user))
-
-def create_log_dir(dir_name):
-  import params
-  Directory(dir_name,
-            create_parents = True,
-            cd_access="a",
-            mode=0775,
-            owner=params.yarn_user,
-            group=params.user_group,
-            ignore_failures=True,
-  )
-  
-def create_local_dir(dir_name):
-  import params
-  Directory(dir_name,
-            create_parents = True,
-            cd_access="a",
-            mode=0755,
-            owner=params.yarn_user,
-            group=params.user_group,
-            ignore_failures=True,
-            recursive_mode_flags = {'f': 'a+rw', 'd': 'a+rwx'},
-  )
-
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
 def yarn(name=None, config_dir=None):
   """
@@ -102,107 +45,18 @@ def yarn(name=None, config_dir=None):
   """
   import params
 
+  if name == 'resourcemanager':
+    setup_resourcemanager()
+  elif name == 'nodemanager':
+    setup_nodemanager()
+  elif name == 'apptimelineserver':
+    setup_ats()
+  elif name == 'historyserver':
+    setup_historyserver()
+
   if config_dir is None:
     config_dir = params.hadoop_conf_dir
 
-  if name == "historyserver":
-    if params.yarn_log_aggregation_enabled:
-      params.HdfsResource(params.yarn_nm_app_log_dir,
-                           action="create_on_execute",
-                           type="directory",
-                           owner=params.yarn_user,
-                           group=params.user_group,
-                           mode=01777,
-                           recursive_chmod=True
-      )
-
-    # create the /tmp folder with proper permissions if it doesn't exist yet
-    if params.entity_file_history_directory.startswith('/tmp'):
-        params.HdfsResource(params.hdfs_tmp_dir,
-                            action="create_on_execute",
-                            type="directory",
-                            owner=params.hdfs_user,
-                            mode=0777,
-        )
-
-    params.HdfsResource(params.entity_file_history_directory,
-                           action="create_on_execute",
-                           type="directory",
-                           owner=params.yarn_user,
-                           group=params.user_group
-    )
-    params.HdfsResource("/mapred",
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.mapred_user
-    )
-    params.HdfsResource("/mapred/system",
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.hdfs_user
-    )
-    params.HdfsResource(params.mapreduce_jobhistory_done_dir,
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.mapred_user,
-                         group=params.user_group,
-                         change_permissions_for_parents=True,
-                         mode=0777
-    )
-    params.HdfsResource(None, action="execute")
-    Directory(params.jhs_leveldb_state_store_dir,
-              owner=params.mapred_user,
-              group=params.user_group,
-              create_parents = True,
-              cd_access="a",
-              recursive_ownership = True,
-              )
-
-  #<editor-fold desc="Node Manager Section">
-  if name == "nodemanager":
-
-    # First start after enabling/disabling security
-    if params.toggle_nm_security:
-      Directory(params.nm_local_dirs_list + params.nm_log_dirs_list,
-                action='delete'
-      )
-
-      # If yarn.nodemanager.recovery.dir exists, remove this dir
-      if params.yarn_nodemanager_recovery_dir:
-        Directory(InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
-                  action='delete'
-        )
-
-      # Setting NM marker file
-      if params.security_enabled:
-        Directory(params.nm_security_marker_dir)
-        File(params.nm_security_marker,
-             content="Marker file to track first start after enabling/disabling security. "
-                     "During first start yarn local, log dirs are removed and recreated"
-             )
-      elif not params.security_enabled:
-        File(params.nm_security_marker, action="delete")
-
-
-    if not params.security_enabled or params.toggle_nm_security:
-      # handle_mounted_dirs ensures that we don't create dirs which are temporary unavailable (unmounted), and intended to reside on a different mount.
-      nm_log_dir_to_mount_file_content = handle_mounted_dirs(create_log_dir, params.nm_log_dirs, params.nm_log_dir_to_mount_file, params)
-      # create a history file used by handle_mounted_dirs
-      File(params.nm_log_dir_to_mount_file,
-           owner=params.hdfs_user,
-           group=params.user_group,
-           mode=0644,
-           content=nm_log_dir_to_mount_file_content
-      )
-      nm_local_dir_to_mount_file_content = handle_mounted_dirs(create_local_dir, params.nm_local_dirs, params.nm_local_dir_to_mount_file, params)
-      File(params.nm_local_dir_to_mount_file,
-           owner=params.hdfs_user,
-           group=params.user_group,
-           mode=0644,
-           content=nm_local_dir_to_mount_file_content
-      )
-  #</editor-fold>
-
   if params.yarn_nodemanager_recovery_dir:
     Directory(InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
               owner=params.yarn_user,
@@ -245,15 +99,14 @@ def yarn(name=None, config_dir=None):
   # During RU, Core Masters and Slaves need hdfs-site.xml
   # TODO, instead of specifying individual configs, which is susceptible to breaking when new configs are added,
   # RU should rely on all available in <stack-root>/<version>/hadoop/conf
-  if 'hdfs-site' in params.config['configurations']:
-    XmlConfig("hdfs-site.xml",
-              conf_dir=config_dir,
-              configurations=params.config['configurations']['hdfs-site'],
-              configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
-              owner=params.hdfs_user,
-              group=params.user_group,
-              mode=0644
-    )
+  XmlConfig("hdfs-site.xml",
+            conf_dir=config_dir,
+            configurations=params.config['configurations']['hdfs-site'],
+            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+            owner=params.hdfs_user,
+            group=params.user_group,
+            mode=0644
+  )
 
   XmlConfig("mapred-site.xml",
             conf_dir=config_dir,
@@ -282,85 +135,6 @@ def yarn(name=None, config_dir=None):
             mode=0644
   )
 
-  if name == 'resourcemanager':
-    Directory(params.rm_nodes_exclude_dir,
-         mode=0755,
-         create_parents=True,
-         cd_access='a',
-    )
-    File(params.rm_nodes_exclude_path,
-         owner=params.yarn_user,
-         group=params.user_group
-    )
-    File(params.yarn_job_summary_log,
-       owner=params.yarn_user,
-       group=params.user_group
-    )
-    if not is_empty(params.node_label_enable) and params.node_label_enable or is_empty(params.node_label_enable) and params.node_labels_dir:
-      params.HdfsResource(params.node_labels_dir,
-                           type="directory",
-                           action="create_on_execute",
-                           change_permissions_for_parents=True,
-                           owner=params.yarn_user,
-                           group=params.user_group,
-                           mode=0700
-      )
-      params.HdfsResource(None, action="execute")
-
-
-  elif name == 'apptimelineserver':
-    Directory(params.ats_leveldb_dir,
-       owner=params.yarn_user,
-       group=params.user_group,
-       create_parents = True,
-       cd_access="a",
-    )
-
-    # if stack support application timeline-service state store property (timeline_state_store stack feature)
-    if params.stack_supports_timeline_state_store:
-      Directory(params.ats_leveldb_state_store_dir,
-       owner=params.yarn_user,
-       group=params.user_group,
-       create_parents = True,
-       cd_access="a",
-      )
-    # app timeline server 1.5 directories
-    if not is_empty(params.entity_groupfs_store_dir):
-      parent_path = os.path.dirname(params.entity_groupfs_store_dir)
-      params.HdfsResource(parent_path,
-                          type="directory",
-                          action="create_on_execute",
-                          change_permissions_for_parents=True,
-                          owner=params.yarn_user,
-                          group=params.user_group,
-                          mode=0755
-                          )
-      params.HdfsResource(params.entity_groupfs_store_dir,
-                          type="directory",
-                          action="create_on_execute",
-                          owner=params.yarn_user,
-                          group=params.user_group,
-                          mode=params.entity_groupfs_store_dir_mode
-                          )
-    if not is_empty(params.entity_groupfs_active_dir):
-      parent_path = os.path.dirname(params.entity_groupfs_active_dir)
-      params.HdfsResource(parent_path,
-                          type="directory",
-                          action="create_on_execute",
-                          change_permissions_for_parents=True,
-                          owner=params.yarn_user,
-                          group=params.user_group,
-                          mode=0755
-                          )
-      params.HdfsResource(params.entity_groupfs_active_dir,
-                          type="directory",
-                          action="create_on_execute",
-                          owner=params.yarn_user,
-                          group=params.user_group,
-                          mode=params.entity_groupfs_active_dir_mode
-                          )
-    params.HdfsResource(None, action="execute")
-
   File(format("{limits_conf_dir}/yarn.conf"),
        mode=0644,
        content=Template('yarn.conf.j2')
@@ -378,8 +152,7 @@ def yarn(name=None, config_dir=None):
        content=InlineTemplate(params.yarn_env_sh_template)
   )
 
-  container_executor = format("{yarn_container_bin}/container-executor")
-  File(container_executor,
+  File(format("{yarn_container_bin}/container-executor"),
       group=params.yarn_executor_container_group,
       mode=params.container_executor_mode
   )
@@ -396,15 +169,8 @@ def yarn(name=None, config_dir=None):
             mode=0755,
             cd_access="a")
 
-  if params.security_enabled:
-    tc_mode = 0644
-    tc_owner = "root"
-  else:
-    tc_mode = None
-    tc_owner = params.hdfs_user
-
   File(os.path.join(config_dir, "mapred-env.sh"),
-       owner=tc_owner,
+       owner=params.tc_owner,
        mode=0755,
        content=InlineTemplate(params.mapred_env_sh_template)
   )
@@ -416,35 +182,34 @@ def yarn(name=None, config_dir=None):
          mode=06050
     )
     File(os.path.join(config_dir, 'taskcontroller.cfg'),
-         owner = tc_owner,
-         mode = tc_mode,
+         owner = params.tc_owner,
+         mode = params.tc_mode,
          group = params.mapred_tt_group,
          content=Template("taskcontroller.cfg.j2")
     )
   else:
     File(os.path.join(config_dir, 'taskcontroller.cfg'),
-         owner=tc_owner,
+         owner=params.tc_owner,
          content=Template("taskcontroller.cfg.j2")
     )
 
-  if "mapred-site" in params.config['configurations']:
-    XmlConfig("mapred-site.xml",
-              conf_dir=config_dir,
-              configurations=params.config['configurations']['mapred-site'],
-              configuration_attributes=params.config['configuration_attributes']['mapred-site'],
-              owner=params.mapred_user,
-              group=params.user_group
-    )
+  XmlConfig("mapred-site.xml",
+            conf_dir=config_dir,
+            configurations=params.config['configurations']['mapred-site'],
+            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
+            owner=params.mapred_user,
+            group=params.user_group
+  )
+
+  XmlConfig("capacity-scheduler.xml",
+            conf_dir=config_dir,
+            configurations=params.config['configurations'][
+              'capacity-scheduler'],
+            configuration_attributes=params.config['configuration_attributes']['capacity-scheduler'],
+            owner=params.hdfs_user,
+            group=params.user_group
+  )
 
-  if "capacity-scheduler" in params.config['configurations']:
-    XmlConfig("capacity-scheduler.xml",
-              conf_dir=config_dir,
-              configurations=params.config['configurations'][
-                'capacity-scheduler'],
-              configuration_attributes=params.config['configuration_attributes']['capacity-scheduler'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
   if "ssl-client" in params.config['configurations']:
     XmlConfig("ssl-client.xml",
               conf_dir=config_dir,
@@ -495,4 +260,239 @@ def yarn(name=None, config_dir=None):
     File(os.path.join(config_dir, 'ssl-server.xml.example'),
          owner=params.mapred_user,
          group=params.user_group
-    )
\ No newline at end of file
+    )
+
+def setup_historyserver():
+  import params
+
+  if params.yarn_log_aggregation_enabled:
+    params.HdfsResource(params.yarn_nm_app_log_dir,
+                         action="create_on_execute",
+                         type="directory",
+                         owner=params.yarn_user,
+                         group=params.user_group,
+                         mode=01777,
+                         recursive_chmod=True
+    )
+
+  # create the /tmp folder with proper permissions if it doesn't exist yet
+  if params.entity_file_history_directory.startswith('/tmp'):
+      params.HdfsResource(params.hdfs_tmp_dir,
+                          action="create_on_execute",
+                          type="directory",
+                          owner=params.hdfs_user,
+                          mode=0777,
+      )
+
+  params.HdfsResource(params.entity_file_history_directory,
+                         action="create_on_execute",
+                         type="directory",
+                         owner=params.yarn_user,
+                         group=params.user_group
+  )
+  params.HdfsResource("/mapred",
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.mapred_user
+  )
+  params.HdfsResource("/mapred/system",
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.hdfs_user
+  )
+  params.HdfsResource(params.mapreduce_jobhistory_done_dir,
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.mapred_user,
+                       group=params.user_group,
+                       change_permissions_for_parents=True,
+                       mode=0777
+  )
+  params.HdfsResource(None, action="execute")
+  Directory(params.jhs_leveldb_state_store_dir,
+            owner=params.mapred_user,
+            group=params.user_group,
+            create_parents = True,
+            cd_access="a",
+            recursive_ownership = True,
+            )
+
+def setup_nodemanager():
+  import params
+
+  # First start after enabling/disabling security
+  if params.toggle_nm_security:
+    Directory(params.nm_local_dirs_list + params.nm_log_dirs_list,
+              action='delete'
+    )
+
+    # If yarn.nodemanager.recovery.dir exists, remove this dir
+    if params.yarn_nodemanager_recovery_dir:
+      Directory(InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
+                action='delete'
+      )
+
+    # Setting NM marker file
+    if params.security_enabled:
+      Directory(params.nm_security_marker_dir)
+      File(params.nm_security_marker,
+           content="Marker file to track first start after enabling/disabling security. "
+                   "During first start yarn local, log dirs are removed and recreated"
+           )
+    elif not params.security_enabled:
+      File(params.nm_security_marker, action="delete")
+
+
+  if not params.security_enabled or params.toggle_nm_security:
+    # handle_mounted_dirs ensures that we don't create dirs which are temporary unavailable (unmounted), and intended to reside on a different mount.
+    nm_log_dir_to_mount_file_content = handle_mounted_dirs(create_log_dir, params.nm_log_dirs, params.nm_log_dir_to_mount_file, params)
+    # create a history file used by handle_mounted_dirs
+    File(params.nm_log_dir_to_mount_file,
+         owner=params.hdfs_user,
+         group=params.user_group,
+         mode=0644,
+         content=nm_log_dir_to_mount_file_content
+    )
+    nm_local_dir_to_mount_file_content = handle_mounted_dirs(create_local_dir, params.nm_local_dirs, params.nm_local_dir_to_mount_file, params)
+    File(params.nm_local_dir_to_mount_file,
+         owner=params.hdfs_user,
+         group=params.user_group,
+         mode=0644,
+         content=nm_local_dir_to_mount_file_content
+    )
+
+def setup_resourcemanager():
+  import params
+
+  Directory(params.rm_nodes_exclude_dir,
+       mode=0755,
+       create_parents=True,
+       cd_access='a',
+  )
+  File(params.rm_nodes_exclude_path,
+       owner=params.yarn_user,
+       group=params.user_group
+  )
+  File(params.yarn_job_summary_log,
+     owner=params.yarn_user,
+     group=params.user_group
+  )
+  if not is_empty(params.node_label_enable) and params.node_label_enable or is_empty(params.node_label_enable) and params.node_labels_dir:
+    params.HdfsResource(params.node_labels_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         change_permissions_for_parents=True,
+                         owner=params.yarn_user,
+                         group=params.user_group,
+                         mode=0700
+    )
+    params.HdfsResource(None, action="execute")
+
+def setup_ats():
+  import params
+
+  Directory(params.ats_leveldb_dir,
+     owner=params.yarn_user,
+     group=params.user_group,
+     create_parents = True,
+     cd_access="a",
+  )
+
+  # if stack support application timeline-service state store property (timeline_state_store stack feature)
+  if params.stack_supports_timeline_state_store:
+    Directory(params.ats_leveldb_state_store_dir,
+     owner=params.yarn_user,
+     group=params.user_group,
+     create_parents = True,
+     cd_access="a",
+    )
+  # app timeline server 1.5 directories
+  if not is_empty(params.entity_groupfs_store_dir):
+    parent_path = os.path.dirname(params.entity_groupfs_store_dir)
+    params.HdfsResource(parent_path,
+                        type="directory",
+                        action="create_on_execute",
+                        change_permissions_for_parents=True,
+                        owner=params.yarn_user,
+                        group=params.user_group,
+                        mode=0755
+                        )
+    params.HdfsResource(params.entity_groupfs_store_dir,
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.yarn_user,
+                        group=params.user_group,
+                        mode=params.entity_groupfs_store_dir_mode
+                        )
+  if not is_empty(params.entity_groupfs_active_dir):
+    parent_path = os.path.dirname(params.entity_groupfs_active_dir)
+    params.HdfsResource(parent_path,
+                        type="directory",
+                        action="create_on_execute",
+                        change_permissions_for_parents=True,
+                        owner=params.yarn_user,
+                        group=params.user_group,
+                        mode=0755
+                        )
+    params.HdfsResource(params.entity_groupfs_active_dir,
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.yarn_user,
+                        group=params.user_group,
+                        mode=params.entity_groupfs_active_dir_mode
+                        )
+  params.HdfsResource(None, action="execute")
+
+def create_log_dir(dir_name):
+  import params
+  Directory(dir_name,
+            create_parents = True,
+            cd_access="a",
+            mode=0775,
+            owner=params.yarn_user,
+            group=params.user_group,
+            ignore_failures=True,
+  )
+
+def create_local_dir(dir_name):
+  import params
+  Directory(dir_name,
+            create_parents = True,
+            cd_access="a",
+            mode=0755,
+            owner=params.yarn_user,
+            group=params.user_group,
+            ignore_failures=True,
+            recursive_mode_flags = {'f': 'a+rw', 'd': 'a+rwx'},
+  )
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def yarn(name = None):
+  import params
+  XmlConfig("mapred-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['mapred-site'],
+            owner=params.yarn_user,
+            mode='f'
+  )
+  XmlConfig("yarn-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['yarn-site'],
+            owner=params.yarn_user,
+            mode='f',
+            configuration_attributes=params.config['configuration_attributes']['yarn-site']
+  )
+  XmlConfig("capacity-scheduler.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['capacity-scheduler'],
+            owner=params.yarn_user,
+            mode='f'
+  )
+
+  if params.service_map.has_key(name):
+    service_name = params.service_map[name]
+
+    ServiceConfig(service_name,
+                  action="change_user",
+                  username = params.yarn_user,
+                  password = Script.get_password(params.yarn_user))
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/6fb1ceef/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
index 643f946..119dcf0 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
@@ -464,7 +464,6 @@ class TestHistoryServer(RMFTestCase):
                               )
 
   def assert_configure_secured(self):
-
     self.assertResourceCalled('HdfsResource', '/app-logs',
         immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,

http://git-wip-us.apache.org/repos/asf/ambari/blob/6fb1ceef/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
index 7b5ce18..c98a64d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
@@ -178,7 +178,19 @@ class TestResourceManager(RMFTestCase):
     self.assertNoMoreResources()
 
   def assert_configure_default(self):
-
+    self.assertResourceCalled('Directory', '/etc/hadoop/conf',
+        mode = 0755,
+        create_parents = True,
+        cd_access = 'a',
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
+        owner = 'yarn',
+        group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/var/log/hadoop-yarn/yarn/hadoop-mapreduce.jobsummary.log',
+      owner = 'yarn',
+      group = 'hadoop',
+    )
     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
       owner = 'yarn',
       group = 'hadoop',
@@ -268,19 +280,6 @@ class TestResourceManager(RMFTestCase):
       configurations = self.getConfig()['configurations']['capacity-scheduler'],
       configuration_attributes = self.getConfig()['configuration_attributes']['capacity-scheduler']
     )
-    self.assertResourceCalled('Directory', '/etc/hadoop/conf',
-        mode = 0755,
-        create_parents = True,
-        cd_access = 'a',
-    )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
-        owner = 'yarn',
-        group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/var/log/hadoop-yarn/yarn/hadoop-mapreduce.jobsummary.log',
-      owner = 'yarn',
-      group = 'hadoop',
-    )
     self.assertResourceCalled('File', '/etc/security/limits.d/yarn.conf',
       content = Template('yarn.conf.j2'),
       mode = 0644,
@@ -347,7 +346,19 @@ class TestResourceManager(RMFTestCase):
                               )
 
   def assert_configure_secured(self):
-
+    self.assertResourceCalled('Directory', '/etc/hadoop/conf',
+        mode = 0755,
+        create_parents = True,
+        cd_access = 'a',
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
+        owner = 'yarn',
+        group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/var/log/hadoop-yarn/yarn/hadoop-mapreduce.jobsummary.log',
+      owner = 'yarn',
+      group = 'hadoop',
+    )
     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
       owner = 'yarn',
       group = 'hadoop',
@@ -437,19 +448,6 @@ class TestResourceManager(RMFTestCase):
       configurations = self.getConfig()['configurations']['capacity-scheduler'],
       configuration_attributes = self.getConfig()['configuration_attributes']['capacity-scheduler']
     )
-    self.assertResourceCalled('Directory', '/etc/hadoop/conf',
-        mode = 0755,
-        create_parents = True,
-        cd_access = 'a',
-    )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
-        owner = 'yarn',
-        group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/var/log/hadoop-yarn/yarn/hadoop-mapreduce.jobsummary.log',
-      owner = 'yarn',
-      group = 'hadoop',
-    )
     self.assertResourceCalled('File', '/etc/security/limits.d/yarn.conf',
       content = Template('yarn.conf.j2'),
       mode = 0644,

http://git-wip-us.apache.org/repos/asf/ambari/blob/6fb1ceef/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py b/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
index dc11ba9..40db813 100644
--- a/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
+++ b/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
@@ -92,6 +92,26 @@ class TestAppTimelineServer(RMFTestCase):
     self.assertNoMoreResources()
 
   def assert_configure_default(self):
+    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/timeline',
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              create_parents = True,
+                              cd_access='a'
+                              )
+    self.assertResourceCalled('HdfsResource', None,
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
+                              security_enabled = False,
+                              hadoop_bin_dir = '/usr/bin',
+                              keytab = UnknownConfigurationMock(),
+                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+                              dfs_type = '',
+                              hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+                              kinit_path_local = '/usr/bin/kinit',
+                              principal_name = UnknownConfigurationMock(),
+                              user = 'hdfs',
+                              action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
+                              hadoop_conf_dir = '/etc/hadoop/conf',
+                              )
     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
                               owner = 'yarn',
                               group = 'hadoop',
@@ -181,26 +201,6 @@ class TestAppTimelineServer(RMFTestCase):
                               configurations = self.getConfig()['configurations']['capacity-scheduler'],
                               configuration_attributes = self.getConfig()['configuration_attributes']['capacity-scheduler']
                               )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/timeline',
-                              owner = 'yarn',
-                              group = 'hadoop',
-                              create_parents = True,
-                              cd_access='a'
-                              )
-    self.assertResourceCalled('HdfsResource', None,
-                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
-                              security_enabled = False,
-                              hadoop_bin_dir = '/usr/bin',
-                              keytab = UnknownConfigurationMock(),
-                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
-                              dfs_type = '',
-                              hdfs_site = self.getConfig()['configurations']['hdfs-site'],
-                              kinit_path_local = '/usr/bin/kinit',
-                              principal_name = UnknownConfigurationMock(),
-                              user = 'hdfs',
-                              action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
-                              hadoop_conf_dir = '/etc/hadoop/conf',
-                              )
     self.assertResourceCalled('File', '/etc/security/limits.d/yarn.conf',
                               content = Template('yarn.conf.j2'),
                               mode = 0644,

http://git-wip-us.apache.org/repos/asf/ambari/blob/6fb1ceef/ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py b/ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py
index abfe756..b523412 100644
--- a/ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py
+++ b/ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py
@@ -50,6 +50,100 @@ class TestAts(RMFTestCase):
     self.assertNoMoreResources()
 
   def assert_configure_default(self):
+    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/timeline',
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              create_parents = True,
+                              cd_access = 'a',
+                              )
+    self.assertResourceCalled('HdfsResource', '/ats',
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
+                              security_enabled = False,
+                              hadoop_bin_dir = '/usr/bin',
+                              keytab = UnknownConfigurationMock(),
+                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+                              dfs_type = '',
+                              hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+                              kinit_path_local = '/usr/bin/kinit',
+                              principal_name = UnknownConfigurationMock(),
+                              user = 'hdfs',
+                              change_permissions_for_parents = True,
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              hadoop_conf_dir = '/etc/hadoop/conf',
+                              type = 'directory',
+                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('HdfsResource', '/ats/done',
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
+                              security_enabled = False,
+                              hadoop_bin_dir = '/usr/bin',
+                              keytab = UnknownConfigurationMock(),
+                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+                              dfs_type = '',
+                              hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+                              kinit_path_local = '/usr/bin/kinit',
+                              principal_name = UnknownConfigurationMock(),
+                              user = 'hdfs',
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              hadoop_conf_dir = '/etc/hadoop/conf',
+                              type = 'directory',
+                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
+                              mode = 0700,
+                              )
+    self.assertResourceCalled('HdfsResource', '/ats',
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
+                              security_enabled = False,
+                              hadoop_bin_dir = '/usr/bin',
+                              keytab = UnknownConfigurationMock(),
+                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+                              dfs_type = '',
+                              hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+                              kinit_path_local = '/usr/bin/kinit',
+                              principal_name = UnknownConfigurationMock(),
+                              user = 'hdfs',
+                              change_permissions_for_parents = True,
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              hadoop_conf_dir = '/etc/hadoop/conf',
+                              type = 'directory',
+                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('HdfsResource', '/ats/active',
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
+                              security_enabled = False,
+                              hadoop_bin_dir = '/usr/bin',
+                              keytab = UnknownConfigurationMock(),
+                              dfs_type = '',
+                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+                              hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+                              kinit_path_local = '/usr/bin/kinit',
+                              principal_name = UnknownConfigurationMock(),
+                              user = 'hdfs',
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              hadoop_conf_dir = '/etc/hadoop/conf',
+                              type = 'directory',
+                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
+                              mode = 01777,
+                              )
+    self.assertResourceCalled('HdfsResource', None,
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
+                              security_enabled = False,
+                              hadoop_bin_dir = '/usr/bin',
+                              keytab = UnknownConfigurationMock(),
+                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+                              dfs_type = '',
+                              hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+                              kinit_path_local = '/usr/bin/kinit',
+                              principal_name = UnknownConfigurationMock(),
+                              user = 'hdfs',
+                              action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
+                              hadoop_conf_dir = '/etc/hadoop/conf',
+                              )
     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
                               owner = 'yarn',
                               group = 'hadoop',
@@ -146,100 +240,6 @@ class TestAts(RMFTestCase):
                               owner = 'yarn',
                               configurations = self.getConfig()['configurations']['capacity-scheduler'],
                               )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/timeline',
-                              owner = 'yarn',
-                              group = 'hadoop',
-                              create_parents = True,
-                              cd_access = 'a',
-                              )
-    self.assertResourceCalled('HdfsResource', '/ats',
-                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
-                              security_enabled = False,
-                              hadoop_bin_dir = '/usr/bin',
-                              keytab = UnknownConfigurationMock(),
-                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
-                              dfs_type = '',
-                              hdfs_site = self.getConfig()['configurations']['hdfs-site'],
-                              kinit_path_local = '/usr/bin/kinit',
-                              principal_name = UnknownConfigurationMock(),
-                              user = 'hdfs',
-                              change_permissions_for_parents = True,
-                              owner = 'yarn',
-                              group = 'hadoop',
-                              hadoop_conf_dir = '/etc/hadoop/conf',
-                              type = 'directory',
-                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('HdfsResource', '/ats/done',
-                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
-                              security_enabled = False,
-                              hadoop_bin_dir = '/usr/bin',
-                              keytab = UnknownConfigurationMock(),
-                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
-                              dfs_type = '',
-                              hdfs_site = self.getConfig()['configurations']['hdfs-site'],
-                              kinit_path_local = '/usr/bin/kinit',
-                              principal_name = UnknownConfigurationMock(),
-                              user = 'hdfs',
-                              owner = 'yarn',
-                              group = 'hadoop',
-                              hadoop_conf_dir = '/etc/hadoop/conf',
-                              type = 'directory',
-                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
-                              mode = 0700,
-                              )
-    self.assertResourceCalled('HdfsResource', '/ats',
-                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
-                              security_enabled = False,
-                              hadoop_bin_dir = '/usr/bin',
-                              keytab = UnknownConfigurationMock(),
-                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
-                              dfs_type = '',
-                              hdfs_site = self.getConfig()['configurations']['hdfs-site'],
-                              kinit_path_local = '/usr/bin/kinit',
-                              principal_name = UnknownConfigurationMock(),
-                              user = 'hdfs',
-                              change_permissions_for_parents = True,
-                              owner = 'yarn',
-                              group = 'hadoop',
-                              hadoop_conf_dir = '/etc/hadoop/conf',
-                              type = 'directory',
-                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('HdfsResource', '/ats/active',
-                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
-                              security_enabled = False,
-                              hadoop_bin_dir = '/usr/bin',
-                              keytab = UnknownConfigurationMock(),
-                              dfs_type = '',
-                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
-                              hdfs_site = self.getConfig()['configurations']['hdfs-site'],
-                              kinit_path_local = '/usr/bin/kinit',
-                              principal_name = UnknownConfigurationMock(),
-                              user = 'hdfs',
-                              owner = 'yarn',
-                              group = 'hadoop',
-                              hadoop_conf_dir = '/etc/hadoop/conf',
-                              type = 'directory',
-                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
-                              mode = 01777,
-                              )
-    self.assertResourceCalled('HdfsResource', None,
-                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
-                              security_enabled = False,
-                              hadoop_bin_dir = '/usr/bin',
-                              keytab = UnknownConfigurationMock(),
-                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
-                              dfs_type = '',
-                              hdfs_site = self.getConfig()['configurations']['hdfs-site'],
-                              kinit_path_local = '/usr/bin/kinit',
-                              principal_name = UnknownConfigurationMock(),
-                              user = 'hdfs',
-                              action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
-                              hadoop_conf_dir = '/etc/hadoop/conf',
-                              )
     self.assertResourceCalled('File', '/etc/security/limits.d/yarn.conf',
                               content = Template('yarn.conf.j2'),
                               mode = 0644,


[13/17] ambari git commit: AMBARI-18467. Ambari server does not come up after restart if cluster install fails. (Balazs Bence Sari via stoader)

Posted by jo...@apache.org.
AMBARI-18467. Ambari server does not come up after restart if cluster install fails. (Balazs Bence Sari via stoader)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2700bd12
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2700bd12
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2700bd12

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 2700bd125f2f9bbae2ebfcc0831ec881097b4cff
Parents: a0fff84
Author: Balazs Bence Sari <bs...@hortonworks.com>
Authored: Wed Sep 28 13:42:59 2016 +0200
Committer: Toader, Sebastian <st...@hortonworks.com>
Committed: Wed Sep 28 13:42:59 2016 +0200

----------------------------------------------------------------------
 .../stack/UpdateActiveRepoVersionOnStartup.java |  9 +++++--
 .../UpdateActiveRepoVersionOnStartupTest.java   | 28 +++++++++++++-------
 2 files changed, 26 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2700bd12/ambari-server/src/main/java/org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartup.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartup.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartup.java
index 1413c66..8a32a42 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartup.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartup.java
@@ -94,9 +94,14 @@ public class UpdateActiveRepoVersionOnStartup {
         LOG.info("Updating existing repo versions for cluster {} on stack {}-{}",
             cluster.getClusterName(), stack.getName(), stack.getVersion());
         ClusterVersionEntity clusterVersion = clusterVersionDao.findByClusterAndStateCurrent(cluster.getClusterName());
+        if (null != clusterVersion) {
           RepositoryVersionEntity repoVersion = clusterVersion.getRepositoryVersion();
-        updateRepoVersion(stack, repoVersion);
-        repositoryVersionDao.merge(repoVersion);
+          updateRepoVersion(stack, repoVersion);
+          repositoryVersionDao.merge(repoVersion);
+        }
+        else {
+          LOG.warn("Missing cluster version for cluster {}", cluster.getClusterName());
+        }
       }
     }
     catch(Exception ex) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/2700bd12/ambari-server/src/test/java/org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartupTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartupTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartupTest.java
index 9c54a88..24ab0e8 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartupTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartupTest.java
@@ -37,7 +37,6 @@ import org.apache.ambari.server.state.RepositoryInfo;
 import org.apache.ambari.server.state.StackInfo;
 import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper;
 import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
 
 import com.google.common.base.Charsets;
@@ -61,10 +60,17 @@ public class UpdateActiveRepoVersionOnStartupTest {
 
   @Test
   public void addAServiceRepoToExistingRepoVersion() throws Exception {
+    init(true);
     activeRepoUpdater.process();
     verifyRepoIsAdded();
   }
 
+  @Test
+  public void missingClusterVersionShouldNotCauseException() throws Exception {
+    init(false);
+    activeRepoUpdater.process();
+  }
+
   /**
    * Verifies if the add-on service repo is added to the repo version entity, both json and xml representations.
    *
@@ -84,8 +90,7 @@ public class UpdateActiveRepoVersionOnStartupTest {
     Assert.assertTrue(ADD_ON_REPO_ID + " is add-on repo was not added to JSON representation", serviceRepoAddedToJson);
   }
 
-  @Before
-  public void init() throws Exception {
+  public void init(boolean addClusterVersion) throws Exception {
     ClusterDAO clusterDao = mock(ClusterDAO.class);
     ClusterVersionDAO clusterVersionDAO = mock(ClusterVersionDAO.class);
     repositoryVersionDao = mock(RepositoryVersionDAO.class);
@@ -125,17 +130,22 @@ public class UpdateActiveRepoVersionOnStartupTest {
       }
     };
     Injector injector = Guice.createInjector(testModule);
-    repoVersion = new RepositoryVersionEntity();
-    repoVersion.setStack(stackEntity);
-    repoVersion.setOperatingSystems(resourceAsString("org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartupTest_initialRepos.json"));
-    ClusterVersionEntity clusterVersion = new ClusterVersionEntity();
-    clusterVersion.setRepositoryVersion(repoVersion);
-    when(clusterVersionDAO.findByClusterAndStateCurrent(CLUSTER_NAME)).thenReturn(clusterVersion);
+    if (addClusterVersion) {
+      repoVersion = new RepositoryVersionEntity();
+      repoVersion.setStack(stackEntity);
+      repoVersion.setOperatingSystems(resourceAsString("org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartupTest_initialRepos.json"));
+      ClusterVersionEntity clusterVersion = new ClusterVersionEntity();
+      clusterVersion.setRepositoryVersion(repoVersion);
+      when(clusterVersionDAO.findByClusterAndStateCurrent(CLUSTER_NAME)).thenReturn(clusterVersion);
+
+    }
 
     activeRepoUpdater = new UpdateActiveRepoVersionOnStartup(clusterDao,
         clusterVersionDAO, repositoryVersionDao, repositoryVersionHelper, metaInfo);
   }
 
+
+
   private static String resourceAsString(String resourceName) throws IOException {
     return Resources.toString(Resources.getResource(resourceName), Charsets.UTF_8);
   }


[06/17] ambari git commit: AMBARI-18051 - Services should be able to provide their own pre-req checks by supplying a jar file

Posted by jo...@apache.org.
AMBARI-18051 - Services should be able to provide their own pre-req checks by supplying a jar file


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/87423d64
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/87423d64
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/87423d64

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 87423d64f54d896c62d1a9245eb03a97763e35a4
Parents: 6d35228
Author: Tim Thorpe <tt...@apache.org>
Authored: Tue Sep 27 06:05:31 2016 -0700
Committer: Tim Thorpe <tt...@apache.org>
Committed: Tue Sep 27 06:05:31 2016 -0700

----------------------------------------------------------------------
 ambari-server/pom.xml                           |  34 +++
 .../ambari/server/checks/CheckDescription.java  |   5 +-
 .../server/checks/UpgradeCheckRegistry.java     |  76 ++++++
 .../PreUpgradeCheckResourceProvider.java        |  27 +-
 .../server/stack/CommonServiceDirectory.java    |  50 ++--
 .../ambari/server/stack/ServiceDirectory.java   | 112 +++++++-
 .../ambari/server/stack/ServiceModule.java      |   4 +
 .../server/stack/StackServiceDirectory.java     |  81 +++---
 .../apache/ambari/server/state/ServiceInfo.java |  14 +
 .../PreUpgradeCheckResourceProviderTest.java    | 255 +++++++++++++++++++
 .../sample/checks/SampleServiceCheck.java       |  35 +++
 .../ambari/server/stack/ServiceModuleTest.java  |  30 +++
 .../server/stack/StackManagerExtensionTest.java |   7 +
 13 files changed, 636 insertions(+), 94 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/87423d64/ambari-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index 5731c9d..354b6cb 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -576,6 +576,40 @@
         </configuration>
       </plugin>
       <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-jar-plugin</artifactId>
+        <version>3.0.2</version>
+        <executions>
+          <execution>
+            <id>create-sample-upgrade-check-jar</id>
+            <phase>process-test-classes</phase>
+            <goals>
+              <goal>test-jar</goal>
+            </goals>
+            <configuration>
+              <outputDirectory>target/test-classes/checks</outputDirectory>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>generate-test-oozie2-checks-dir</id>
+            <phase>process-test-classes</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <tasks>
+                <mkdir dir="target/test-classes/extensions/EXT/0.1/services/OOZIE2/checks/tmp"/>
+              </tasks>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
         <groupId>org.codehaus.mojo</groupId>
         <artifactId>exec-maven-plugin</artifactId>
         <version>1.2.1</version>

http://git-wip-us.apache.org/repos/asf/ambari/blob/87423d64/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java
index 498481d..7fdd0ce 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java
@@ -309,9 +309,8 @@ public class CheckDescription {
   private PrereqCheckType m_type;
   private String m_description;
   private Map<String, String> m_fails;
-  public CheckDescription(String name, PrereqCheckType type, String description,
-      Map<String, String> fails) {
-	m_name = name;
+  public CheckDescription(String name, PrereqCheckType type, String description, Map<String, String> fails) {
+    m_name = name;
     m_type = type;
     m_description = description;
     m_fails = fails;

http://git-wip-us.apache.org/repos/asf/ambari/blob/87423d64/ambari-server/src/main/java/org/apache/ambari/server/checks/UpgradeCheckRegistry.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/UpgradeCheckRegistry.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/UpgradeCheckRegistry.java
index 4ed345c..cecf6c5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/UpgradeCheckRegistry.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/UpgradeCheckRegistry.java
@@ -17,14 +17,24 @@
  */
 package org.apache.ambari.server.checks;
 
+import java.io.File;
+import java.io.FilenameFilter;
+import java.net.URL;
+import java.net.URLClassLoader;
 import java.util.ArrayList;
 import java.util.Comparator;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
 
 import com.google.inject.Singleton;
+
+import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.stack.UpgradePack;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.util.ClassUtils;
 
 /**
  * The {@link UpgradeCheckRegistry} contains the ordered list of all pre-upgrade
@@ -33,6 +43,7 @@ import org.apache.ambari.server.state.stack.UpgradePack;
  */
 @Singleton
 public class UpgradeCheckRegistry {
+  private static Logger LOG = LoggerFactory.getLogger(UpgradeCheckRegistry.class);
 
   /**
    * The list of upgrade checks to run through.
@@ -59,6 +70,71 @@ public class UpgradeCheckRegistry {
     return new ArrayList<AbstractCheckDescriptor>(m_upgradeChecks);
   }
 
+  public List<AbstractCheckDescriptor> getServiceLevelUpgradeChecks(UpgradePack upgradePack, Map<String, ServiceInfo> services) {
+    List<String> prerequisiteChecks = upgradePack.getPrerequisiteChecks();
+    List<String> missingChecks = new ArrayList<String>();
+    for (String prerequisiteCheck : prerequisiteChecks) {
+      if (!isRegistered(prerequisiteCheck)) {
+        missingChecks.add(prerequisiteCheck);
+      }
+    }
+
+    List<AbstractCheckDescriptor> checks = new ArrayList<>(missingChecks.size());
+    if (missingChecks.isEmpty()) {
+      return checks;
+    }
+
+    List<URL> urls = new ArrayList<URL>();
+    for (ServiceInfo service : services.values()) {
+      File dir = service.getChecksFolder();
+      File[] jars = dir.listFiles(new FilenameFilter() {
+        @Override
+        public boolean accept(File dir, String name) {
+          return name.endsWith(".jar");
+        }
+      });
+      for (File jar : jars) {
+        try {
+          URL url = jar.toURI().toURL();
+          urls.add(url);
+          LOG.debug("Adding service check jar to classpath: {}", url.toString());
+        }
+        catch (Exception e) {
+          LOG.error("Failed to add service check jar to classpath: {}", jar.getAbsolutePath(), e);
+        }
+      }
+    }
+
+    ClassLoader classLoader = new URLClassLoader(urls.toArray(new URL[urls.size()]), ClassUtils.getDefaultClassLoader());
+    for (String prerequisiteCheck : missingChecks) {
+      Class<?> clazz = null;
+      try {
+        clazz = ClassUtils.resolveClassName(prerequisiteCheck, classLoader);
+      }
+      catch (IllegalArgumentException illegalArgumentException) {
+        LOG.error("Unable to find upgrade check {}", prerequisiteCheck, illegalArgumentException);
+      }
+      try {
+        if (clazz != null) {
+          AbstractCheckDescriptor upgradeCheck = (AbstractCheckDescriptor) clazz.newInstance();
+          checks.add(upgradeCheck);
+        }
+      } catch (Exception exception) {
+        LOG.error("Unable to create upgrade check {}", prerequisiteCheck, exception);
+      }
+    }
+    return checks;
+  }
+
+  private boolean isRegistered(String prerequisiteCheck) {
+    for (AbstractCheckDescriptor descriptor: m_upgradeChecks){
+      if (prerequisiteCheck.equals(descriptor.getClass().getName())){
+        return true;
+      }
+    }
+    return false;
+  }
+
   /**
    * Gets an ordered and filtered list of the upgrade checks.
    * @param upgradePack Upgrade pack object with the list of required checks to be included

http://git-wip-us.apache.org/repos/asf/ambari/blob/87423d64/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProvider.java
index 7d7b618..7e54f83 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProvider.java
@@ -44,21 +44,26 @@ import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.CheckHelper;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.UpgradeHelper;
 import org.apache.ambari.server.state.stack.PrerequisiteCheck;
+import org.apache.ambari.server.state.stack.UpgradePack;
+import org.apache.ambari.server.state.stack.upgrade.Direction;
+import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Sets;
 import com.google.inject.Inject;
 import com.google.inject.Provider;
-import org.apache.ambari.server.state.stack.UpgradePack;
-import org.apache.ambari.server.state.stack.upgrade.Direction;
-import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
 
 /**
  * Resource provider for pre-upgrade checks.
  */
 @StaticallyInject
 public class PreUpgradeCheckResourceProvider extends ReadOnlyResourceProvider {
+  private static Logger LOG = LoggerFactory.getLogger(PreUpgradeCheckResourceProvider.class);
 
   //----- Property ID constants ---------------------------------------------
 
@@ -89,6 +94,9 @@ public class PreUpgradeCheckResourceProvider extends ReadOnlyResourceProvider {
   @Inject
   private static Provider<UpgradeHelper> upgradeHelper;
 
+  @Inject
+  private static CheckHelper checkHelper;
+
   private static Set<String> pkPropertyIds = Collections.singleton(UPGRADE_CHECK_ID_PROPERTY_ID);
 
   public static Set<String> propertyIds = Sets.newHashSet(
@@ -113,9 +121,6 @@ public class PreUpgradeCheckResourceProvider extends ReadOnlyResourceProvider {
     }
   };
 
-  @Inject
-  private static CheckHelper checkHelper;
-
   /**
    * Constructor.
    *
@@ -185,10 +190,18 @@ public class PreUpgradeCheckResourceProvider extends ReadOnlyResourceProvider {
       }
 
       // ToDo: properly handle exceptions, i.e. create fake check with error description
-
       List<AbstractCheckDescriptor> upgradeChecksToRun = upgradeCheckRegistry.getFilteredUpgradeChecks(upgradePack);
       upgradeCheckRequest.setPrerequisiteCheckConfig(upgradePack.getPrerequisiteCheckConfig());
 
+      try {
+        // Register all the custom prechecks from the services
+        Map<String, ServiceInfo> services = getManagementController().getAmbariMetaInfo().getServices(stackName, upgradePack.getTarget());
+        List<AbstractCheckDescriptor> serviceLevelUpgradeChecksToRun = upgradeCheckRegistry.getServiceLevelUpgradeChecks(upgradePack, services);
+        upgradeChecksToRun.addAll(serviceLevelUpgradeChecksToRun);
+      } catch (AmbariException ambariException) {
+        LOG.error("Unable to register all the custom prechecks from the services", ambariException);
+      }
+
       for (PrerequisiteCheck prerequisiteCheck : checkHelper.performChecks(upgradeCheckRequest, upgradeChecksToRun)) {
         final Resource resource = new ResourceImpl(Resource.Type.PreUpgradeCheck);
         setResourceProperty(resource, UPGRADE_CHECK_ID_PROPERTY_ID, prerequisiteCheck.getId(), requestedIds);

http://git-wip-us.apache.org/repos/asf/ambari/blob/87423d64/ambari-server/src/main/java/org/apache/ambari/server/stack/CommonServiceDirectory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/CommonServiceDirectory.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/CommonServiceDirectory.java
index cdedbb4..40e7105 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/CommonServiceDirectory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/CommonServiceDirectory.java
@@ -19,8 +19,6 @@
 package org.apache.ambari.server.stack;
 
 import org.apache.ambari.server.AmbariException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.io.File;
 
@@ -28,10 +26,6 @@ import java.io.File;
  * Encapsulates IO operations on a common services directory.
  */
 public class CommonServiceDirectory extends ServiceDirectory {
-  /**
-   * logger instance
-   */
-  private static final Logger LOG = LoggerFactory.getLogger(CommonServiceDirectory.class);
 
   /**
    * Constructor.
@@ -62,36 +56,30 @@ public class CommonServiceDirectory extends ServiceDirectory {
 
   @Override
   /**
-   * Calculate the common service directories
-   * packageDir Format: common-services/<serviceName>/<serviceVersion>/package
-   * Example:
-   *  directory: "/var/lib/ambari-server/resources/common-services/HDFS/1.0"
-   *  packageDir: "common-services/HDFS/1.0/package"
+   * @return the service name-version (will be used for logging purposes by superclass)
    */
-  protected void calculateDirectories() {
+  public String getService() {
     File serviceVersionDir = new File(getAbsolutePath());
     File serviceDir = serviceVersionDir.getParentFile();
 
-    String serviceId = String.format("%s/%s", serviceDir.getName(), serviceVersionDir.getName());
+    String service = String.format("%s-%s", serviceDir.getName(), serviceVersionDir.getName());
+    return service;
+  }
 
-    File absPackageDir = new File(getAbsolutePath() + File.separator + PACKAGE_FOLDER_NAME);
-    if(absPackageDir.isDirectory()) {
-      packageDir = absPackageDir.getPath().substring(serviceDir.getParentFile().getParentFile().getPath().length() + 1);
-      LOG.debug(String.format("Service package folder for common service %s has been resolved to %s",
-          serviceId, packageDir));
-    } else {
-      LOG.debug(String.format("Service package folder %s for common service %s does not exist.",
-          absPackageDir, serviceId ));
-    }
+  @Override
+  /**
+   * @return the resources directory
+   */
+  protected File getResourcesDirectory() {
+    File serviceVersionDir = new File(getAbsolutePath());
+    return serviceVersionDir.getParentFile().getParentFile().getParentFile();
+  }
 
-    File absUpgradesDir = new File(getAbsolutePath() + File.separator + UPGRADES_FOLDER_NAME);
-    if(absUpgradesDir.isDirectory()) {
-      upgradesDir = absUpgradesDir;
-      LOG.debug(String.format("Service upgrades folder for common service %s has been resolved to %s",
-          serviceId, upgradesDir));
-    } else {
-      LOG.debug(String.format("Service upgrades folder %s for common service %s does not exist.",
-          absUpgradesDir, serviceId ));
-    }
+  @Override
+  /**
+   * @return the text common-services (will be used for logging purposes by superclass)
+   */
+  public String getStack() {
+    return "common-services";
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/87423d64/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceDirectory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceDirectory.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceDirectory.java
index 30663a3..00dc046 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceDirectory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceDirectory.java
@@ -88,6 +88,11 @@ public abstract class ServiceDirectory extends StackDefinitionDirectory {
   protected File upgradesDir;
 
   /**
+   * checks directory path
+   */
+  protected File checksDir;
+
+  /**
    * service metainfo file object representation
    */
   private ServiceMetainfoXml metaInfoXml;
@@ -108,6 +113,11 @@ public abstract class ServiceDirectory extends StackDefinitionDirectory {
   protected static final String UPGRADES_FOLDER_NAME = "upgrades";
 
   /**
+   * checks directory name
+   */
+  protected static final String CHECKS_FOLDER_NAME = "checks";
+
+  /**
    * service metainfo file name
    */
   private static final String SERVICE_METAINFO_FILE_NAME = "metainfo.xml";
@@ -153,6 +163,15 @@ public abstract class ServiceDirectory extends StackDefinitionDirectory {
   }
 
   /**
+   * Obtain the checks directory path.
+   *
+   * @return checks directory path
+   */
+  public File getChecksDir() {
+    return checksDir;
+  }
+
+  /**
    * Obtain the metrics file.
    *
    * @return metrics file
@@ -235,7 +254,7 @@ public abstract class ServiceDirectory extends StackDefinitionDirectory {
    * Parse the service directory.
    */
   protected void parsePath() throws AmbariException {
-    calculateDirectories();
+    calculateDirectories(getStack(), getService());
     parseMetaInfoFile();
 
     File af = new File(directory, AmbariMetaInfo.SERVICE_ALERT_FILE_NAME);
@@ -265,12 +284,101 @@ public abstract class ServiceDirectory extends StackDefinitionDirectory {
 
     File themeFile = new File(directory, AmbariMetaInfo.SERVICE_THEME_FILE_NAME);
     this.themeFile = themeFile.exists() ? themeFile : null;
+
+    File checksFile = new File(directory, AmbariMetaInfo.SERVICE_THEME_FILE_NAME);
+    this.themeFile = themeFile.exists() ? themeFile : null;
   }
 
   /**
+   * @return the service identifier required.  ex: service name for stack services or the service/version for common services
+   */
+  public abstract String getService();
+
+  /**
+   * @return the stack name/version or common-services
+   */
+  public abstract String getStack();
+
+  /**
    * Calculate the service specific directories.
    */
-  protected abstract void calculateDirectories();
+  protected void calculateDirectories(String stack, String service) {
+	  calculatePackageDirectory(stack, service);
+	  calculateUpgradesDirectory(stack, service);
+	  calculateChecksDirectory(stack, service);
+  }
+
+  /**
+   * @param directoryName
+   * @param stack
+   * @param service
+   * @return the directory if it exists and is not empty
+   */
+  protected File resolveDirectory(String directoryName, String stack, String service) {
+    File directory = new File(getAbsolutePath() + File.separator + directoryName);
+    if (directory.isDirectory()) {
+      String[] files = directory.list();
+      int fileCount = files.length;
+      if (fileCount > 0) {
+        LOG.debug("Service {} folder for service {} in {} has been resolved to {}", directoryName, service, stack, directory);
+        return directory;
+      }
+      else {
+        LOG.debug("Service folder {} is empty.", directory);
+      }
+    }
+    else {
+      LOG.debug("Service folder {}does not exist.", directory);
+    }
+    return null;
+  }
+
+  /**
+   * @param directoryName
+   * @param stack
+   * @param service
+   * @return the relative path of the directory if it exists and is not empty
+   */
+  protected String resolveRelativeDirectoryPathString(File resourcesDir, String directoryName, String stack, String service) {
+    File dir = resolveDirectory(directoryName, stack, service);
+    if (dir != null) {
+      return dir.getPath().substring(resourcesDir.getPath().length() + 1);
+    }
+    return null;
+  }
+
+  /**
+   *  @return the resources directory
+   */
+  protected abstract File getResourcesDirectory();
+
+  /**
+   * Sets the packageDir if the path exists and is not empty
+   * @param stack
+   * @param service
+   */
+  protected void calculatePackageDirectory(String stack, String service) {
+    packageDir = resolveRelativeDirectoryPathString(getResourcesDirectory(), PACKAGE_FOLDER_NAME, stack, service);
+
+  }
+
+  /**
+   * Sets the upgradesDir if the dir exists and is not empty
+   * @param stack
+   * @param service
+   */
+  protected void calculateUpgradesDirectory(String stack, String service) {
+    upgradesDir = resolveDirectory(UPGRADES_FOLDER_NAME, stack, service);
+  }
+
+  /**
+   * Sets the checksDir if the dir exists and is not empty
+   * @param stack
+   * @param service
+   */
+  protected void calculateChecksDirectory(String stack, String service) {
+    checksDir = resolveDirectory(CHECKS_FOLDER_NAME, stack, service);
+  }
 
   /**
    * Unmarshal the metainfo file into its object representation.

http://git-wip-us.apache.org/repos/asf/ambari/blob/87423d64/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceModule.java
index a77a22f..650bdf1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceModule.java
@@ -142,6 +142,7 @@ public class ServiceModule extends BaseModule<ServiceModule, ServiceInfo> implem
     serviceInfo.setSchemaVersion(AmbariMetaInfo.SCHEMA_VERSION_2);
     serviceInfo.setServicePackageFolder(serviceDirectory.getPackageDir());
     serviceInfo.setServiceUpgradesFolder(serviceDirectory.getUpgradesDir());
+    serviceInfo.setChecksFolder(serviceDirectory.getChecksDir());
     serviceInfo.setAdvisorFile(serviceDirectory.getAdvisorFile());
     serviceInfo.setAdvisorName(serviceDirectory.getAdvisorName(serviceInfo.getName()));
 
@@ -253,6 +254,9 @@ public class ServiceModule extends BaseModule<ServiceModule, ServiceInfo> implem
     if (serviceInfo.getRoleCommandOrder() == null) {
       serviceInfo.setRoleCommandOrder(parent.getRoleCommandOrder());
     }
+    if (serviceInfo.getChecksFolder() == null) {
+      serviceInfo.setChecksFolder(parent.getChecksFolder());
+    }
 
     mergeCustomCommands(parent.getCustomCommands(), serviceInfo.getCustomCommands());
     mergeConfigDependencies(parent);

http://git-wip-us.apache.org/repos/asf/ambari/blob/87423d64/ambari-server/src/main/java/org/apache/ambari/server/stack/StackServiceDirectory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackServiceDirectory.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackServiceDirectory.java
index 8656896..611b6bd 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackServiceDirectory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackServiceDirectory.java
@@ -28,12 +28,19 @@ import org.apache.ambari.server.state.stack.RepositoryXml;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+
 /**
  * Encapsulates IO operations on a stack service directory.
  */
 public class StackServiceDirectory extends ServiceDirectory {
 
   /**
+   * logger instance
+   */
+  private static final Logger LOG = LoggerFactory.getLogger(StackServiceDirectory.class);
+
+
+  /**
    * repository file
    */
   @Nullable
@@ -45,12 +52,6 @@ public class StackServiceDirectory extends ServiceDirectory {
   @Nullable
   private String repoDir;
 
-
-  /**
-   * logger instance
-   */
-  private static final Logger LOG = LoggerFactory.getLogger(StackServiceDirectory.class);
-
   /**
    * Constructor.
    *
@@ -61,7 +62,6 @@ public class StackServiceDirectory extends ServiceDirectory {
     super(servicePath);
   }
 
-
   /**
    * Obtain the repository xml file if exists or null
    *
@@ -82,7 +82,6 @@ public class StackServiceDirectory extends ServiceDirectory {
     return repoDir;
   }
 
-
   @Override
   /**
    * Obtain the advisor name.
@@ -129,55 +128,35 @@ public class StackServiceDirectory extends ServiceDirectory {
 
   @Override
   /**
-   * Calculate the stack service directories.
-   * packageDir Format: stacks/<stackName>/<stackVersion>/services/<serviceName>/package
-   * Example:
-   *  directory: "/var/lib/ambari-server/resources/stacks/HDP/2.0.6/services/HDFS"
-   *  packageDir: "stacks/HDP/2.0.6/services/HDFS/package"
+   * @return the resources directory
    */
-  protected void calculateDirectories() {
+  protected File getResourcesDirectory() {
     File serviceDir = new File(getAbsolutePath());
-    File stackVersionDir = serviceDir.getParentFile().getParentFile();
-    File stackDir = stackVersionDir.getParentFile();
+    return serviceDir.getParentFile().getParentFile().getParentFile().getParentFile().getParentFile();
+  }
 
-    String stackId = String.format("%s-%s", stackDir.getName(), stackVersionDir.getName());
 
-    File absPackageDir = new File(getAbsolutePath() + File.separator + PACKAGE_FOLDER_NAME);
-    if (absPackageDir.isDirectory()) {
-      String[] files = absPackageDir.list();
-      int fileCount = files.length;
-      if (fileCount > 0) {
-        packageDir = absPackageDir.getPath().substring(stackDir.getParentFile().getParentFile().getPath().length() + 1);
-        LOG.debug("Service package folder for service {} for stack {} has been resolved to {}",
-                serviceDir.getName(), stackId, packageDir);
-      }
-      else {
-        LOG.debug("Service package folder {} for service {} for stack {} is empty.",
-                absPackageDir, serviceDir.getName(), stackId);
-      }
-    } else {
-      LOG.debug("Service package folder {} for service {} for stack {} does not exist.",
-              absPackageDir, serviceDir.getName(), stackId);
-    }
+  @Override
+  /**
+   * @return the service name (will be used for logging purposes by superclass)
+   */
+  public String getService() {
+    File serviceDir = new File(getAbsolutePath());
 
-    File absUpgradesDir = new File(getAbsolutePath() + File.separator + UPGRADES_FOLDER_NAME);
-    if (absUpgradesDir.isDirectory()) {
-      String[] files = absUpgradesDir.list();
-      int fileCount = files.length;
-      if (fileCount > 0) {
-        upgradesDir = absUpgradesDir;
-        LOG.debug("Service upgrades folder for service {} for stack {} has been resolved to {}",
-                serviceDir.getName(), stackId, packageDir);
-      }
-      else {
-        LOG.debug("Service upgrades folder {} for service {} for stack {} is empty.",
-                absUpgradesDir, serviceDir.getName(), stackId);
-      }
-    } else {
-      LOG.debug("Service upgrades folder {} for service {} for stack {} does not exist.",
-              absUpgradesDir, serviceDir.getName(), stackId);
-    }
+    return serviceDir.getName();
   }
 
+  @Override
+  /**
+   * @return the stack name-version (will be used for logging purposes by superclass)
+   */
+  public String getStack() {
+    File serviceDir = new File(getAbsolutePath());
+    File stackVersionDir = serviceDir.getParentFile().getParentFile();
+    File stackDir = stackVersionDir.getParentFile();
+
+    String stackId = String.format("%s-%s", stackDir.getName(), stackVersionDir.getName());
+    return stackId;
+  }
 
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/87423d64/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java
index 6fda8bc..b0d81c3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java
@@ -270,6 +270,12 @@ public class ServiceInfo implements Validable{
   @XmlTransient
   private File serviceUpgradesFolder;
 
+  /**
+   * Stores the path to the checks folder which contains prereq check jars for the given service.
+   */
+  @XmlTransient
+  private File checksFolder;
+
   public boolean isDeleted() {
     return isDeleted;
   }
@@ -600,6 +606,14 @@ public String getVersion() {
     this.serviceUpgradesFolder = serviceUpgradesFolder;
   }
 
+  public File getChecksFolder() {
+    return checksFolder;
+  }
+
+  public void setChecksFolder(File checksFolder) {
+    this.checksFolder = checksFolder;
+  }
+
   /**
    * Exposes (and initializes on first use) map of os-specific details.
    * @return  map of OS specific details keyed by family

http://git-wip-us.apache.org/repos/asf/ambari/blob/87423d64/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProviderTest.java
new file mode 100644
index 0000000..6a0fa12
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProviderTest.java
@@ -0,0 +1,255 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.internal;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.actionmanager.ActionDBAccessor;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.checks.AbstractCheckDescriptor;
+import org.apache.ambari.server.checks.UpgradeCheckRegistry;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.KerberosHelper;
+import org.apache.ambari.server.controller.MaintenanceStateHelper;
+import org.apache.ambari.server.controller.RequestStatusResponse;
+import org.apache.ambari.server.controller.spi.Predicate;
+import org.apache.ambari.server.controller.spi.Request;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.spi.ResourceProvider;
+import org.apache.ambari.server.controller.spi.SystemException;
+import org.apache.ambari.server.controller.utilities.PredicateBuilder;
+import org.apache.ambari.server.controller.utilities.PropertyHelper;
+import org.apache.ambari.server.events.jpa.EntityManagerCacheInvalidationEvent;
+import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.scheduler.ExecutionScheduler;
+import org.apache.ambari.server.stack.StackManagerFactory;
+import org.apache.ambari.server.state.CheckHelper;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceFactory;
+import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.UpgradeHelper;
+import org.apache.ambari.server.state.stack.OsFamily;
+import org.apache.ambari.server.state.stack.PrereqCheckStatus;
+import org.apache.ambari.server.state.stack.PrereqCheckType;
+import org.apache.ambari.server.state.stack.UpgradePack;
+import org.apache.ambari.server.state.stack.UpgradePack.PrerequisiteCheckConfig;
+import org.apache.ambari.server.state.stack.upgrade.Direction;
+import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
+import org.easymock.EasyMock;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.File;
+import java.lang.reflect.Field;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import javax.persistence.EntityManager;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.Provider;
+
+import static org.easymock.EasyMock.anyBoolean;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.createStrictMock;
+import static org.easymock.EasyMock.eq;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.isNull;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
+
+/**
+ * PreUpgradeCheckResourceProvider tests.
+ */
+public class PreUpgradeCheckResourceProviderTest {
+
+  @Test
+  public void testGetResources() throws Exception{
+    Injector injector = createInjector();
+    AmbariManagementController managementController = injector.getInstance(AmbariManagementController.class);
+
+    Clusters clusters = injector.getInstance(Clusters.class);
+    UpgradeHelper upgradeHelper = injector.getInstance(UpgradeHelper.class);
+
+    RepositoryVersionDAO repoDao = injector.getInstance(RepositoryVersionDAO.class);
+    RepositoryVersionEntity repo = createNiceMock(RepositoryVersionEntity.class);
+    UpgradePack upgradePack = createNiceMock(UpgradePack.class);
+    PrerequisiteCheckConfig config = createNiceMock(PrerequisiteCheckConfig.class);
+
+    Cluster cluster = createNiceMock(Cluster.class);
+    Service service = createNiceMock(Service.class);
+    ServiceInfo serviceInfo = createNiceMock(ServiceInfo.class);
+
+    StackId currentStackId = createNiceMock(StackId.class);
+    StackId targetStackId = createNiceMock(StackId.class);
+    ServiceFactory serviceFactory = createNiceMock(ServiceFactory.class);
+    AmbariMetaInfo ambariMetaInfo = createNiceMock(AmbariMetaInfo.class);
+
+    Map<String, Service> allServiceMap = new HashMap<String, Service>();
+    allServiceMap.put("Service100", service);
+    Map<String, ServiceInfo> allServiceInfoMap = new HashMap<String, ServiceInfo>();
+    allServiceInfoMap.put("Service100", serviceInfo);
+
+    // set expectations
+    expect(managementController.getClusters()).andReturn(clusters).anyTimes();
+    expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
+    expect(managementController.getServiceFactory()).andReturn(serviceFactory).anyTimes();
+
+    expect(clusters.getCluster("Cluster100")).andReturn(cluster).anyTimes();
+    expect(cluster.getServices()).andReturn(allServiceMap).anyTimes();
+    expect(cluster.getService("Service100")).andReturn(service).anyTimes();
+    expect(cluster.getCurrentStackVersion()).andReturn(currentStackId).anyTimes();
+
+    expect(currentStackId.getStackName()).andReturn("Stack100").anyTimes();
+    expect(currentStackId.getStackVersion()).andReturn("1.0").anyTimes();
+    expect(targetStackId.getStackName()).andReturn("Stack100").anyTimes();
+    expect(targetStackId.getStackVersion()).andReturn("1.1").anyTimes();
+
+    expect(repoDao.findByStackNameAndVersion("Stack100", "Repo100")).andReturn(repo).anyTimes();
+    expect(repo.getStackId()).andReturn(targetStackId).atLeastOnce();
+    expect(upgradeHelper.suggestUpgradePack("Cluster100", "1.0", "Repo100", Direction.UPGRADE, UpgradeType.NON_ROLLING, "upgrade_pack11")).andReturn(upgradePack);
+
+    List<AbstractCheckDescriptor> upgradeChecksToRun = new LinkedList<AbstractCheckDescriptor>();
+    List<String> prerequisiteChecks = new LinkedList<String>();
+    prerequisiteChecks.add("org.apache.ambari.server.sample.checks.SampleServiceCheck");
+    expect(upgradePack.getPrerequisiteCheckConfig()).andReturn(config);
+    expect(upgradePack.getPrerequisiteChecks()).andReturn(prerequisiteChecks).anyTimes();
+    expect(upgradePack.getTarget()).andReturn("1.1").anyTimes();
+
+    expect(ambariMetaInfo.getServices("Stack100", "1.1")).andReturn(allServiceInfoMap).anyTimes();
+    String checks = ClassLoader.getSystemClassLoader().getResource("checks").getPath();
+    expect(serviceInfo.getChecksFolder()).andReturn(new File(checks));
+
+    // replay
+    replay(managementController, clusters, cluster, service, serviceInfo, repoDao, repo, upgradeHelper,
+        ambariMetaInfo, upgradePack, config, currentStackId, targetStackId, serviceFactory);
+
+    ResourceProvider provider = getPreUpgradeCheckResourceProvider(managementController, injector);
+    // create the request
+    Request request = PropertyHelper.getReadRequest(new HashSet<String>());
+    PredicateBuilder builder = new PredicateBuilder();
+    Predicate predicate = builder.property(PreUpgradeCheckResourceProvider.UPGRADE_CHECK_CLUSTER_NAME_PROPERTY_ID).equals("Cluster100").and()
+        .property(PreUpgradeCheckResourceProvider.UPGRADE_CHECK_UPGRADE_PACK_PROPERTY_ID).equals("upgrade_pack11").and()
+        .property(PreUpgradeCheckResourceProvider.UPGRADE_CHECK_UPGRADE_TYPE_PROPERTY_ID).equals(UpgradeType.NON_ROLLING).and()
+        .property(PreUpgradeCheckResourceProvider.UPGRADE_CHECK_REPOSITORY_VERSION_PROPERTY_ID).equals("Repo100").toPredicate();
+
+
+    System.out.println("PreUpgradeCheckResourceProvider - " + provider);
+    Set<Resource> resources = Collections.emptySet();
+    try {
+      resources = provider.getResources(request, predicate);
+    }
+    catch (Exception e) {
+      e.printStackTrace();
+    }
+
+    Assert.assertEquals(1, resources.size());
+    for (Resource resource : resources) {
+      String id = (String) resource.getPropertyValue(PreUpgradeCheckResourceProvider.UPGRADE_CHECK_ID_PROPERTY_ID);
+      Assert.assertEquals("SAMPLE_SERVICE_CHECK", id);
+      String description = (String) resource.getPropertyValue(PreUpgradeCheckResourceProvider.UPGRADE_CHECK_CHECK_PROPERTY_ID);
+      Assert.assertEquals("Sample service check description.", description);
+      PrereqCheckStatus status = (PrereqCheckStatus) resource.getPropertyValue(PreUpgradeCheckResourceProvider.UPGRADE_CHECK_STATUS_PROPERTY_ID);
+      Assert.assertEquals(PrereqCheckStatus.FAIL, status);
+      String reason = (String) resource.getPropertyValue(PreUpgradeCheckResourceProvider.UPGRADE_CHECK_REASON_PROPERTY_ID);
+      Assert.assertEquals("Sample service check always fails.", reason);
+      PrereqCheckType checkType = (PrereqCheckType) resource.getPropertyValue(PreUpgradeCheckResourceProvider.UPGRADE_CHECK_CHECK_TYPE_PROPERTY_ID);
+      Assert.assertEquals(PrereqCheckType.HOST, checkType);
+      String clusterName = (String) resource.getPropertyValue(PreUpgradeCheckResourceProvider.UPGRADE_CHECK_CLUSTER_NAME_PROPERTY_ID);
+      Assert.assertEquals("Cluster100", clusterName);
+      UpgradeType upgradeType = (UpgradeType) resource.getPropertyValue(PreUpgradeCheckResourceProvider.UPGRADE_CHECK_UPGRADE_TYPE_PROPERTY_ID);
+      Assert.assertEquals(UpgradeType.NON_ROLLING, upgradeType);
+    }
+
+    // verify
+    verify(managementController, clusters, cluster, service, serviceInfo, repoDao, repo, upgradeHelper,
+            ambariMetaInfo, upgradePack, config, currentStackId, targetStackId, serviceFactory);
+  }
+
+  /**
+   * This factory method creates PreUpgradeCheckResourceProvider using the mock managementController
+   */
+  public PreUpgradeCheckResourceProvider getPreUpgradeCheckResourceProvider(AmbariManagementController managementController, Injector injector) throws  AmbariException {
+    //UpgradeHelper upgradeHelper = injector.getInstance(UpgradeHelper.class);
+    //injector.injectMembers(upgradeHelper);
+    PreUpgradeCheckResourceProvider provider = new PreUpgradeCheckResourceProvider(managementController);
+    return provider;
+  }
+
+  static class TestClustersProvider implements Provider<Clusters> {
+    private static Clusters clusters = createNiceMock(Clusters.class);
+
+    @Override
+    public Clusters get() {
+      return clusters;
+    }
+  }
+
+  static class TestUpgradeHelperProvider implements Provider<UpgradeHelper> {
+    private static UpgradeHelper upgradeHelper = createNiceMock(UpgradeHelper.class);
+
+    @Override
+    public UpgradeHelper get() {
+      return upgradeHelper;
+    }
+  }
+
+  private Injector createInjector() throws Exception {
+    return Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        Provider<Clusters> clustersProvider = new TestClustersProvider();
+        Provider<UpgradeHelper> upgradeHelperProvider = new TestUpgradeHelperProvider();
+        CheckHelper checkHelper = new CheckHelper();
+        UpgradeCheckRegistry registry = new UpgradeCheckRegistry();
+
+        bind(AmbariManagementController.class).toInstance(createNiceMock(AmbariManagementController.class));
+        bind(CheckHelper.class).toInstance(checkHelper);
+        bind(Clusters.class).toProvider(TestClustersProvider.class);
+        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
+        bind(EntityManager.class).toInstance(createNiceMock(EntityManager.class));
+        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+        bind(RepositoryVersionDAO.class).toInstance(createNiceMock(RepositoryVersionDAO.class));
+        bind(StackManagerFactory.class).toInstance(createNiceMock(StackManagerFactory.class));
+        bind(UpgradeCheckRegistry.class).toInstance(registry);
+        bind(UpgradeHelper.class).toProvider(TestUpgradeHelperProvider.class);
+
+        requestStaticInjection(PreUpgradeCheckResourceProvider.class);
+      }
+    });
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/87423d64/ambari-server/src/test/java/org/apache/ambari/server/sample/checks/SampleServiceCheck.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/sample/checks/SampleServiceCheck.java b/ambari-server/src/test/java/org/apache/ambari/server/sample/checks/SampleServiceCheck.java
new file mode 100644
index 0000000..c91793e
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/sample/checks/SampleServiceCheck.java
@@ -0,0 +1,35 @@
+package org.apache.ambari.server.sample.checks;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.checks.AbstractCheckDescriptor;
+import org.apache.ambari.server.checks.CheckDescription;
+import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.state.stack.PrereqCheckStatus;
+import org.apache.ambari.server.state.stack.PrereqCheckType;
+import org.apache.ambari.server.state.stack.PrerequisiteCheck;
+
+import com.google.common.collect.ImmutableMap;
+
+public class SampleServiceCheck extends AbstractCheckDescriptor {
+
+  public SampleServiceCheck() {
+    super(new CheckDescription("SAMPLE_SERVICE_CHECK",
+          PrereqCheckType.HOST,
+          "Sample service check description.",
+          new ImmutableMap.Builder<String, String>()
+                          .put(AbstractCheckDescriptor.DEFAULT,
+                              "Sample service check default property description.").build()));
+  }
+
+  @Override
+  public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException {
+    prerequisiteCheck.setFailReason("Sample service check always fails.");
+    prerequisiteCheck.setStatus(PrereqCheckStatus.FAIL);
+  }
+
+  @Override
+  public boolean isStackUpgradeAllowedToBypassPreChecks() {
+    return false;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/87423d64/ambari-server/src/test/java/org/apache/ambari/server/stack/ServiceModuleTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/ServiceModuleTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/ServiceModuleTest.java
index 304fd5c..a9a8fdb 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/ServiceModuleTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/ServiceModuleTest.java
@@ -437,6 +437,36 @@ public class ServiceModuleTest {
   }
 
   @Test
+  public void testResolve_UpgradeCheckDirectory() throws Exception {
+    File checks = new File("checks");
+
+    // check directory specified in child only
+    ServiceInfo info = new ServiceInfo();
+    ServiceInfo parentInfo = new ServiceInfo();
+    ServiceModule child = createServiceModule(info);
+    ServiceModule parent = createServiceModule(parentInfo);
+    child.getModuleInfo().setChecksFolder(checks);
+    resolveService(child, parent);
+    assertEquals(checks.getPath(), child.getModuleInfo().getChecksFolder().getPath());
+
+    // check directory specified in parent only
+    child = createServiceModule(info);
+    parent = createServiceModule(parentInfo);
+    parent.getModuleInfo().setChecksFolder(checks);
+    resolveService(child, parent);
+    assertEquals(checks.getPath(), child.getModuleInfo().getChecksFolder().getPath());
+
+    // check directory set in both
+    info.setChecksFolder(checks);
+    child = createServiceModule(info);
+    child.getModuleInfo().setChecksFolder(checks);
+    parent = createServiceModule(parentInfo);
+    parent.getModuleInfo().setChecksFolder(new File("other"));
+    resolveService(child, parent);
+    assertEquals(checks.getPath(), child.getModuleInfo().getChecksFolder().getPath());
+  }
+
+  @Test
   public void testResolve_CustomCommands() throws Exception {
     List<CustomCommandDefinition> customCommands = new ArrayList<CustomCommandDefinition>();
     CustomCommandDefinition cmd1 = new CustomCommandDefinition();

http://git-wip-us.apache.org/repos/asf/ambari/blob/87423d64/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
index 659ae12..044f2c4 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
@@ -29,6 +29,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.File;
+import java.io.FilenameFilter;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
@@ -105,6 +106,9 @@ public class StackManagerExtensionTest  {
     assertNotNull("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder());
     assertTrue("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder().contains("extensions/EXT/0.1/services/OOZIE2/package"));
     assertEquals(oozie.getVersion(), "3.2.0");
+    File checks = oozie.getChecksFolder();
+    assertNotNull(checks);
+    assertTrue("Checks dir is " + checks.getPath(), checks.getPath().contains("extensions/EXT/0.1/services/OOZIE2/checks"));
 
     extension = stackManager.getExtension("EXT", "0.2");
     assertNotNull("EXT 0.2's parent: " + extension.getParentExtensionVersion(), extension.getParentExtensionVersion());
@@ -114,6 +118,9 @@ public class StackManagerExtensionTest  {
     assertNotNull("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder());
     assertTrue("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder().contains("extensions/EXT/0.1/services/OOZIE2/package"));
     assertEquals(oozie.getVersion(), "4.0.0");
+    checks = oozie.getChecksFolder();
+    assertNotNull(checks);
+    assertTrue("Checks dir is " + checks.getPath(), checks.getPath().contains("extensions/EXT/0.1/services/OOZIE2/checks"));
 
     StackInfo stack = stackManager.getStack("HDP", "0.2");
     assertNotNull(stack.getService("OOZIE2"));


[04/17] ambari git commit: AMBARI-18468 - [Grafana] Incorrect metric values displayed when there are multiple Kafka Brokers (prajwal)

Posted by jo...@apache.org.
AMBARI-18468 - [Grafana] Incorrect metric values displayed when there are multiple Kafka Brokers (prajwal)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5af6d547
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5af6d547
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5af6d547

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 5af6d547fa9c5d225e011e18297b2cef703237a9
Parents: 7c8ada1
Author: Prajwal Rao <pr...@gmail.com>
Authored: Mon Sep 26 14:12:46 2016 -0700
Committer: Prajwal Rao <pr...@gmail.com>
Committed: Mon Sep 26 14:12:46 2016 -0700

----------------------------------------------------------------------
 .../files/grafana-dashboards/HDF/grafana-kafka-home.json  | 10 +++++-----
 .../files/grafana-dashboards/HDP/grafana-kafka-home.json  | 10 +++++-----
 2 files changed, 10 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/5af6d547/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDF/grafana-kafka-home.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDF/grafana-kafka-home.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDF/grafana-kafka-home.json
index b754231..5ec4404 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDF/grafana-kafka-home.json
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDF/grafana-kafka-home.json
@@ -233,7 +233,7 @@
           },
           "targets": [
             {
-              "aggregator": "avg",
+              "aggregator": "sum",
               "alias": "Active Controller Count",
               "app": "kafka_broker",
               "downsampleAggregator": "avg",
@@ -291,7 +291,7 @@
           },
           "targets": [
             {
-              "aggregator": "avg",
+              "aggregator": "sum",
               "alias": "Replica MaxLag",
               "app": "kafka_broker",
               "downsampleAggregator": "avg",
@@ -349,7 +349,7 @@
           },
           "targets": [
             {
-              "aggregator": "avg",
+              "aggregator": "sum",
               "alias": "Leader Count",
               "app": "kafka_broker",
               "downsampleAggregator": "avg",
@@ -416,7 +416,7 @@
           },
           "targets": [
             {
-              "aggregator": "avg",
+              "aggregator": "sum",
               "alias": "UnderReplicatedPartitions",
               "app": "kafka_broker",
               "downsampleAggregator": "avg",
@@ -473,7 +473,7 @@
           },
           "targets": [
             {
-              "aggregator": "avg",
+              "aggregator": "sum",
               "alias": "OfflinePartitionsCount",
               "app": "kafka_broker",
               "downsampleAggregator": "avg",

http://git-wip-us.apache.org/repos/asf/ambari/blob/5af6d547/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-kafka-home.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-kafka-home.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-kafka-home.json
index b754231..5ec4404 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-kafka-home.json
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-kafka-home.json
@@ -233,7 +233,7 @@
           },
           "targets": [
             {
-              "aggregator": "avg",
+              "aggregator": "sum",
               "alias": "Active Controller Count",
               "app": "kafka_broker",
               "downsampleAggregator": "avg",
@@ -291,7 +291,7 @@
           },
           "targets": [
             {
-              "aggregator": "avg",
+              "aggregator": "sum",
               "alias": "Replica MaxLag",
               "app": "kafka_broker",
               "downsampleAggregator": "avg",
@@ -349,7 +349,7 @@
           },
           "targets": [
             {
-              "aggregator": "avg",
+              "aggregator": "sum",
               "alias": "Leader Count",
               "app": "kafka_broker",
               "downsampleAggregator": "avg",
@@ -416,7 +416,7 @@
           },
           "targets": [
             {
-              "aggregator": "avg",
+              "aggregator": "sum",
               "alias": "UnderReplicatedPartitions",
               "app": "kafka_broker",
               "downsampleAggregator": "avg",
@@ -473,7 +473,7 @@
           },
           "targets": [
             {
-              "aggregator": "avg",
+              "aggregator": "sum",
               "alias": "OfflinePartitionsCount",
               "app": "kafka_broker",
               "downsampleAggregator": "avg",


[12/17] ambari git commit: AMBARI-18455. Ambari dashboard HDFS links widget incorrectly shows 2 standby namenode (akovalenko)

Posted by jo...@apache.org.
AMBARI-18455. Ambari dashboard HDFS links widget incorrectly shows 2 standby namenode (akovalenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a0fff847
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a0fff847
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a0fff847

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: a0fff8472a3e60972a0ae57d30c09e867d86ad0f
Parents: 6fb1cee
Author: Aleksandr Kovalenko <ak...@hortonworks.com>
Authored: Wed Sep 28 13:25:02 2016 +0300
Committer: Aleksandr Kovalenko <ak...@hortonworks.com>
Committed: Wed Sep 28 13:25:02 2016 +0300

----------------------------------------------------------------------
 ambari-web/app/views/main/dashboard/widgets/hdfs_links.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a0fff847/ambari-web/app/views/main/dashboard/widgets/hdfs_links.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/dashboard/widgets/hdfs_links.js b/ambari-web/app/views/main/dashboard/widgets/hdfs_links.js
index 128b997..cfe5eb1 100644
--- a/ambari-web/app/views/main/dashboard/widgets/hdfs_links.js
+++ b/ambari-web/app/views/main/dashboard/widgets/hdfs_links.js
@@ -43,7 +43,7 @@ App.HDFSLinksView = App.LinkDashboardWidgetView.extend({
 
   isStandbyNNValid: Em.computed.bool('model.standbyNameNode'),
 
-  isTwoStandbyNN: Em.computed.and('isActiveNNValid', 'isStandbyNNValid'),
+  isTwoStandbyNN: Em.computed.and('model.standbyNameNode', 'model.standbyNameNode2'),
 
   twoStandbyComponent: function () {
     return App.HostComponent.find().findProperty('componentName', 'NAMENODE');


[02/17] ambari git commit: AMBARI-18449 Ambari create widget does not show what the error is (Vivek Ratnavel Subramanian via zhewang)

Posted by jo...@apache.org.
AMBARI-18449 Ambari create widget does not show what the error is (Vivek Ratnavel Subramanian via zhewang)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/69e8f6fa
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/69e8f6fa
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/69e8f6fa

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 69e8f6fa89b5b2a4e6246e0e32b23c314a12df5c
Parents: ee4e63a
Author: Zhe (Joe) Wang <zh...@apache.org>
Authored: Mon Sep 26 13:12:09 2016 -0700
Committer: Zhe (Joe) Wang <zh...@apache.org>
Committed: Mon Sep 26 13:12:09 2016 -0700

----------------------------------------------------------------------
 .../service/widgets/create/step2_controller.js  | 26 ++++++++++++++++++++
 .../main/service/widgets/create/expression.hbs  | 10 ++++++--
 .../main/service/widgets/create/step2_graph.hbs |  6 ++++-
 .../service/widgets/create/step2_number.hbs     |  6 ++++-
 .../service/widgets/create/step2_template.hbs   |  6 ++++-
 .../service/widgets/create/expression_view.js   | 14 +++++++++--
 6 files changed, 61 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/69e8f6fa/ambari-web/app/controllers/main/service/widgets/create/step2_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/widgets/create/step2_controller.js b/ambari-web/app/controllers/main/service/widgets/create/step2_controller.js
index 4e3ab91..8b1045a 100644
--- a/ambari-web/app/controllers/main/service/widgets/create/step2_controller.js
+++ b/ambari-web/app/controllers/main/service/widgets/create/step2_controller.js
@@ -138,6 +138,32 @@ App.WidgetWizardStep2Controller = Em.Controller.extend({
   },
 
   /**
+   * check whether any of the expressions is incomplete or invalid
+   * @returns {boolean}
+   */
+  isAnyExpressionInvalid: function() {
+    var isAnyExpressionInvalid = false;
+    switch (this.get('content.widgetType')) {
+      case "NUMBER":
+      case "GAUGE":
+      case "TEMPLATE":
+        isAnyExpressionInvalid = this.get('isSubmitDisabled') && this.get('expressions').someProperty('isEmpty', false);
+        break;
+      case "GRAPH":
+        var dataSets = this.get('dataSets'),
+          isNotEmpty = false;
+        for (var i = 0; i < dataSets.length; i++) {
+          if (dataSets[i].get('expression.data').length > 0) {
+            isNotEmpty = true;
+            break;
+          }
+        }
+        isAnyExpressionInvalid = this.get('isSubmitDisabled') && isNotEmpty;
+    }
+    return isAnyExpressionInvalid;
+  }.property('isSubmitDisabled'),
+
+  /**
    * check whether data of graph widget is complete
    * @param dataSets
    * @returns {boolean} isComplete

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e8f6fa/ambari-web/app/templates/main/service/widgets/create/expression.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/widgets/create/expression.hbs b/ambari-web/app/templates/main/service/widgets/create/expression.hbs
index 826cb43..0ad08bf 100644
--- a/ambari-web/app/templates/main/service/widgets/create/expression.hbs
+++ b/ambari-web/app/templates/main/service/widgets/create/expression.hbs
@@ -33,8 +33,15 @@
     {{view App.AddNumberExpressionView valueBinding="view.numberValue" class="input-small"}}
     <button class="btn add-on" {{action addNumber target="view"}} {{bindAttr disabled="view.isNumberValueInvalid"}}>{{t dashboard.widgets.wizard.step2.newNumber}}</button>
   </div>
-
 </div>
+{{#if view.isInvalid}}
+  <div class="alert alert-error">
+    Invalid expression!
+    {{#if view.isInvalidTextfield}}
+      Only numbers or operators are allowed in this field.
+    {{/if}}
+  </div>
+{{/if}}
 <div class="metric-field">
   {{#if view.expression.isRemovable}}
       <a {{action removeExpression view.expression target="controller"}} class="remove-link"><i class="icon-trash"></i></a>
@@ -54,6 +61,5 @@
     </div>
     <div class="placeholder">{{t dashboard.widgets.wizard.step2.addMetrics}}</div>
   {{/if}}
-
 </div>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e8f6fa/ambari-web/app/templates/main/service/widgets/create/step2_graph.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/widgets/create/step2_graph.hbs b/ambari-web/app/templates/main/service/widgets/create/step2_graph.hbs
index 17e4790..483abd6 100644
--- a/ambari-web/app/templates/main/service/widgets/create/step2_graph.hbs
+++ b/ambari-web/app/templates/main/service/widgets/create/step2_graph.hbs
@@ -20,7 +20,11 @@
 <div class="alert alert-info">
   {{t widget.create.wizard.step2.body.text}}
 </div>
-
+{{#if isAnyExpressionInvalid}}
+  <div class="alert alert-error">
+    Expression is not complete or is invalid!
+  </div>
+{{/if}}
 {{#each dataSet in dataSets}}
   <fieldset>
     <h5>{{view Ember.TextField valueBinding="dataSet.label"}}</h5>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e8f6fa/ambari-web/app/templates/main/service/widgets/create/step2_number.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/widgets/create/step2_number.hbs b/ambari-web/app/templates/main/service/widgets/create/step2_number.hbs
index 27fd7e4..13c4f4b 100644
--- a/ambari-web/app/templates/main/service/widgets/create/step2_number.hbs
+++ b/ambari-web/app/templates/main/service/widgets/create/step2_number.hbs
@@ -20,7 +20,11 @@
 <div class="alert alert-info">
   {{t widget.create.wizard.step2.body.text}}
 </div>
-
+{{#if isAnyExpressionInvalid}}
+  <div class="alert alert-error">
+    Expression is not complete or is invalid!
+  </div>
+{{/if}}
 {{#each expression in expressions}}
   {{view App.WidgetWizardExpressionView expressionBinding="expression"}}
 {{/each}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e8f6fa/ambari-web/app/templates/main/service/widgets/create/step2_template.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/widgets/create/step2_template.hbs b/ambari-web/app/templates/main/service/widgets/create/step2_template.hbs
index 6a8a469..c0faedc 100644
--- a/ambari-web/app/templates/main/service/widgets/create/step2_template.hbs
+++ b/ambari-web/app/templates/main/service/widgets/create/step2_template.hbs
@@ -33,7 +33,11 @@
 <div class="alert alert-info">
   {{t widget.create.wizard.step2.body.text}}
 </div>
-
+{{#if isAnyExpressionInvalid}}
+  <div class="alert alert-error">
+    Expression is not complete or is invalid!
+  </div>
+{{/if}}
 {{#each expression in expressions}}
   <h5>{{EXPRESSION_PREFIX}}{{expression.id}}</h5>
   {{view App.WidgetWizardExpressionView expressionBinding="expression"}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e8f6fa/ambari-web/app/views/main/service/widgets/create/expression_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/widgets/create/expression_view.js b/ambari-web/app/views/main/service/widgets/create/expression_view.js
index 32c664f..a12bf99 100644
--- a/ambari-web/app/views/main/service/widgets/create/expression_view.js
+++ b/ambari-web/app/views/main/service/widgets/create/expression_view.js
@@ -67,6 +67,11 @@ App.WidgetWizardExpressionView = Em.View.extend({
   isInvalid: false,
 
   /**
+   * @type {boolean}
+   */
+  isInvalidTextfield: false,
+
+  /**
    * contains value of number added to expression
    * @type {string}
    */
@@ -431,7 +436,8 @@ App.InputCursorTextfieldView = Ember.TextField.extend({
   validateInput: function () {
     var value = this.get('value');
     var parentView = this.get('parentView');
-    var isInvalid = false;
+    var isInvalid = false,
+      isInvalidTextfield = false;
 
     if (!number_utils.isPositiveNumber(value))  {
       if (value && parentView.get('OPERATORS').contains(value)) {
@@ -451,10 +457,12 @@ App.InputCursorTextfieldView = Ember.TextField.extend({
         this.set('value', '');
       } else if (value) {
         // invalid operator
-        isInvalid = true;
+        isInvalid = isInvalidTextfield = true;
       }
     }
     this.set('isInvalid', isInvalid);
+    this.set('parentView.isInvalid', isInvalid);
+    this.set('parentView.isInvalidTextfield', isInvalidTextfield);
   }.observes('value'),
 
   keyDown: function (event) {
@@ -479,6 +487,8 @@ App.InputCursorTextfieldView = Ember.TextField.extend({
       }));
       this.set('numberValue', "");
       this.set('isInvalid', false);
+      this.set('parentView.isInvalid', false);
+      this.set('parentView.isInvalidTextfield', false);
       this.set('value', '');
     }
   }


[17/17] ambari git commit: Merge branch 'trunk' into branch-feature-AMBARI-18456

Posted by jo...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-18456


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f0da4fa4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f0da4fa4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f0da4fa4

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: f0da4fa49ec8ce420ce4f495855af9b3e3b8d975
Parents: 561c6f2 aad2133
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Sep 28 10:17:25 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed Sep 28 10:17:25 2016 -0400

----------------------------------------------------------------------
 .../ambari-metrics/datasource.js                |  64 +++
 ambari-server/pom.xml                           |  36 ++
 .../ambari/server/checks/CheckDescription.java  |   5 +-
 .../server/checks/UpgradeCheckRegistry.java     |  76 +++
 .../PreUpgradeCheckResourceProvider.java        |  27 +-
 .../server/stack/CommonServiceDirectory.java    |  50 +-
 .../ambari/server/stack/ServiceDirectory.java   | 112 +++-
 .../ambari/server/stack/ServiceModule.java      |   8 +-
 .../apache/ambari/server/stack/StackModule.java |  37 +-
 .../server/stack/StackServiceDirectory.java     |  81 +--
 .../stack/UpdateActiveRepoVersionOnStartup.java |   9 +-
 .../apache/ambari/server/state/ServiceInfo.java |  14 +
 ambari-server/src/main/resources/alerts.json    |  31 ++
 .../HDF/grafana-kafka-home.json                 |  10 +-
 .../HDF/grafana-storm-kafka-offset.json         | 258 +++++++++
 .../HDP/grafana-kafka-home.json                 |  10 +-
 .../HDP/grafana-storm-kafka-offset.json         | 258 +++++++++
 .../2.1.0.2.0/package/scripts/params_linux.py   |   7 +
 .../YARN/2.1.0.2.0/package/scripts/yarn.py      | 548 +++++++++----------
 .../main/resources/host_scripts/alert_ulimit.py |  83 +++
 .../scripts/shared_initialization.py            |   2 +-
 .../PreUpgradeCheckResourceProviderTest.java    | 255 +++++++++
 .../sample/checks/SampleServiceCheck.java       |  52 ++
 .../ambari/server/stack/ServiceModuleTest.java  |  30 +
 .../server/stack/StackManagerExtensionTest.java |   7 +
 .../UpdateActiveRepoVersionOnStartupTest.java   |  28 +-
 .../test/python/host_scripts/TestAlertUlimit.py |  44 ++
 .../stacks/2.0.6/YARN/test_historyserver.py     |   1 -
 .../stacks/2.0.6/YARN/test_resourcemanager.py   |  54 +-
 .../stacks/2.1/YARN/test_apptimelineserver.py   |  40 +-
 .../test/python/stacks/2.3/YARN/test_ats_1_5.py | 188 +++----
 ambari-server/src/test/python/unitTests.py      |  25 +-
 ambari-web/app/assets/licenses/NOTICE.txt       |   3 +
 .../service/widgets/create/step2_controller.js  |  26 +
 ambari-web/app/messages.js                      |  13 +-
 .../app/mixins/common/configs/configs_loader.js |   2 +-
 ambari-web/app/models/cluster_states.js         |   4 +-
 ambari-web/app/routes/add_kerberos_routes.js    |   4 +-
 .../main/service/widgets/create/expression.hbs  |  10 +-
 .../main/service/widgets/create/step2_graph.hbs |   6 +-
 .../service/widgets/create/step2_number.hbs     |   6 +-
 .../service/widgets/create/step2_template.hbs   |   6 +-
 ambari-web/app/utils/string_utils.js            |   7 +-
 .../views/common/configs/config_history_flow.js |  46 +-
 .../app/views/common/rolling_restart_view.js    |  19 +-
 .../views/main/dashboard/widgets/hdfs_links.js  |   2 +-
 ambari-web/app/views/main/service/item.js       |   3 +-
 .../service/widgets/create/expression_view.js   |  16 +-
 ambari-web/brunch-config.js                     |   3 +-
 .../resourceManager/wizard_controller_test.js   |   1 -
 .../common/configs/configs_loader_test.js       |  10 +-
 ambari-web/test/models/cluster_test.js          |  12 +-
 .../objects/service_config_property_test.js     |  31 +-
 .../configs/theme/sub_section_tab_test.js       |   2 +-
 .../test/views/main/host/log_metrics_test.js    |   1 -
 ambari-web/test/views/main/host_test.js         |   4 +-
 ambari-web/vendor/scripts/pluralize.js          | 461 ++++++++++++++++
 .../MICROSOFT_R/8.0.0/metainfo.xml              |   4 +-
 .../MICROSOFT_R/8.0.0/service_advisor.py        |  22 +-
 59 files changed, 2514 insertions(+), 660 deletions(-)
----------------------------------------------------------------------



[10/17] ambari git commit: AMBARI-18475 - Remove Global Cluster Lock Shared Between Business Objects (jonathanhurley)

Posted by jo...@apache.org.
AMBARI-18475 - Remove Global Cluster Lock Shared Between Business Objects (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/561c6f2f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/561c6f2f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/561c6f2f

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 561c6f2f38f9b262dda4acd7ff0526b7caf55bce
Parents: 8192601
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Tue Sep 27 11:44:12 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Tue Sep 27 15:34:40 2016 -0400

----------------------------------------------------------------------
 .../ambari/annotations/ExperimentalFeature.java |   8 +-
 .../AmbariManagementControllerImpl.java         |  30 +-
 .../alerts/AlertServiceStateListener.java       | 122 ++++---
 .../org/apache/ambari/server/state/Cluster.java |   7 -
 .../apache/ambari/server/state/ConfigImpl.java  |  98 +++--
 .../org/apache/ambari/server/state/Service.java |   7 -
 .../ambari/server/state/ServiceComponent.java   |   7 -
 .../server/state/ServiceComponentImpl.java      | 364 +++++++------------
 .../apache/ambari/server/state/ServiceImpl.java | 306 ++++++----------
 .../server/state/cluster/ClusterImpl.java       |   6 -
 .../state/configgroup/ConfigGroupImpl.java      |  92 ++---
 .../svccomphost/ServiceComponentHostImpl.java   | 227 +++++-------
 .../server/update/HostUpdateHelperTest.java     |  40 +-
 13 files changed, 522 insertions(+), 792 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/main/java/org/apache/ambari/annotations/ExperimentalFeature.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/annotations/ExperimentalFeature.java b/ambari-server/src/main/java/org/apache/ambari/annotations/ExperimentalFeature.java
index 1d5ba0e..7798f26 100644
--- a/ambari-server/src/main/java/org/apache/ambari/annotations/ExperimentalFeature.java
+++ b/ambari-server/src/main/java/org/apache/ambari/annotations/ExperimentalFeature.java
@@ -18,6 +18,7 @@
 package org.apache.ambari.annotations;
 
 import java.util.concurrent.Executor;
+import java.util.concurrent.locks.Lock;
 
 /**
  * The {@link ExperimentalFeature} enumeration is meant to be used with the
@@ -40,5 +41,10 @@ public enum ExperimentalFeature {
   /**
    * Used for code that is targeted for patch upgrades
    */
-  PATCH_UPGRADES
+  PATCH_UPGRADES,
+
+  /**
+   * The removal of the cluster global {@link Lock}
+   */
+  CLUSTER_GLOBAL_LOCK_REMOVAL
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 1fc9dbf..ac680a5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -63,7 +63,6 @@ import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.Lock;
 
 import javax.persistence.RollbackException;
 
@@ -202,6 +201,7 @@ import org.slf4j.LoggerFactory;
 import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
 import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.ListMultimap;
 import com.google.common.collect.Multimap;
 import com.google.gson.Gson;
 import com.google.gson.reflect.TypeToken;
@@ -209,7 +209,6 @@ import com.google.inject.Inject;
 import com.google.inject.Injector;
 import com.google.inject.Singleton;
 import com.google.inject.persist.Transactional;
-import com.google.common.collect.ListMultimap;
 
 @Singleton
 public class AmbariManagementControllerImpl implements AmbariManagementController {
@@ -3111,13 +3110,8 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
         changedHosts, requestParameters, requestProperties,
         runSmokeTest, reconfigureClients);
 
-    Lock clusterWriteLock = cluster.getClusterGlobalLock().writeLock();
-    clusterWriteLock.lock();
-    try {
-      updateServiceStates(cluster, changedServices, changedComponents, changedHosts, ignoredHosts);
-    } finally {
-      clusterWriteLock.unlock();
-    }
+    updateServiceStates(cluster, changedServices, changedComponents, changedHosts, ignoredHosts);
+
     return requestStages;
   }
 
@@ -5166,13 +5160,15 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
     StackInfo stackInfo = ambariMetaInfo.getStack(linkEntity.getStack().getStackName(), linkEntity.getStack().getStackVersion());
 
-    if (stackInfo == null)
+    if (stackInfo == null) {
       throw new StackAccessException("stackName=" + linkEntity.getStack().getStackName() + ", stackVersion=" + linkEntity.getStack().getStackVersion());
+    }
 
     ExtensionInfo extensionInfo = ambariMetaInfo.getExtension(linkEntity.getExtension().getExtensionName(), linkEntity.getExtension().getExtensionVersion());
 
-    if (extensionInfo == null)
+    if (extensionInfo == null) {
       throw new StackAccessException("extensionName=" + linkEntity.getExtension().getExtensionName() + ", extensionVersion=" + linkEntity.getExtension().getExtensionVersion());
+    }
 
     ExtensionHelper.validateDeleteLink(getClusters(), stackInfo, extensionInfo);
     ambariMetaInfo.getStackManager().unlinkStackAndExtension(stackInfo, extensionInfo);
@@ -5202,13 +5198,15 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
     StackInfo stackInfo = ambariMetaInfo.getStack(request.getStackName(), request.getStackVersion());
 
-    if (stackInfo == null)
+    if (stackInfo == null) {
       throw new StackAccessException("stackName=" + request.getStackName() + ", stackVersion=" + request.getStackVersion());
+    }
 
     ExtensionInfo extensionInfo = ambariMetaInfo.getExtension(request.getExtensionName(), request.getExtensionVersion());
 
-    if (extensionInfo == null)
+    if (extensionInfo == null) {
       throw new StackAccessException("extensionName=" + request.getExtensionName() + ", extensionVersion=" + request.getExtensionVersion());
+    }
 
     ExtensionHelper.validateCreateLink(stackInfo, extensionInfo);
     ExtensionLinkEntity linkEntity = createExtensionLinkEntity(request);
@@ -5265,13 +5263,15 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
   public void updateExtensionLink(ExtensionLinkEntity linkEntity) throws AmbariException {
     StackInfo stackInfo = ambariMetaInfo.getStack(linkEntity.getStack().getStackName(), linkEntity.getStack().getStackVersion());
 
-    if (stackInfo == null)
+    if (stackInfo == null) {
       throw new StackAccessException("stackName=" + linkEntity.getStack().getStackName() + ", stackVersion=" + linkEntity.getStack().getStackVersion());
+    }
 
     ExtensionInfo extensionInfo = ambariMetaInfo.getExtension(linkEntity.getExtension().getExtensionName(), linkEntity.getExtension().getExtensionVersion());
 
-    if (extensionInfo == null)
+    if (extensionInfo == null) {
       throw new StackAccessException("extensionName=" + linkEntity.getExtension().getExtensionName() + ", extensionVersion=" + linkEntity.getExtension().getExtensionVersion());
+    }
 
     ambariMetaInfo.getStackManager().linkStackToExtension(stackInfo, extensionInfo);
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertServiceStateListener.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertServiceStateListener.java b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertServiceStateListener.java
index da4cbf5..6f6cea8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertServiceStateListener.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertServiceStateListener.java
@@ -20,6 +20,7 @@ package org.apache.ambari.server.events.listeners.alerts;
 import java.text.MessageFormat;
 import java.util.List;
 import java.util.Set;
+import java.util.concurrent.locks.Lock;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.EagerSingleton;
@@ -34,7 +35,6 @@ import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
 import org.apache.ambari.server.orm.dao.AlertDispatchDAO;
 import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
 import org.apache.ambari.server.orm.entities.AlertGroupEntity;
-import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.alert.AlertDefinition;
 import org.apache.ambari.server.state.alert.AlertDefinitionFactory;
@@ -43,6 +43,7 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.eventbus.AllowConcurrentEvents;
 import com.google.common.eventbus.Subscribe;
+import com.google.common.util.concurrent.Striped;
 import com.google.inject.Inject;
 import com.google.inject.Provider;
 import com.google.inject.Singleton;
@@ -95,7 +96,13 @@ public class AlertServiceStateListener {
    * Used to retrieve a cluster using clusterId from event.
    */
   @Inject
-  private Provider<Clusters> clusters;
+  private Provider<Clusters> m_clusters;
+
+  /**
+   * Used for ensuring that the concurrent nature of the event handler methods
+   * don't collide when attempting to perform operations on the same service.
+   */
+  private Striped<Lock> m_locksByService = Striped.lazyWeakLock(20);
 
   /**
    * Constructor.
@@ -125,38 +132,46 @@ public class AlertServiceStateListener {
     String stackVersion = event.getStackVersion();
     String serviceName = event.getServiceName();
 
-    // create the default alert group for the new service if absent; this MUST
-    // be done before adding definitions so that they are properly added to the
-    // default group
-    if (null == m_alertDispatchDao.findDefaultServiceGroup(clusterId, serviceName)) {
-      try {
-        m_alertDispatchDao.createDefaultGroup(clusterId, serviceName);
-      } catch (AmbariException ambariException) {
-        LOG.error("Unable to create a default alert group for {}",
-          event.getServiceName(), ambariException);
-      }
-    }
+    Lock lock = m_locksByService.get(serviceName);
+    lock.lock();
 
-    // populate alert definitions for the new service from the database, but
-    // don't worry about sending down commands to the agents; the host
-    // components are not yet bound to the hosts so we'd have no way of knowing
-    // which hosts are invalidated; do that in another impl
     try {
-      Set<AlertDefinition> alertDefinitions = m_metaInfoProvider.get().getAlertDefinitions(
-          stackName, stackVersion, serviceName);
+      // create the default alert group for the new service if absent; this MUST
+      // be done before adding definitions so that they are properly added to the
+      // default group
+      if (null == m_alertDispatchDao.findDefaultServiceGroup(clusterId, serviceName)) {
+        try {
+          m_alertDispatchDao.createDefaultGroup(clusterId, serviceName);
+        } catch (AmbariException ambariException) {
+          LOG.error("Unable to create a default alert group for {}",
+            event.getServiceName(), ambariException);
+        }
+      }
 
-      for (AlertDefinition definition : alertDefinitions) {
-        AlertDefinitionEntity entity = m_alertDefinitionFactory.coerce(
-            clusterId,
-            definition);
+      // populate alert definitions for the new service from the database, but
+      // don't worry about sending down commands to the agents; the host
+      // components are not yet bound to the hosts so we'd have no way of knowing
+      // which hosts are invalidated; do that in another impl
+      try {
+        Set<AlertDefinition> alertDefinitions = m_metaInfoProvider.get().getAlertDefinitions(
+            stackName, stackVersion, serviceName);
 
-        m_definitionDao.create(entity);
+        for (AlertDefinition definition : alertDefinitions) {
+          AlertDefinitionEntity entity = m_alertDefinitionFactory.coerce(
+              clusterId,
+              definition);
+
+          m_definitionDao.create(entity);
+        }
+      } catch (AmbariException ae) {
+        String message = MessageFormat.format(
+            "Unable to populate alert definitions from the database during installation of {0}",
+            serviceName);
+        LOG.error(message, ae);
       }
-    } catch (AmbariException ae) {
-      String message = MessageFormat.format(
-          "Unable to populate alert definitions from the database during installation of {0}",
-          serviceName);
-      LOG.error(message, ae);
+    }
+    finally {
+      lock.unlock();
     }
   }
 
@@ -170,43 +185,44 @@ public class AlertServiceStateListener {
   @AllowConcurrentEvents
   public void onAmbariEvent(ServiceRemovedEvent event) {
     LOG.debug("Received event {}", event);
-    Cluster cluster = null;
 
     try {
-      cluster = clusters.get().getClusterById(event.getClusterId());
+      m_clusters.get().getClusterById(event.getClusterId());
     } catch (AmbariException e) {
-      LOG.warn("Unable to retrieve cluster info for id: " + event.getClusterId());
+      LOG.warn("Unable to retrieve cluster with id {}", event.getClusterId());
+      return;
     }
 
-    if (cluster != null) {
-      // TODO: Explicit locking used to prevent deadlock situation caused during cluster delete
-      cluster.getClusterGlobalLock().writeLock().lock();
-      try {
-        List<AlertDefinitionEntity> definitions = m_definitionDao.findByService(event.getClusterId(),
+    String serviceName = event.getServiceName();
+    Lock lock = m_locksByService.get(serviceName);
+    lock.lock();
+
+    try {
+      List<AlertDefinitionEntity> definitions = m_definitionDao.findByService(event.getClusterId(),
           event.getServiceName());
 
-        for (AlertDefinitionEntity definition : definitions) {
-          try {
-            m_definitionDao.remove(definition);
-          } catch (Exception exception) {
-            LOG.error("Unable to remove alert definition {}", definition.getDefinitionName(), exception);
-          }
+      for (AlertDefinitionEntity definition : definitions) {
+        try {
+          m_definitionDao.remove(definition);
+        } catch (Exception exception) {
+          LOG.error("Unable to remove alert definition {}", definition.getDefinitionName(),
+              exception);
         }
+      }
 
-        // remove the default group for the service
-        AlertGroupEntity group = m_alertDispatchDao.findGroupByName(event.getClusterId(),
+      // remove the default group for the service
+      AlertGroupEntity group = m_alertDispatchDao.findGroupByName(event.getClusterId(),
           event.getServiceName());
 
-        if (null != group && group.isDefault()) {
-          try {
-            m_alertDispatchDao.remove(group);
-          } catch (Exception exception) {
-            LOG.error("Unable to remove default alert group {}", group.getGroupName(), exception);
-          }
+      if (null != group && group.isDefault()) {
+        try {
+          m_alertDispatchDao.remove(group);
+        } catch (Exception exception) {
+          LOG.error("Unable to remove default alert group {}", group.getGroupName(), exception);
         }
-      } finally {
-        cluster.getClusterGlobalLock().writeLock().unlock();
       }
+    } finally {
+      lock.unlock();
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
index 2452df6..d141df8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
@@ -22,7 +22,6 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.concurrent.locks.ReadWriteLock;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.ClusterResponse;
@@ -522,12 +521,6 @@ public interface Cluster {
   Service addService(String serviceName) throws AmbariException;
 
   /**
-   * Get lock to control access to cluster structure
-   * @return cluster-global lock
-   */
-  ReadWriteLock getClusterGlobalLock();
-
-  /**
    * Fetch desired configs for list of hosts in cluster
    * @param hostIds
    * @return

http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
index 7b7a60b..28bcd5f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
@@ -27,9 +27,6 @@ import java.util.Set;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
-import org.apache.ambari.annotations.TransactionalLock;
-import org.apache.ambari.annotations.TransactionalLock.LockArea;
-import org.apache.ambari.annotations.TransactionalLock.LockType;
 import org.apache.ambari.server.events.ClusterConfigChangedEvent;
 import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
@@ -365,69 +362,64 @@ public class ConfigImpl implements Config {
   @Override
   @Transactional
   public void persist(boolean newConfig) {
-    cluster.getClusterGlobalLock().writeLock().lock(); //null cluster is not expected, NPE anyway later in code
+    readWriteLock.writeLock().lock();
     try {
-      readWriteLock.writeLock().lock();
-      try {
-        ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
-
-        if (newConfig) {
-          ClusterConfigEntity entity = new ClusterConfigEntity();
-          entity.setClusterEntity(clusterEntity);
-          entity.setClusterId(cluster.getClusterId());
-          entity.setType(getType());
-          entity.setVersion(getVersion());
-          entity.setTag(getTag());
-          entity.setTimestamp(new Date().getTime());
-          entity.setStack(clusterEntity.getDesiredStack());
-          entity.setData(gson.toJson(getProperties()));
+      ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
+
+      if (newConfig) {
+        ClusterConfigEntity entity = new ClusterConfigEntity();
+        entity.setClusterEntity(clusterEntity);
+        entity.setClusterId(cluster.getClusterId());
+        entity.setType(getType());
+        entity.setVersion(getVersion());
+        entity.setTag(getTag());
+        entity.setTimestamp(new Date().getTime());
+        entity.setStack(clusterEntity.getDesiredStack());
+        entity.setData(gson.toJson(getProperties()));
+
+        if (null != getPropertiesAttributes()) {
+          entity.setAttributes(gson.toJson(getPropertiesAttributes()));
+        }
 
-          if (null != getPropertiesAttributes()) {
-            entity.setAttributes(gson.toJson(getPropertiesAttributes()));
+        clusterDAO.createConfig(entity);
+        clusterEntity.getClusterConfigEntities().add(entity);
+
+        // save the entity, forcing a flush to ensure the refresh picks up the
+        // newest data
+        clusterDAO.merge(clusterEntity, true);
+      } else {
+        // only supporting changes to the properties
+        ClusterConfigEntity entity = null;
+
+        // find the existing configuration to update
+        for (ClusterConfigEntity cfe : clusterEntity.getClusterConfigEntities()) {
+          if (getTag().equals(cfe.getTag()) && getType().equals(cfe.getType())
+              && getVersion().equals(cfe.getVersion())) {
+            entity = cfe;
+            break;
           }
+        }
+
+        // if the configuration was found, then update it
+        if (null != entity) {
+          LOG.debug(
+              "Updating {} version {} with new configurations; a new version will not be created",
+              getType(), getVersion());
 
-          clusterDAO.createConfig(entity);
-          clusterEntity.getClusterConfigEntities().add(entity);
+          entity.setData(gson.toJson(getProperties()));
 
           // save the entity, forcing a flush to ensure the refresh picks up the
           // newest data
           clusterDAO.merge(clusterEntity, true);
-          cluster.refresh();
-        } else {
-          // only supporting changes to the properties
-          ClusterConfigEntity entity = null;
-
-          // find the existing configuration to update
-          for (ClusterConfigEntity cfe : clusterEntity.getClusterConfigEntities()) {
-            if (getTag().equals(cfe.getTag()) &&
-                getType().equals(cfe.getType()) &&
-                getVersion().equals(cfe.getVersion())) {
-              entity = cfe;
-              break;
-            }
-          }
-
-          // if the configuration was found, then update it
-          if (null != entity) {
-            LOG.debug(
-                    "Updating {} version {} with new configurations; a new version will not be created",
-                    getType(), getVersion());
-
-            entity.setData(gson.toJson(getProperties()));
-
-            // save the entity, forcing a flush to ensure the refresh picks up the
-            // newest data
-            clusterDAO.merge(clusterEntity, true);
-            cluster.refresh();
-          }
         }
-      } finally {
-        readWriteLock.writeLock().unlock();
       }
     } finally {
-      cluster.getClusterGlobalLock().writeLock().unlock();
+      readWriteLock.writeLock().unlock();
     }
 
+    // re-load the entity associations for the cluster
+    cluster.refresh();
+
     // broadcast the change event for the configuration
     ClusterConfigChangedEvent event = new ClusterConfigChangedEvent(cluster.getClusterName(),
         getType(), getTag(), getVersion());

http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
index 7000574..48ab252 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
@@ -19,7 +19,6 @@
 package org.apache.ambari.server.state;
 
 import java.util.Map;
-import java.util.concurrent.locks.ReadWriteLock;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.ServiceResponse;
@@ -99,12 +98,6 @@ public interface Service {
   void delete() throws AmbariException;
 
   /**
-   * Get lock to control access to cluster structure
-   * @return cluster-global lock
-   */
-  ReadWriteLock getClusterGlobalLock();
-
-  /**
    * Sets the maintenance state for the service
    * @param state the state
    */

http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
index 983cbdf..8387ab8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
@@ -19,7 +19,6 @@
 package org.apache.ambari.server.state;
 
 import java.util.Map;
-import java.util.concurrent.locks.ReadWriteLock;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.ServiceComponentResponse;
@@ -98,10 +97,4 @@ public interface ServiceComponent {
       String hostName) throws AmbariException;
 
   void delete() throws AmbariException;
-
-  /**
-   * Get lock to control access to cluster structure
-   * @return cluster-global lock
-   */
-  ReadWriteLock getClusterGlobalLock();
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
index 3e805a0..282396d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
@@ -18,12 +18,16 @@
 
 package org.apache.ambari.server.state;
 
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.ProvisionException;
-import com.google.inject.assistedinject.Assisted;
-import com.google.inject.assistedinject.AssistedInject;
-import com.google.inject.persist.Transactional;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.ambari.annotations.Experimental;
+import org.apache.ambari.annotations.ExperimentalFeature;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.ObjectNotFoundException;
 import org.apache.ambari.server.ServiceComponentHostNotFoundException;
@@ -46,18 +50,18 @@ import org.apache.ambari.server.state.cluster.ClusterImpl;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.ProvisionException;
+import com.google.inject.assistedinject.Assisted;
+import com.google.inject.assistedinject.AssistedInject;
+import com.google.inject.persist.Transactional;
 
 public class ServiceComponentImpl implements ServiceComponent {
 
   private final static Logger LOG =
       LoggerFactory.getLogger(ServiceComponentImpl.class);
   private final Service service;
-  private final ReadWriteLock clusterGlobalLock;
   private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
   private final String componentName;
   private final String displayName;
@@ -79,7 +83,7 @@ public class ServiceComponentImpl implements ServiceComponent {
   private AmbariEventPublisher eventPublisher;
 
   ServiceComponentDesiredStateEntity desiredStateEntity;
-  private Map<String, ServiceComponentHost> hostComponents;
+  private ConcurrentMap<String, ServiceComponentHost> hostComponents;
 
   /**
    * Data access object used for lookup up stacks.
@@ -91,7 +95,6 @@ public class ServiceComponentImpl implements ServiceComponent {
   public ServiceComponentImpl(@Assisted Service service,
                               @Assisted String componentName, Injector injector) throws AmbariException {
     injector.injectMembers(this);
-    clusterGlobalLock = service.getClusterGlobalLock();
     this.service = service;
 
     desiredStateEntity = new ServiceComponentDesiredStateEntity();
@@ -103,7 +106,7 @@ public class ServiceComponentImpl implements ServiceComponent {
     desiredStateEntity.setRecoveryEnabled(false);
     setDesiredStackVersion(service.getDesiredStackVersion());
 
-    hostComponents = new HashMap<String, ServiceComponentHost>();
+    hostComponents = new ConcurrentHashMap<String, ServiceComponentHost>();
 
     StackId stackId = service.getDesiredStackVersion();
     try {
@@ -129,7 +132,6 @@ public class ServiceComponentImpl implements ServiceComponent {
                               @Assisted ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity,
                               Injector injector) throws AmbariException {
     injector.injectMembers(this);
-    clusterGlobalLock = service.getClusterGlobalLock();
     this.service = service;
 
     desiredStateEntity = serviceComponentDesiredStateEntity;
@@ -153,7 +155,7 @@ public class ServiceComponentImpl implements ServiceComponent {
         + ", stackInfo=" + stackId.getStackId());
     }
 
-    hostComponents = new HashMap<String, ServiceComponentHost>();
+    hostComponents = new ConcurrentHashMap<String, ServiceComponentHost>();
     for (HostComponentStateEntity hostComponentStateEntity : desiredStateEntity.getHostComponentStateEntities()) {
       HostComponentDesiredStateEntityPK pk = new HostComponentDesiredStateEntityPK();
       pk.setClusterId(hostComponentStateEntity.getClusterId());
@@ -179,11 +181,6 @@ public class ServiceComponentImpl implements ServiceComponent {
   }
 
   @Override
-  public ReadWriteLock getClusterGlobalLock() {
-    return clusterGlobalLock;
-  }
-
-  @Override
   public String getName() {
     return componentName;
   }
@@ -254,145 +251,84 @@ public class ServiceComponentImpl implements ServiceComponent {
   }
 
   @Override
+  @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
   public Map<String, ServiceComponentHost> getServiceComponentHosts() {
-    clusterGlobalLock.readLock().lock();
-    try {
-      readWriteLock.readLock().lock();
-      try {
-        return new HashMap<String, ServiceComponentHost>(hostComponents);
-      } finally {
-        readWriteLock.readLock().unlock();
-      }
-    } finally {
-      clusterGlobalLock.readLock().unlock();
-    }
+    return new HashMap<String, ServiceComponentHost>(hostComponents);
   }
 
   @Override
+  @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
   public void addServiceComponentHosts(
       Map<String, ServiceComponentHost> hostComponents) throws AmbariException {
-    clusterGlobalLock.writeLock().lock();
+    readWriteLock.writeLock().lock();
     try {
-      readWriteLock.writeLock().lock();
-      try {
-        // TODO validation
-        for (Entry<String, ServiceComponentHost> entry :
-            hostComponents.entrySet()) {
-          if (!entry.getKey().equals(entry.getValue().getHostName())) {
-            throw new AmbariException("Invalid arguments in map"
-                + ", hostname does not match the key in map");
-          }
+      // TODO validation
+      for (Entry<String, ServiceComponentHost> entry :
+          hostComponents.entrySet()) {
+        if (!entry.getKey().equals(entry.getValue().getHostName())) {
+          throw new AmbariException("Invalid arguments in map"
+              + ", hostname does not match the key in map");
         }
-        for (ServiceComponentHost sch : hostComponents.values()) {
-          addServiceComponentHost(sch);
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
+      }
+      for (ServiceComponentHost sch : hostComponents.values()) {
+        addServiceComponentHost(sch);
       }
     } finally {
-      clusterGlobalLock.writeLock().unlock();
+      readWriteLock.writeLock().unlock();
     }
   }
 
   @Override
+  @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
   public void addServiceComponentHost(
       ServiceComponentHost hostComponent) throws AmbariException {
-    clusterGlobalLock.writeLock().lock();
+    readWriteLock.writeLock().lock();
     try {
-      readWriteLock.writeLock().lock();
-      try {
-        // TODO validation
-        // TODO ensure host belongs to cluster
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Adding a ServiceComponentHost to ServiceComponent"
-              + ", clusterName=" + service.getCluster().getClusterName()
-              + ", clusterId=" + service.getCluster().getClusterId()
-              + ", serviceName=" + service.getName()
-              + ", serviceComponentName=" + getName()
-              + ", hostname=" + hostComponent.getHostName()
-              + ", recoveryEnabled=" + isRecoveryEnabled());
-        }
-        if (hostComponents.containsKey(hostComponent.getHostName())) {
-          throw new AmbariException("Cannot add duplicate ServiceComponentHost"
-              + ", clusterName=" + service.getCluster().getClusterName()
-              + ", clusterId=" + service.getCluster().getClusterId()
-              + ", serviceName=" + service.getName()
-              + ", serviceComponentName=" + getName()
-              + ", hostname=" + hostComponent.getHostName()
-              + ", recoveryEnabled=" + isRecoveryEnabled());
-        }
-        // FIXME need a better approach of caching components by host
-        ClusterImpl clusterImpl = (ClusterImpl) service.getCluster();
-        clusterImpl.addServiceComponentHost(hostComponent);
-        hostComponents.put(hostComponent.getHostName(), hostComponent);
-      } finally {
-        readWriteLock.writeLock().unlock();
+      // TODO validation
+      // TODO ensure host belongs to cluster
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Adding a ServiceComponentHost to ServiceComponent" + ", clusterName="
+            + service.getCluster().getClusterName() + ", clusterId="
+            + service.getCluster().getClusterId() + ", serviceName=" + service.getName()
+            + ", serviceComponentName=" + getName() + ", hostname=" + hostComponent.getHostName()
+            + ", recoveryEnabled=" + isRecoveryEnabled());
+      }
+
+      if (hostComponents.containsKey(hostComponent.getHostName())) {
+        throw new AmbariException("Cannot add duplicate ServiceComponentHost" + ", clusterName="
+            + service.getCluster().getClusterName() + ", clusterId="
+            + service.getCluster().getClusterId() + ", serviceName=" + service.getName()
+            + ", serviceComponentName=" + getName() + ", hostname=" + hostComponent.getHostName()
+            + ", recoveryEnabled=" + isRecoveryEnabled());
       }
+      // FIXME need a better approach of caching components by host
+      ClusterImpl clusterImpl = (ClusterImpl) service.getCluster();
+      clusterImpl.addServiceComponentHost(hostComponent);
+      hostComponents.put(hostComponent.getHostName(), hostComponent);
     } finally {
-      clusterGlobalLock.writeLock().unlock();
+      readWriteLock.writeLock().unlock();
     }
   }
 
   @Override
+  @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
   public ServiceComponentHost addServiceComponentHost(String hostName) throws AmbariException {
-    clusterGlobalLock.writeLock().lock();
-    try {
-      readWriteLock.writeLock().lock();
-      try {
-        // TODO validation
-        // TODO ensure host belongs to cluster
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Adding a ServiceComponentHost to ServiceComponent"
-              + ", clusterName=" + service.getCluster().getClusterName()
-              + ", clusterId=" + service.getCluster().getClusterId()
-              + ", serviceName=" + service.getName()
-              + ", serviceComponentName=" + getName()
-              + ", recoveryEnabled=" + isRecoveryEnabled()
-              + ", hostname=" + hostName);
-        }
-        if (hostComponents.containsKey(hostName)) {
-          throw new AmbariException("Cannot add duplicate ServiceComponentHost"
-              + ", clusterName=" + service.getCluster().getClusterName()
-              + ", clusterId=" + service.getCluster().getClusterId()
-              + ", serviceName=" + service.getName()
-              + ", serviceComponentName=" + getName()
-              + ", recoveryEnabled=" + isRecoveryEnabled()
-              + ", hostname=" + hostName);
-        }
-        ServiceComponentHost hostComponent = serviceComponentHostFactory.createNew(this, hostName);
-        // FIXME need a better approach of caching components by host
-        ClusterImpl clusterImpl = (ClusterImpl) service.getCluster();
-        clusterImpl.addServiceComponentHost(hostComponent);
-
-        hostComponents.put(hostComponent.getHostName(), hostComponent);
-
-        return hostComponent;
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
-    } finally {
-      clusterGlobalLock.writeLock().unlock();
-    }
+    ServiceComponentHost hostComponent = serviceComponentHostFactory.createNew(this, hostName);
+    addServiceComponentHost(hostComponent);
+    return hostComponent;
   }
 
   @Override
+  @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
   public ServiceComponentHost getServiceComponentHost(String hostname)
       throws AmbariException {
-    clusterGlobalLock.readLock().lock();
-    try {
-      readWriteLock.readLock().lock();
-      try {
-        if (!hostComponents.containsKey(hostname)) {
-          throw new ServiceComponentHostNotFoundException(getClusterName(),
-              getServiceName(), getName(), hostname);
-        }
-        return hostComponents.get(hostname);
-      } finally {
-        readWriteLock.readLock().unlock();
-      }
-    } finally {
-      clusterGlobalLock.readLock().unlock();
+    
+    if (!hostComponents.containsKey(hostname)) {
+      throw new ServiceComponentHostNotFoundException(getClusterName(),
+          getServiceName(), getName(), hostname);
     }
+    
+    return hostComponents.get(hostname);
   }
 
   @Override
@@ -580,38 +516,20 @@ public class ServiceComponentImpl implements ServiceComponent {
    * transaction is not necessary before this calling this method.
    */
   @Override
+  @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
   public void persist() {
-    boolean clusterWriteLockAcquired = false;
-    if (!persisted) {
-      clusterGlobalLock.writeLock().lock();
-      clusterWriteLockAcquired = true;
-    }
-
+    readWriteLock.writeLock().lock();
     try {
-      readWriteLock.writeLock().lock();
-      try {
-        if (!persisted) {
-          // persist the new cluster topology and then release the cluster lock
-          // as it has no more bearing on the rest of this persist() method
-          persistEntities();
-          clusterGlobalLock.writeLock().unlock();
-          clusterWriteLockAcquired = false;
-
-          refresh();
-          // There refresh calls are no longer needed with cached references
-          // not used on getters/setters
-          // service.refresh();
-          persisted = true;
-        } else {
-          saveIfPersisted(desiredStateEntity);
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
+      if (!persisted) {
+        // persist the new cluster topology
+        persistEntities();
+        refresh();
+        persisted = true;
+      } else {
+        saveIfPersisted(desiredStateEntity);
       }
     } finally {
-      if (clusterWriteLockAcquired) {
-        clusterGlobalLock.writeLock().unlock();
-      }
+      readWriteLock.writeLock().unlock();
     }
   }
 
@@ -671,123 +589,95 @@ public class ServiceComponentImpl implements ServiceComponent {
   }
 
   @Override
+  @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
   public boolean canBeRemoved() {
-    clusterGlobalLock.readLock().lock();
+    readWriteLock.readLock().lock();
     try {
-      readWriteLock.readLock().lock();
-      try {
-        // A component can be deleted if all it's host components
-        // can be removed, irrespective of the state of
-        // the component itself
-        for (ServiceComponentHost sch : hostComponents.values()) {
-          if (!sch.canBeRemoved()) {
-            LOG.warn("Found non removable hostcomponent when trying to"
-                + " delete service component"
-                + ", clusterName=" + getClusterName()
-                + ", serviceName=" + getServiceName()
-                + ", componentName=" + getName()
-                + ", state=" + sch.getState()
-                + ", hostname=" + sch.getHostName());
-            return false;
-          }
+      // A component can be deleted if all it's host components
+      // can be removed, irrespective of the state of
+      // the component itself
+      for (ServiceComponentHost sch : hostComponents.values()) {
+        if (!sch.canBeRemoved()) {
+          LOG.warn("Found non removable hostcomponent when trying to" + " delete service component"
+              + ", clusterName=" + getClusterName() + ", serviceName=" + getServiceName()
+              + ", componentName=" + getName() + ", state=" + sch.getState() + ", hostname="
+              + sch.getHostName());
+          return false;
         }
-        return true;
-      } finally {
-        readWriteLock.readLock().unlock();
       }
+      return true;
     } finally {
-      clusterGlobalLock.readLock().unlock();
+      readWriteLock.readLock().unlock();
     }
   }
 
   @Override
   @Transactional
+  @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
   public void deleteAllServiceComponentHosts() throws AmbariException {
-    clusterGlobalLock.writeLock().lock();
+    readWriteLock.writeLock().lock();
     try {
-      readWriteLock.writeLock().lock();
-      try {
-        LOG.info("Deleting all servicecomponenthosts for component"
-            + ", clusterName=" + getClusterName()
-            + ", serviceName=" + getServiceName()
-            + ", componentName=" + getName()
-            + ", recoveryEnabled=" + isRecoveryEnabled());
-        for (ServiceComponentHost sch : hostComponents.values()) {
-          if (!sch.canBeRemoved()) {
-            throw new AmbariException("Found non removable hostcomponent "
-                + " when trying to delete"
-                + " all hostcomponents from servicecomponent"
-                + ", clusterName=" + getClusterName()
-                + ", serviceName=" + getServiceName()
-                + ", componentName=" + getName()
-                + ", recoveryEnabled=" + isRecoveryEnabled()
-                + ", hostname=" + sch.getHostName());
-          }
-        }
-
-        for (ServiceComponentHost serviceComponentHost : hostComponents.values()) {
-          serviceComponentHost.delete();
+      LOG.info("Deleting all servicecomponenthosts for component" + ", clusterName="
+          + getClusterName() + ", serviceName=" + getServiceName() + ", componentName=" + getName()
+          + ", recoveryEnabled=" + isRecoveryEnabled());
+      for (ServiceComponentHost sch : hostComponents.values()) {
+        if (!sch.canBeRemoved()) {
+          throw new AmbariException("Found non removable hostcomponent " + " when trying to delete"
+              + " all hostcomponents from servicecomponent" + ", clusterName=" + getClusterName()
+              + ", serviceName=" + getServiceName() + ", componentName=" + getName()
+              + ", recoveryEnabled=" + isRecoveryEnabled() + ", hostname=" + sch.getHostName());
         }
+      }
 
-        hostComponents.clear();
-      } finally {
-        readWriteLock.writeLock().unlock();
+      for (ServiceComponentHost serviceComponentHost : hostComponents.values()) {
+        serviceComponentHost.delete();
       }
+
+      hostComponents.clear();
     } finally {
-      clusterGlobalLock.writeLock().unlock();
+      readWriteLock.writeLock().unlock();
     }
   }
 
   @Override
+  @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
   public void deleteServiceComponentHosts(String hostname) throws AmbariException {
-    clusterGlobalLock.writeLock().lock();
+    readWriteLock.writeLock().lock();
     try {
-      readWriteLock.writeLock().lock();
-      try {
-        ServiceComponentHost sch = getServiceComponentHost(hostname);
-        LOG.info("Deleting servicecomponenthost for cluster"
+      ServiceComponentHost sch = getServiceComponentHost(hostname);
+      LOG.info("Deleting servicecomponenthost for cluster" + ", clusterName=" + getClusterName()
+          + ", serviceName=" + getServiceName() + ", componentName=" + getName()
+          + ", recoveryEnabled=" + isRecoveryEnabled() + ", hostname=" + sch.getHostName());
+      if (!sch.canBeRemoved()) {
+        throw new AmbariException("Could not delete hostcomponent from cluster"
             + ", clusterName=" + getClusterName()
             + ", serviceName=" + getServiceName()
             + ", componentName=" + getName()
             + ", recoveryEnabled=" + isRecoveryEnabled()
             + ", hostname=" + sch.getHostName());
-        if (!sch.canBeRemoved()) {
-          throw new AmbariException("Could not delete hostcomponent from cluster"
-              + ", clusterName=" + getClusterName()
-              + ", serviceName=" + getServiceName()
-              + ", componentName=" + getName()
-              + ", recoveryEnabled=" + isRecoveryEnabled()
-              + ", hostname=" + sch.getHostName());
-        }
-        sch.delete();
-        hostComponents.remove(hostname);
-
-      } finally {
-        readWriteLock.writeLock().unlock();
       }
+      sch.delete();
+      hostComponents.remove(hostname);
+
     } finally {
-      clusterGlobalLock.writeLock().unlock();
+      readWriteLock.writeLock().unlock();
     }
   }
 
   @Override
   @Transactional
+  @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
   public void delete() throws AmbariException {
-    clusterGlobalLock.writeLock().lock();
+    readWriteLock.writeLock().lock();
     try {
-      readWriteLock.writeLock().lock();
-      try {
-        deleteAllServiceComponentHosts();
+      deleteAllServiceComponentHosts();
 
-        if (persisted) {
-          removeEntities();
-          persisted = false;
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
+      if (persisted) {
+        removeEntities();
+        persisted = false;
       }
     } finally {
-      clusterGlobalLock.writeLock().unlock();
+      readWriteLock.writeLock().unlock();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
index 3120b86..36d4902 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
@@ -18,12 +18,15 @@
 
 package org.apache.ambari.server.state;
 
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.ProvisionException;
-import com.google.inject.assistedinject.Assisted;
-import com.google.inject.assistedinject.AssistedInject;
-import com.google.inject.persist.Transactional;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.ambari.annotations.Experimental;
+import org.apache.ambari.annotations.ExperimentalFeature;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.ServiceComponentNotFoundException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
@@ -43,7 +46,6 @@ import org.apache.ambari.server.orm.entities.ClusterConfigMappingEntity;
 import org.apache.ambari.server.orm.entities.ClusterEntity;
 import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
 import org.apache.ambari.server.orm.entities.ClusterServiceEntityPK;
-import org.apache.ambari.server.orm.entities.ConfigGroupEntity;
 import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
 import org.apache.ambari.server.orm.entities.ServiceConfigEntity;
 import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
@@ -51,19 +53,16 @@ import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntityPK;
 import org.apache.ambari.server.orm.entities.StackEntity;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.ProvisionException;
+import com.google.inject.assistedinject.Assisted;
+import com.google.inject.assistedinject.AssistedInject;
+import com.google.inject.persist.Transactional;
 
 
 public class ServiceImpl implements Service {
-  private final ReadWriteLock clusterGlobalLock;
   private ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
   // Cached entity has only 1 getter for name
   private ClusterServiceEntity serviceEntity;
@@ -113,7 +112,6 @@ public class ServiceImpl implements Service {
   public ServiceImpl(@Assisted Cluster cluster, @Assisted String serviceName,
       Injector injector) throws AmbariException {
     injector.injectMembers(this);
-    clusterGlobalLock = cluster.getClusterGlobalLock();
     serviceEntity = new ClusterServiceEntity();
     serviceEntity.setClusterId(cluster.getClusterId());
     serviceEntity.setServiceName(serviceName);
@@ -145,7 +143,6 @@ public class ServiceImpl implements Service {
   public ServiceImpl(@Assisted Cluster cluster, @Assisted ClusterServiceEntity
       serviceEntity, Injector injector) throws AmbariException {
     injector.injectMembers(this);
-    clusterGlobalLock = cluster.getClusterGlobalLock();
     this.serviceEntity = serviceEntity;
     this.cluster = cluster;
 
@@ -182,11 +179,6 @@ public class ServiceImpl implements Service {
   }
 
   @Override
-  public ReadWriteLock getClusterGlobalLock() {
-    return clusterGlobalLock;
-  }
-
-  @Override
   public String getName() {
     return serviceEntity.getServiceName();
   }
@@ -207,83 +199,35 @@ public class ServiceImpl implements Service {
   }
 
   @Override
+  @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
   public void addServiceComponents(
       Map<String, ServiceComponent> components) throws AmbariException {
-    clusterGlobalLock.writeLock().lock();
-    try {
-      readWriteLock.writeLock().lock();
-      try {
-        for (ServiceComponent sc : components.values()) {
-          addServiceComponent(sc);
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
-    } finally {
-      clusterGlobalLock.writeLock().unlock();
+    for (ServiceComponent sc : components.values()) {
+      addServiceComponent(sc);
     }
   }
 
   @Override
+  @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
   public void addServiceComponent(ServiceComponent component) throws AmbariException {
-    clusterGlobalLock.writeLock().lock();
-    try {
-      readWriteLock.writeLock().lock();
-      try {
-        // TODO validation
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Adding a ServiceComponent to Service"
-              + ", clusterName=" + cluster.getClusterName()
-              + ", clusterId=" + cluster.getClusterId()
-              + ", serviceName=" + getName()
-              + ", serviceComponentName=" + component.getName());
-        }
-        if (components.containsKey(component.getName())) {
-          throw new AmbariException("Cannot add duplicate ServiceComponent"
-              + ", clusterName=" + cluster.getClusterName()
-              + ", clusterId=" + cluster.getClusterId()
-              + ", serviceName=" + getName()
-              + ", serviceComponentName=" + component.getName());
-        }
-        components.put(component.getName(), component);
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
-    } finally {
-      clusterGlobalLock.writeLock().unlock();
+    if (components.containsKey(component.getName())) {
+      throw new AmbariException("Cannot add duplicate ServiceComponent"
+          + ", clusterName=" + cluster.getClusterName()
+          + ", clusterId=" + cluster.getClusterId()
+          + ", serviceName=" + getName()
+          + ", serviceComponentName=" + component.getName());
     }
+    
+    components.put(component.getName(), component);
   }
 
   @Override
+  @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
   public ServiceComponent addServiceComponent(String serviceComponentName)
       throws AmbariException {
-    clusterGlobalLock.writeLock().lock();
-    try {
-      readWriteLock.writeLock().lock();
-      try {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Adding a ServiceComponent to Service"
-              + ", clusterName=" + cluster.getClusterName()
-              + ", clusterId=" + cluster.getClusterId()
-              + ", serviceName=" + getName()
-              + ", serviceComponentName=" + serviceComponentName);
-        }
-        if (components.containsKey(serviceComponentName)) {
-          throw new AmbariException("Cannot add duplicate ServiceComponent"
-              + ", clusterName=" + cluster.getClusterName()
-              + ", clusterId=" + cluster.getClusterId()
-              + ", serviceName=" + getName()
-              + ", serviceComponentName=" + serviceComponentName);
-        }
-        ServiceComponent component = serviceComponentFactory.createNew(this, serviceComponentName);
-        components.put(component.getName(), component);
-        return component;
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
-    } finally {
-      clusterGlobalLock.writeLock().unlock();
-    }
+    ServiceComponent component = serviceComponentFactory.createNew(this, serviceComponentName);
+    addServiceComponent(component);
+    return component;
   }
 
   @Override
@@ -460,36 +404,30 @@ public class ServiceImpl implements Service {
    * transaction is not necessary before this calling this method.
    */
   @Override
+  @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
   public void persist() {
-    clusterGlobalLock.writeLock().lock();
+    readWriteLock.writeLock().lock();
     try {
-      readWriteLock.writeLock().lock();
-      try {
-        if (!persisted) {
-          persistEntities();
-          refresh();
-          // There refresh calls are no longer needed with cached references
-          // not used on getters/setters
-          // cluster.refresh();
-          persisted = true;
-
-          // publish the service installed event
-          StackId stackId = cluster.getDesiredStackVersion();
-          cluster.addService(this);
-
-          ServiceInstalledEvent event = new ServiceInstalledEvent(
-              getClusterId(), stackId.getStackName(),
-              stackId.getStackVersion(), getName());
-
-          eventPublisher.publish(event);
-        } else {
-          saveIfPersisted();
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
+      if (!persisted) {
+        persistEntities();
+        refresh();
+
+        persisted = true;
+
+        // publish the service installed event
+        StackId stackId = cluster.getDesiredStackVersion();
+        cluster.addService(this);
+
+        ServiceInstalledEvent event = new ServiceInstalledEvent(
+            getClusterId(), stackId.getStackName(),
+            stackId.getStackVersion(), getName());
+
+        eventPublisher.publish(event);
+      } else {
+        saveIfPersisted();
       }
     } finally {
-      clusterGlobalLock.writeLock().unlock();
+      readWriteLock.writeLock().unlock();
     }
   }
 
@@ -535,31 +473,26 @@ public class ServiceImpl implements Service {
   }
 
   @Override
+  @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
   public boolean canBeRemoved() {
-    clusterGlobalLock.readLock().lock();
+    readWriteLock.readLock().lock();
     try {
-      readWriteLock.readLock().lock();
-      try {
-        //
-        // A service can be deleted if all it's components
-        // can be removed, irrespective of the state of
-        // the service itself.
-        //
-        for (ServiceComponent sc : components.values()) {
-          if (!sc.canBeRemoved()) {
-            LOG.warn("Found non removable component when trying to delete service"
-                + ", clusterName=" + cluster.getClusterName()
-                + ", serviceName=" + getName()
-                + ", componentName=" + sc.getName());
-            return false;
-          }
+      //
+      // A service can be deleted if all it's components
+      // can be removed, irrespective of the state of
+      // the service itself.
+      //
+      for (ServiceComponent sc : components.values()) {
+        if (!sc.canBeRemoved()) {
+          LOG.warn("Found non removable component when trying to delete service" + ", clusterName="
+              + cluster.getClusterName() + ", serviceName=" + getName() + ", componentName="
+              + sc.getName());
+          return false;
         }
-        return true;
-      } finally {
-        readWriteLock.readLock().unlock();
       }
+      return true;
     } finally {
-      clusterGlobalLock.readLock().unlock();
+      readWriteLock.readLock().unlock();
     }
   }
 
@@ -599,71 +532,56 @@ public class ServiceImpl implements Service {
       serviceConfigDAO.remove(serviceConfigEntity);
     }
   }
-  
+
   @Override
   @Transactional
+  @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
   public void deleteAllComponents() throws AmbariException {
-    clusterGlobalLock.writeLock().lock();
+    readWriteLock.writeLock().lock();
     try {
-      readWriteLock.writeLock().lock();
-      try {
-        LOG.info("Deleting all components for service"
-            + ", clusterName=" + cluster.getClusterName()
-            + ", serviceName=" + getName());
-        // FIXME check dependencies from meta layer
-        for (ServiceComponent component : components.values()) {
-          if (!component.canBeRemoved()) {
-            throw new AmbariException("Found non removable component when trying to"
-                + " delete all components from service"
-                + ", clusterName=" + cluster.getClusterName()
-                + ", serviceName=" + getName()
-                + ", componentName=" + component.getName());
-          }
-        }
-
-        for (ServiceComponent serviceComponent : components.values()) {
-          serviceComponent.delete();
+      LOG.info("Deleting all components for service" + ", clusterName=" + cluster.getClusterName()
+          + ", serviceName=" + getName());
+      // FIXME check dependencies from meta layer
+      for (ServiceComponent component : components.values()) {
+        if (!component.canBeRemoved()) {
+          throw new AmbariException("Found non removable component when trying to"
+              + " delete all components from service" + ", clusterName=" + cluster.getClusterName()
+              + ", serviceName=" + getName() + ", componentName=" + component.getName());
         }
+      }
 
-        components.clear();
-      } finally {
-        readWriteLock.writeLock().unlock();
+      for (ServiceComponent serviceComponent : components.values()) {
+        serviceComponent.delete();
       }
+
+      components.clear();
     } finally {
-      clusterGlobalLock.writeLock().unlock();
+      readWriteLock.writeLock().unlock();
     }
   }
 
   @Override
+  @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
   public void deleteServiceComponent(String componentName)
       throws AmbariException {
-    clusterGlobalLock.writeLock().lock();
+    readWriteLock.writeLock().lock();
     try {
-      readWriteLock.writeLock().lock();
-      try {
-        ServiceComponent component = getServiceComponent(componentName);
-        LOG.info("Deleting servicecomponent for cluster"
+      ServiceComponent component = getServiceComponent(componentName);
+      LOG.info("Deleting servicecomponent for cluster" + ", clusterName=" + cluster.getClusterName()
+          + ", serviceName=" + getName() + ", componentName=" + componentName);
+      // FIXME check dependencies from meta layer
+      if (!component.canBeRemoved()) {
+        throw new AmbariException("Could not delete component from cluster"
             + ", clusterName=" + cluster.getClusterName()
             + ", serviceName=" + getName()
             + ", componentName=" + componentName);
-        // FIXME check dependencies from meta layer
-        if (!component.canBeRemoved()) {
-          throw new AmbariException("Could not delete component from cluster"
-              + ", clusterName=" + cluster.getClusterName()
-              + ", serviceName=" + getName()
-              + ", componentName=" + componentName);
-        }
-
-        component.delete();
-        components.remove(componentName);
-      } finally {
-        readWriteLock.writeLock().unlock();
       }
+
+      component.delete();
+      components.remove(componentName);
     } finally {
-      clusterGlobalLock.writeLock().unlock();
+      readWriteLock.writeLock().unlock();
     }
-
-
   }
 
   @Override
@@ -673,34 +591,28 @@ public class ServiceImpl implements Service {
 
   @Override
   @Transactional
+  @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
   public void delete() throws AmbariException {
-    clusterGlobalLock.writeLock().lock();
+    readWriteLock.writeLock().lock();
     try {
-      readWriteLock.writeLock().lock();
-      try {
-        deleteAllComponents();
-        deleteAllServiceConfigs();
+      deleteAllComponents();
+      deleteAllServiceConfigs();
 
-        if (persisted) {
-          removeEntities();
-          persisted = false;
+      if (persisted) {
+        removeEntities();
+        persisted = false;
 
-          // publish the service removed event
-          StackId stackId = cluster.getDesiredStackVersion();
+        // publish the service removed event
+        StackId stackId = cluster.getDesiredStackVersion();
 
-          ServiceRemovedEvent event = new ServiceRemovedEvent(getClusterId(),
-              stackId.getStackName(), stackId.getStackVersion(), getName());
+        ServiceRemovedEvent event = new ServiceRemovedEvent(getClusterId(), stackId.getStackName(),
+            stackId.getStackVersion(), getName());
 
-          eventPublisher.publish(event);
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
+        eventPublisher.publish(event);
       }
     } finally {
-      clusterGlobalLock.writeLock().unlock();
+      readWriteLock.writeLock().unlock();
     }
-
-
   }
 
   @Transactional

http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index 2f7d6b9..a6f0a3b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -336,12 +336,6 @@ public class ClusterImpl implements Cluster {
     this.eventPublisher = eventPublisher;
   }
 
-
-  @Override
-  public ReadWriteLock getClusterGlobalLock() {
-    return clusterGlobalLock;
-  }
-
   private void loadServiceConfigTypes() throws AmbariException {
     try {
       serviceConfigTypes = collectServiceConfigTypesMapping();

http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
index 1d6b1e8..9917720 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
@@ -44,7 +44,6 @@ import org.apache.ambari.server.orm.entities.HostEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.Host;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -80,8 +79,6 @@ public class ConfigGroupImpl implements ConfigGroup {
   private ClusterDAO clusterDAO;
   @Inject
   Clusters clusters;
-  @Inject
-  private ConfigFactory configFactory;
 
   @AssistedInject
   public ConfigGroupImpl(@Assisted("cluster") Cluster cluster,
@@ -317,23 +314,18 @@ public class ConfigGroupImpl implements ConfigGroup {
 
   @Override
   public void persist() {
-    cluster.getClusterGlobalLock().writeLock().lock();
+    readWriteLock.writeLock().lock();
     try {
-      readWriteLock.writeLock().lock();
-      try {
-        if (!isPersisted) {
-          persistEntities();
-          refresh();
-          cluster.refresh();
-          isPersisted = true;
-        } else {
-          saveIfPersisted();
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
+      if (!isPersisted) {
+        persistEntities();
+        refresh();
+        cluster.refresh();
+        isPersisted = true;
+      } else {
+        saveIfPersisted();
       }
     } finally {
-      cluster.getClusterGlobalLock().writeLock().unlock();
+      readWriteLock.writeLock().unlock();
     }
   }
 
@@ -465,20 +457,15 @@ public class ConfigGroupImpl implements ConfigGroup {
 
   @Override
   public void delete() {
-    cluster.getClusterGlobalLock().writeLock().lock();
+    readWriteLock.writeLock().lock();
     try {
-      readWriteLock.writeLock().lock();
-      try {
-        configGroupConfigMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
-        configGroupHostMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
-        configGroupDAO.removeByPK(configGroupEntity.getGroupId());
-        cluster.refresh();
-        isPersisted = false;
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
+      configGroupConfigMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
+      configGroupHostMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
+      configGroupDAO.removeByPK(configGroupEntity.getGroupId());
+      cluster.refresh();
+      isPersisted = false;
     } finally {
-      cluster.getClusterGlobalLock().writeLock().unlock();
+      readWriteLock.writeLock().unlock();
     }
   }
 
@@ -526,40 +513,33 @@ public class ConfigGroupImpl implements ConfigGroup {
 
   @Override
   public ConfigGroupResponse convertToResponse() throws AmbariException {
-    cluster.getClusterGlobalLock().readLock().lock();
+    readWriteLock.readLock().lock();
     try {
-      readWriteLock.readLock().lock();
-      try {
-        Set<Map<String, Object>> hostnames = new HashSet<Map<String, Object>>();
-        for (Host host : hosts.values()) {
-          Map<String, Object> hostMap = new HashMap<String, Object>();
-          hostMap.put("host_name", host.getHostName());
-          hostnames.add(hostMap);
-        }
+      Set<Map<String, Object>> hostnames = new HashSet<Map<String, Object>>();
+      for (Host host : hosts.values()) {
+        Map<String, Object> hostMap = new HashMap<String, Object>();
+        hostMap.put("host_name", host.getHostName());
+        hostnames.add(hostMap);
+      }
 
-        Set<Map<String, Object>> configObjMap = new HashSet<Map<String,
-          Object>>();
+      Set<Map<String, Object>> configObjMap = new HashSet<Map<String, Object>>();
 
-        for (Config config : configurations.values()) {
-          Map<String, Object> configMap = new HashMap<String, Object>();
-          configMap.put(ConfigurationResourceProvider
-            .CONFIGURATION_CONFIG_TYPE_PROPERTY_ID, config.getType());
-          configMap.put(ConfigurationResourceProvider
-            .CONFIGURATION_CONFIG_TAG_PROPERTY_ID, config.getTag());
-          configObjMap.add(configMap);
-        }
+      for (Config config : configurations.values()) {
+        Map<String, Object> configMap = new HashMap<String, Object>();
+        configMap.put(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TYPE_PROPERTY_ID,
+            config.getType());
+        configMap.put(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TAG_PROPERTY_ID,
+            config.getTag());
+        configObjMap.add(configMap);
+      }
 
-        ConfigGroupResponse configGroupResponse = new ConfigGroupResponse(
+      ConfigGroupResponse configGroupResponse = new ConfigGroupResponse(
           configGroupEntity.getGroupId(), cluster.getClusterName(),
           configGroupEntity.getGroupName(), configGroupEntity.getTag(),
-          configGroupEntity.getDescription(),
-          hostnames, configObjMap);
-        return configGroupResponse;
-      } finally {
-        readWriteLock.readLock().unlock();
-      }
+          configGroupEntity.getDescription(), hostnames, configObjMap);
+      return configGroupResponse;
     } finally {
-      cluster.getClusterGlobalLock().readLock().unlock();
+      readWriteLock.readLock().unlock();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
index 3b5ed28..7e345e5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
@@ -30,6 +30,8 @@ import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
+import org.apache.ambari.annotations.Experimental;
+import org.apache.ambari.annotations.ExperimentalFeature;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.agent.AlertDefinitionCommand;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
@@ -92,7 +94,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
   private static final Logger LOG =
       LoggerFactory.getLogger(ServiceComponentHostImpl.class);
 
-  private final ReadWriteLock clusterGlobalLock;
   private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
   private final Lock readLock = readWriteLock.readLock();
   private final Lock writeLock = readWriteLock.writeLock();
@@ -751,7 +752,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     }
 
     this.serviceComponent = serviceComponent;
-    clusterGlobalLock = serviceComponent.getClusterGlobalLock();
 
     HostEntity hostEntity = null;
     try {
@@ -805,7 +805,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
                                   Injector injector) {
     injector.injectMembers(this);
     this.serviceComponent = serviceComponent;
-    clusterGlobalLock = serviceComponent.getClusterGlobalLock();
 
     this.desiredStateEntity = desiredStateEntity;
     this.stateEntity = stateEntity;
@@ -1029,6 +1028,7 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
 
   @Override
+  @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
   public void handleEvent(ServiceComponentHostEvent event)
       throws InvalidStateTransitionException {
     if (LOG.isDebugEnabled()) {
@@ -1037,30 +1037,25 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
           + ", event=" + event.toString());
     }
     State oldState = getState();
-    clusterGlobalLock.readLock().lock();
     try {
+      writeLock.lock();
       try {
-        writeLock.lock();
-        try {
-          stateMachine.doTransition(event.getType(), event);
-          getStateEntity().setCurrentState(stateMachine.getCurrentState());
-          saveComponentStateEntityIfPersisted();
-          // TODO Audit logs
-        } catch (InvalidStateTransitionException e) {
-          LOG.error("Can't handle ServiceComponentHostEvent event at"
-              + " current state"
-              + ", serviceComponentName=" + getServiceComponentName()
-              + ", hostName=" + getHostName()
-            + ", currentState=" + oldState
-              + ", eventType=" + event.getType()
-              + ", event=" + event);
-          throw e;
-        }
-      } finally {
-        writeLock.unlock();
+        stateMachine.doTransition(event.getType(), event);
+        getStateEntity().setCurrentState(stateMachine.getCurrentState());
+        saveComponentStateEntityIfPersisted();
+        // TODO Audit logs
+      } catch (InvalidStateTransitionException e) {
+        LOG.error("Can't handle ServiceComponentHostEvent event at"
+            + " current state"
+            + ", serviceComponentName=" + getServiceComponentName()
+            + ", hostName=" + getHostName()
+          + ", currentState=" + oldState
+            + ", eventType=" + event.getType()
+            + ", event=" + event);
+        throw e;
       }
     } finally {
-      clusterGlobalLock.readLock().unlock();
+      writeLock.unlock();
     }
 
     if (!oldState.equals(getState())) {
@@ -1349,58 +1344,56 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
   }
 
   @Override
+  @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
   public ServiceComponentHostResponse convertToResponse(Map<String, DesiredConfig> desiredConfigs) {
-    clusterGlobalLock.readLock().lock();
+    readLock.lock();
     try {
-      readLock.lock();
-      try {
-        HostComponentStateEntity hostComponentStateEntity = getStateEntity();
-        if (null == hostComponentStateEntity) {
-          LOG.warn("Could not convert ServiceComponentHostResponse to a response. It's possible that Host " + getHostName() + " was deleted.");
-          return null;
-        }
-
-        String clusterName = serviceComponent.getClusterName();
-        String serviceName = serviceComponent.getServiceName();
-        String serviceComponentName = serviceComponent.getName();
-        String hostName = getHostName();
-        String state = getState().toString();
-        String stackId = getStackVersion().getStackId();
-        String desiredState = getDesiredState().toString();
-        String desiredStackId = getDesiredStackVersion().getStackId();
-        HostComponentAdminState componentAdminState = getComponentAdminState();
-        UpgradeState upgradeState = hostComponentStateEntity.getUpgradeState();
-
-        String displayName = null;
-        try {
-          ComponentInfo compInfo = ambariMetaInfo.getComponent(getStackVersion().getStackName(),
-            getStackVersion().getStackVersion(), serviceName, serviceComponentName);
-          displayName = compInfo.getDisplayName();
-        } catch (AmbariException e) {
-          displayName = serviceComponentName;
-        }
+      HostComponentStateEntity hostComponentStateEntity = getStateEntity();
+      if (null == hostComponentStateEntity) {
+        LOG.warn(
+            "Could not convert ServiceComponentHostResponse to a response. It's possible that Host {} was deleted.",
+            getHostName());
+        return null;
+      }
 
-        ServiceComponentHostResponse r = new ServiceComponentHostResponse(
-            clusterName, serviceName,
-            serviceComponentName, displayName, hostName, state,
-            stackId, desiredState,
-            desiredStackId, componentAdminState);
+      String clusterName = serviceComponent.getClusterName();
+      String serviceName = serviceComponent.getServiceName();
+      String serviceComponentName = serviceComponent.getName();
+      String hostName = getHostName();
+      String state = getState().toString();
+      String stackId = getStackVersion().getStackId();
+      String desiredState = getDesiredState().toString();
+      String desiredStackId = getDesiredStackVersion().getStackId();
+      HostComponentAdminState componentAdminState = getComponentAdminState();
+      UpgradeState upgradeState = hostComponentStateEntity.getUpgradeState();
+
+      String displayName = null;
+      try {
+        ComponentInfo compInfo = ambariMetaInfo.getComponent(getStackVersion().getStackName(),
+          getStackVersion().getStackVersion(), serviceName, serviceComponentName);
+        displayName = compInfo.getDisplayName();
+      } catch (AmbariException e) {
+        displayName = serviceComponentName;
+      }
 
-        r.setActualConfigs(actualConfigs);
-        r.setUpgradeState(upgradeState);
+      ServiceComponentHostResponse r = new ServiceComponentHostResponse(
+          clusterName, serviceName,
+          serviceComponentName, displayName, hostName, state,
+          stackId, desiredState,
+          desiredStackId, componentAdminState);
 
-        try {
-          r.setStaleConfig(helper.isStaleConfigs(this, desiredConfigs));
-        } catch (Exception e) {
-          LOG.error("Could not determine stale config", e);
-        }
+      r.setActualConfigs(actualConfigs);
+      r.setUpgradeState(upgradeState);
 
-        return r;
-      } finally {
-        readLock.unlock();
+      try {
+        r.setStaleConfig(helper.isStaleConfigs(this, desiredConfigs));
+      } catch (Exception e) {
+        LOG.error("Could not determine stale config", e);
       }
+
+      return r;
     } finally {
-      clusterGlobalLock.readLock().unlock();
+      readLock.unlock();
     }
   }
 
@@ -1448,52 +1441,29 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
    */
   @Override
   public void persist() {
-    boolean clusterWriteLockAcquired = false;
-    if (!persisted) {
-      clusterGlobalLock.writeLock().lock();
-      clusterWriteLockAcquired = true;
-    }
-
+    writeLock.lock();
     try {
-      writeLock.lock();
-      try {
-        if (!persisted) {
-          // persist the new cluster topology and then release the cluster lock
-          // as it has no more bearing on the rest of this persist() method
-          persistEntities();
-          persisted = true;
-
-          clusterGlobalLock.writeLock().unlock();
-          clusterWriteLockAcquired = false;
-
-          // these should still be done with the internal lock
-          refresh();
-          // There refresh calls are no longer needed with cached references
-          // not used on getters/setters
-          // NOTE: Refreshing parents is a bad pattern.
-          //host.refresh();
-          //serviceComponent.refresh();
-
-          // publish the service component installed event
-          StackId stackId = getDesiredStackVersion();
-
-          ServiceComponentInstalledEvent event = new ServiceComponentInstalledEvent(
-              getClusterId(), stackId.getStackName(),
-              stackId.getStackVersion(), getServiceName(), getServiceComponentName(), getHostName(),
-                  isRecoveryEnabled());
-
-          eventPublisher.publish(event);
-        } else {
-          saveComponentStateEntityIfPersisted();
-          saveComponentDesiredStateEntityIfPersisted();
-        }
-      } finally {
-        writeLock.unlock();
+      if (!persisted) {
+        // persist the new cluster topology
+        persistEntities();
+        persisted = true;
+
+        refresh();
+
+        // publish the service component installed event
+        StackId stackId = getDesiredStackVersion();
+
+        ServiceComponentInstalledEvent event = new ServiceComponentInstalledEvent(getClusterId(),
+            stackId.getStackName(), stackId.getStackVersion(), getServiceName(),
+            getServiceComponentName(), getHostName(), isRecoveryEnabled());
+
+        eventPublisher.publish(event);
+      } else {
+        saveComponentStateEntityIfPersisted();
+        saveComponentDesiredStateEntityIfPersisted();
       }
     } finally {
-      if (clusterWriteLockAcquired) {
-        clusterGlobalLock.writeLock().unlock();
-      }
+      writeLock.unlock();
     }
   }
 
@@ -1568,8 +1538,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
 
   @Override
+  @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
   public boolean canBeRemoved() {
-    clusterGlobalLock.readLock().lock();
     boolean schLockAcquired = false;
     try {
       // if unable to read, then writers are writing; cannot remove SCH
@@ -1581,38 +1551,33 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
       if (schLockAcquired) {
         readLock.unlock();
       }
-      clusterGlobalLock.readLock().unlock();
     }
   }
 
   @Override
+  @Experimental(feature = ExperimentalFeature.CLUSTER_GLOBAL_LOCK_REMOVAL)
   public void delete() {
     boolean fireRemovalEvent = false;
 
-    clusterGlobalLock.writeLock().lock();
+    writeLock.lock();
     try {
-      writeLock.lock();
-      try {
-        if (persisted) {
-          removeEntities();
-
-          // host must be re-loaded from db to refresh the cached JPA HostEntity
-          // that references HostComponentDesiredStateEntity
-          // and HostComponentStateEntity JPA entities
-          host.refresh();
+      if (persisted) {
+        removeEntities();
 
-          persisted = false;
-          fireRemovalEvent = true;
-        }
+        // host must be re-loaded from db to refresh the cached JPA HostEntity
+        // that references HostComponentDesiredStateEntity
+        // and HostComponentStateEntity JPA entities
+        host.refresh();
 
-        clusters.getCluster(getClusterName()).removeServiceComponentHost(this);
-      } catch (AmbariException ex) {
-        LOG.error("Unable to remove a service component from a host", ex);
-      } finally {
-        writeLock.unlock();
+        persisted = false;
+        fireRemovalEvent = true;
       }
+
+      clusters.getCluster(getClusterName()).removeServiceComponentHost(this);
+    } catch (AmbariException ex) {
+      LOG.error("Unable to remove a service component from a host", ex);
     } finally {
-      clusterGlobalLock.writeLock().unlock();
+      writeLock.unlock();
     }
 
     // publish event for the removal of the SCH after the removal is

http://git-wip-us.apache.org/repos/asf/ambari/blob/561c6f2f/ambari-server/src/test/java/org/apache/ambari/server/update/HostUpdateHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/update/HostUpdateHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/update/HostUpdateHelperTest.java
index 387205d..f9dd5d1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/update/HostUpdateHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/update/HostUpdateHelperTest.java
@@ -18,12 +18,18 @@
 package org.apache.ambari.server.update;
 
 
-import com.google.gson.JsonObject;
-import com.google.gson.JsonPrimitive;
-import com.google.inject.AbstractModule;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import junit.framework.Assert;
+import static org.easymock.EasyMock.anyString;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import javax.persistence.EntityManager;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.AmbariManagementController;
@@ -51,18 +57,13 @@ import org.easymock.EasyMock;
 import org.easymock.EasyMockSupport;
 import org.junit.Test;
 
-import javax.persistence.EntityManager;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReadWriteLock;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonPrimitive;
+import com.google.inject.AbstractModule;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
 
-import static org.easymock.EasyMock.anyString;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
+import junit.framework.Assert;
 
 public class HostUpdateHelperTest {
 
@@ -217,8 +218,6 @@ public class HostUpdateHelperTest {
     ClusterConfigEntity mockClusterConfigEntity3 = easyMockSupport.createNiceMock(ClusterConfigEntity.class);
     ClusterConfigEntity mockClusterConfigEntity4 = easyMockSupport.createNiceMock(ClusterConfigEntity.class);
     StackEntity mockStackEntity = easyMockSupport.createNiceMock(StackEntity.class);
-    ReadWriteLock mockReadWriteLock = easyMockSupport.createNiceMock(ReadWriteLock.class);
-    Lock mockLock = easyMockSupport.createNiceMock(Lock.class);
     Map<String, Map<String, String>> clusterHostsToChange = new HashMap<>();
     Map<String, String> hosts = new HashMap<>();
     List<ClusterConfigEntity> clusterConfigEntities1 = new ArrayList<>();
@@ -254,11 +253,8 @@ public class HostUpdateHelperTest {
     expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
 
     expect(mockClusters.getCluster("cl1")).andReturn(mockCluster).once();
-    expect(mockCluster.getClusterGlobalLock()).andReturn(mockReadWriteLock).atLeastOnce();
     expect(mockCluster.getClusterId()).andReturn(1L).atLeastOnce();
 
-    expect(mockReadWriteLock.writeLock()).andReturn(mockLock).atLeastOnce();
-
     expect(mockClusterEntity1.getClusterConfigEntities()).andReturn(clusterConfigEntities1).atLeastOnce();
     expect(mockClusterEntity2.getClusterConfigEntities()).andReturn(clusterConfigEntities2).atLeastOnce();
 


[07/17] ambari git commit: AMBARI-18459: Print error messages if bulkcommand section of a component includes non-existing component (dili)

Posted by jo...@apache.org.
AMBARI-18459: Print error messages if bulkcommand section of a component includes non-existing component (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/53b4bd41
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/53b4bd41
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/53b4bd41

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 53b4bd41518a5292d3c6c9cdafecee3898d046f3
Parents: 87423d6
Author: Di Li <di...@apache.org>
Authored: Tue Sep 27 10:58:59 2016 -0400
Committer: Di Li <di...@apache.org>
Committed: Tue Sep 27 10:58:59 2016 -0400

----------------------------------------------------------------------
 .../ambari/server/stack/ServiceModule.java      |  4 +--
 .../apache/ambari/server/stack/StackModule.java | 37 ++++++++++++++++++--
 2 files changed, 37 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/53b4bd41/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceModule.java
index 650bdf1..34e65c3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceModule.java
@@ -180,7 +180,7 @@ public class ServiceModule extends BaseModule<ServiceModule, ServiceInfo> implem
       return;
     }
 
-    LOG.info("Resolve service");
+    LOG.debug("Resolve service");
 
     // If resolving against parent stack service module (stack inheritance), do not merge if an
     // explicit parent is specified
@@ -193,7 +193,7 @@ public class ServiceModule extends BaseModule<ServiceModule, ServiceInfo> implem
     if (serviceInfo.getComment() == null) {
       serviceInfo.setComment(parent.getComment());
     }
-    LOG.info("Display name service/parent: " + serviceInfo.getDisplayName() + "/" + parent.getDisplayName());
+    LOG.info(String.format("Display name service/parent: %s/%s", serviceInfo.getDisplayName(), parent.getDisplayName()));
     if (serviceInfo.getDisplayName() == null) {
       serviceInfo.setDisplayName(parent.getDisplayName());
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/53b4bd41/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
index 93eeb7e..d9eaf27 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
@@ -31,6 +31,8 @@ import java.util.Set;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.state.BulkCommandDefinition;
+import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.ExtensionInfo;
 import org.apache.ambari.server.state.PropertyDependencyInfo;
@@ -182,7 +184,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       StackModule parentModule, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions)
       throws AmbariException {
     moduleState = ModuleState.VISITED;
-    LOG.info("Resolve: " + stackInfo.getName() + ":" + stackInfo.getVersion());
+    LOG.info(String.format("Resolve: %s:%s", stackInfo.getName(), stackInfo.getVersion()));
     String parentVersion = stackInfo.getParentStackVersion();
     mergeServicesWithExplicitParent(allStacks, commonServices, extensions);
     addExtensionServices();
@@ -204,6 +206,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
     processUpgradePacks();
     processRepositories();
     processPropertyDependencies();
+    validateBulkCommandComponents(allStacks);
     moduleState = ModuleState.RESOLVED;
   }
 
@@ -351,7 +354,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions)
       throws AmbariException {
 
-    LOG.info("mergeServiceWithExplicitParent" + parent);
+    LOG.info(String.format("Merge service %s with explicit parent: %s", service.getModuleInfo().getName(), parent));
     if(isCommonServiceParent(parent)) {
       mergeServiceWithCommonServiceParent(service, parent, allStacks, commonServices, extensions);
     } else if(isExtensionServiceParent(parent)) {
@@ -1198,6 +1201,36 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
     }
   }
 
+  /**
+   * Validate the component defined in the bulkCommand section is defined for the service
+   * This needs to happen after the stack is resolved
+   * */
+  private void validateBulkCommandComponents(Map<String, StackModule> allStacks){
+    if (null != stackInfo) {
+      String currentStackId = stackInfo.getName() + StackManager.PATH_DELIMITER + stackInfo.getVersion();
+      LOG.debug("Validate bulk command components for: " + currentStackId);
+      StackModule currentStack = allStacks.get(currentStackId);
+      if (null != currentStack){
+        for (ServiceModule serviceModule : currentStack.getServiceModules().values()) {
+          ServiceInfo service = serviceModule.getModuleInfo();
+          for(ComponentInfo component: service.getComponents()){
+            BulkCommandDefinition bcd = component.getBulkCommandDefinition();
+            if (null != bcd && null != bcd.getMasterComponent()){
+              String name = bcd.getMasterComponent();
+              ComponentInfo targetComponent = service.getComponentByName(name);
+              if (null == targetComponent){
+                String serviceName = service.getName();
+                LOG.error(
+                    String.format("%s bulk command section for service %s in stack %s references a component %s which doesn't exist.",
+                        component.getName(), serviceName, currentStackId, name));
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
   @Override
   public boolean isValid() {
     return valid;


[14/17] ambari git commit: AMBARI-18401. Allow running a subset of Python unit tests. (Attila Doroszlai via stoader)

Posted by jo...@apache.org.
AMBARI-18401. Allow running a subset of Python unit tests. (Attila Doroszlai via stoader)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/39858cca
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/39858cca
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/39858cca

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 39858ccafcee6c49bba21d7385d7129d71dc8851
Parents: 2700bd1
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Wed Sep 28 13:58:32 2016 +0200
Committer: Toader, Sebastian <st...@hortonworks.com>
Committed: Wed Sep 28 13:58:32 2016 +0200

----------------------------------------------------------------------
 ambari-server/pom.xml                      |  2 ++
 ambari-server/src/test/python/unitTests.py | 25 ++++++++++---------------
 2 files changed, 12 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/39858cca/ambari-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index 354b6cb..d507b82 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -28,6 +28,7 @@
     <!-- On centos the python xml's are inside python package -->
     <deb.architecture>amd64</deb.architecture>
     <custom.tests>false</custom.tests>
+    <python.test.mask>[Tt]est*.py</python.test.mask>
     <hdpUrlForCentos6>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.1.1.0</hdpUrlForCentos6>
     <hdpLatestUrl>http://public-repo-1.hortonworks.com/HDP/hdp_urlinfo.json</hdpLatestUrl>
     <ambari_commons.install.dir>/usr/lib/ambari-server/lib/ambari_commons</ambari_commons.install.dir>
@@ -621,6 +622,7 @@
               <arguments>
                 <argument>unitTests.py</argument>
                 <argument>${custom.tests}</argument>
+                <argument>${python.test.mask}</argument>
               </arguments>
               <environmentVariables>
                   <PYTHONPATH>${path.python.1}${pathsep}$PYTHONPATH</PYTHONPATH>

http://git-wip-us.apache.org/repos/asf/ambari/blob/39858cca/ambari-server/src/test/python/unitTests.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/unitTests.py b/ambari-server/src/test/python/unitTests.py
index 037b6a5..7941ed3 100644
--- a/ambari-server/src/test/python/unitTests.py
+++ b/ambari-server/src/test/python/unitTests.py
@@ -86,17 +86,12 @@ def get_stack_name():
 def get_stack_name():
   return "HDP"
 
-def stack_test_executor(base_folder, service, stack, custom_tests, executor_result):
+def stack_test_executor(base_folder, service, stack, test_mask, executor_result):
   """
   Stack tests executor. Must be executed in separate process to prevent module
   name conflicts in different stacks.
   """
   #extract stack scripts folders
-  if custom_tests:
-    test_mask = CUSTOM_TEST_MASK
-  else:
-    test_mask = TEST_MASK
-
   server_src_dir = get_parent_path(base_folder, 'src')
   script_folders = set()
 
@@ -152,10 +147,14 @@ def stack_test_executor(base_folder, service, stack, custom_tests, executor_resu
 
 def main():
   if not os.path.exists(newtmpdirpath): os.makedirs(newtmpdirpath)
-  custom_tests = False
-  if len(sys.argv) > 1:
-    if sys.argv[1] == "true":
-      custom_tests = True
+
+  if len(sys.argv) > 1 and sys.argv[1] == "true": # handle custom_tests for backward-compatibility
+    test_mask = CUSTOM_TEST_MASK
+  elif len(sys.argv) > 2:
+    test_mask = sys.argv[2]
+  else:
+    test_mask = TEST_MASK
+
   pwd = os.path.abspath(os.path.dirname(__file__))
 
   ambari_server_folder = get_parent_path(pwd, 'ambari-server')
@@ -212,7 +211,7 @@ def main():
                                       args=(variant['directory'],
                                             variant['service'],
                                             variant['stack'],
-                                            custom_tests,
+                                            test_mask,
                                             executor_result)
           )
     process.start()
@@ -238,10 +237,6 @@ def main():
 
   #run base ambari-server tests
   sys.stderr.write("Running tests for ambari-server\n")
-  if custom_tests:
-    test_mask = CUSTOM_TEST_MASK
-  else:
-    test_mask = TEST_MASK
 
   test_dirs = [
     (os.path.join(pwd, 'custom_actions'), "\nRunning tests for custom actions\n"),


[08/17] ambari git commit: AMBARI-18051 - Services should be able to provide their own pre-req checks by supplying a jar file

Posted by jo...@apache.org.
AMBARI-18051 - Services should be able to provide their own pre-req checks by supplying a jar file


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7b924342
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7b924342
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7b924342

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 7b92434290f1e092e228bcc74584d4a4630ac392
Parents: 53b4bd4
Author: Tim Thorpe <tt...@apache.org>
Authored: Tue Sep 27 08:21:56 2016 -0700
Committer: Tim Thorpe <tt...@apache.org>
Committed: Tue Sep 27 08:21:56 2016 -0700

----------------------------------------------------------------------
 .../server/sample/checks/SampleServiceCheck.java   | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7b924342/ambari-server/src/test/java/org/apache/ambari/server/sample/checks/SampleServiceCheck.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/sample/checks/SampleServiceCheck.java b/ambari-server/src/test/java/org/apache/ambari/server/sample/checks/SampleServiceCheck.java
index c91793e..1c16040 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/sample/checks/SampleServiceCheck.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/sample/checks/SampleServiceCheck.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.ambari.server.sample.checks;
 
 import org.apache.ambari.server.AmbariException;


[15/17] ambari git commit: AMBARI-18478. Ambari UI - Service Actions menu for pluralized value has grammatical error (onechiporenko)

Posted by jo...@apache.org.
AMBARI-18478. Ambari UI - Service Actions menu for pluralized value has grammatical error (onechiporenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e44b8805
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e44b8805
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e44b8805

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: e44b880514109011fadb9c274b3b8163f13390d8
Parents: 39858cc
Author: Oleg Nechiporenko <on...@apache.org>
Authored: Wed Sep 28 11:41:20 2016 +0300
Committer: Oleg Nechiporenko <on...@apache.org>
Committed: Wed Sep 28 15:29:33 2016 +0300

----------------------------------------------------------------------
 ambari-web/app/assets/licenses/NOTICE.txt       |   3 +
 ambari-web/app/messages.js                      |  13 +-
 ambari-web/app/utils/string_utils.js            |   7 +-
 .../app/views/common/rolling_restart_view.js    |  19 +-
 ambari-web/app/views/main/service/item.js       |   3 +-
 .../service/widgets/create/expression_view.js   |   2 +-
 ambari-web/brunch-config.js                     |   3 +-
 .../resourceManager/wizard_controller_test.js   |   1 -
 ambari-web/test/models/cluster_test.js          |  12 +-
 .../objects/service_config_property_test.js     |  31 +-
 .../configs/theme/sub_section_tab_test.js       |   2 +-
 .../test/views/main/host/log_metrics_test.js    |   1 -
 ambari-web/test/views/main/host_test.js         |   4 +-
 ambari-web/vendor/scripts/pluralize.js          | 461 +++++++++++++++++++
 14 files changed, 506 insertions(+), 56 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e44b8805/ambari-web/app/assets/licenses/NOTICE.txt
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/licenses/NOTICE.txt b/ambari-web/app/assets/licenses/NOTICE.txt
index c750a37..75a13ea 100644
--- a/ambari-web/app/assets/licenses/NOTICE.txt
+++ b/ambari-web/app/assets/licenses/NOTICE.txt
@@ -60,3 +60,6 @@ Copyright (C) 2015 Leaf Corcoran (leafot [at] gmail [*dot*] com)
 
 This product includes bootstrap-contextmenu v.0.3.3 (https://github.com/sydcanem/bootstrap-contextmenu - MIT License)
 Copyright (C) 2015 James Santos
+
+This product includes pluralize v.3.0.0 (https://github.com/blakeembrey/pluralize - MIT License)
+Copyright (C) 2016 Blake Embrey

http://git-wip-us.apache.org/repos/asf/ambari/blob/e44b8805/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 2c819e5..1c53839 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -2913,14 +2913,13 @@ Em.I18n.translations = {
   'tableView.filters.filteredAlertInstancesInfo': '{0} of {1} instances showing',
   'tableView.filters.filteredLogsInfo': '{0} of {1} logs showing',
 
-  'rollingrestart.dialog.title': 'Restart {0}s',
+  'rollingrestart.dialog.title': 'Restart {0}',
   'rollingrestart.dialog.primary': 'Trigger Rolling Restart',
   'rollingrestart.notsupported.hostComponent': 'Rolling restart not supported for {0} components',
-  'rollingrestart.dialog.msg.restart': 'This will restart a specified number of {0}s at a time.',
-  'rollingrestart.dialog.msg.noRestartHosts': 'There are no {0}s to do rolling restarts',
+  'rollingrestart.dialog.msg.restart': 'This will restart a specified number of {0} at a time.',
+  'rollingrestart.dialog.msg.noRestartHosts': 'There are no {0} to do rolling restarts',
   'rollingrestart.dialog.msg.maintainance': 'Note: {0} {1} in Maintenance Mode will not be restarted',
-  'rollingrestart.dialog.msg.maintainance.plural': 'Note: {0} {1}s in Maintenance Mode will not be restarted',
-  'rollingrestart.dialog.msg.componentsAtATime': '{0}s at a time',
+  'rollingrestart.dialog.msg.componentsAtATime': '{0} at a time',
   'rollingrestart.dialog.msg.timegap.prefix': 'Wait ',
   'rollingrestart.dialog.msg.timegap.suffix': 'seconds between batches ',
   'rollingrestart.dialog.msg.toleration.prefix': 'Tolerate up to ',
@@ -2930,7 +2929,7 @@ Em.I18n.translations = {
   'rollingrestart.dialog.err.invalid.toleratesize': 'Invalid failure toleration count: {0}',
   'rollingrestart.dialog.warn.datanode.batch.size': 'Restarting more than one DataNode at a time is not recommended. Doing so can lead to data unavailability and/or possible loss of data being actively written to HDFS.',
   'rollingrestart.dialog.msg.serviceNotInMM':'Note: This will trigger alerts. To suppress alerts, turn on Maintenance Mode for {0} prior to triggering a rolling restart',
-  'rollingrestart.dialog.msg.staleConfigsOnly': 'Only restart {0}s with stale configs',
+  'rollingrestart.dialog.msg.staleConfigsOnly': 'Only restart {0} with stale configs',
   'rollingrestart.rest.context': 'Rolling Restart of {0}s - batch {1} of {2}',
   'rollingrestart.context.allOnSelectedHosts':'Restart all components on the selected hosts',
   'rollingrestart.context.allForSelectedService':'Restart all components for {0}',
@@ -2962,7 +2961,7 @@ Em.I18n.translations = {
   'widget.create.wizard.step2.addExpression': 'Add Expression',
   'widget.create.wizard.step2.addDataset': 'Add data set',
   'widget.create.wizard.step2.body.gauge.overflow.warning':'Overflowed! Gauge can only display number between 0 and 1.',
-  'widget.create.wizard.step2.allComponents': 'All {0}s',
+  'widget.create.wizard.step2.allComponents': 'All {0}',
   'widget.create.wizard.step2.activeComponents': 'Active {0}',
   'widget.create.wizard.step2.noMetricFound': 'No metric found',
   'widget.create.wizard.step3.widgetName': 'Name',

http://git-wip-us.apache.org/repos/asf/ambari/blob/e44b8805/ambari-web/app/utils/string_utils.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/string_utils.js b/ambari-web/app/utils/string_utils.js
index 3754ba1..f4e3674 100644
--- a/ambari-web/app/utils/string_utils.js
+++ b/ambari-web/app/utils/string_utils.js
@@ -201,11 +201,8 @@ module.exports = {
    * @method pluralize
    */
   pluralize: function(count, singular, plural) {
-    plural = plural || singular + 's';
-    if (count > 1) {
-      return plural;
-    }
-    return singular;
+    var _plural = plural || pluralize(singular);
+    return count > 1 ? _plural : singular;
   },
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/e44b8805/ambari-web/app/views/common/rolling_restart_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/rolling_restart_view.js b/ambari-web/app/views/common/rolling_restart_view.js
index 0d849a2..64b8610 100644
--- a/ambari-web/app/views/common/rolling_restart_view.js
+++ b/ambari-web/app/views/common/rolling_restart_view.js
@@ -119,7 +119,7 @@ App.RollingRestartView = Em.View.extend({
    * List of errors is saved to <code>errors</code>
    */
   validate : function() {
-    var displayName = this.get('hostComponentDisplayName');
+    var displayName = pluralize(this.get('hostComponentDisplayName'));
     var componentName = this.get('hostComponentName');
     var totalCount = this.get('restartHostComponents.length');
     var bs = this.get('batchSize');
@@ -207,7 +207,9 @@ App.RollingRestartView = Em.View.extend({
   /**
    * @type {String}
    */
-  restartMessage: Em.computed.i18nFormat('rollingrestart.dialog.msg.restart', 'hostComponentDisplayName'),
+  restartMessage : function() {
+    return Em.I18n.t('rollingrestart.dialog.msg.restart').format(pluralize(this.get('hostComponentDisplayName')));
+  }.property('hostComponentDisplayName'),
 
   /**
    * @type {String}
@@ -216,10 +218,7 @@ App.RollingRestartView = Em.View.extend({
     var count = this.get('componentsWithMaintenanceHost.length');
     if (count > 0) {
       var name = this.get('hostComponentDisplayName');
-      if (count > 1) {
-        return Em.I18n.t('rollingrestart.dialog.msg.maintainance.plural').format(count, name)
-      }
-      return Em.I18n.t('rollingrestart.dialog.msg.maintainance').format(count, name)
+      return Em.I18n.t('rollingrestart.dialog.msg.maintainance').format(count, pluralize(name));
     }
     return null;
   }.property('componentsWithMaintenanceHost', 'hostComponentDisplayName'),
@@ -227,11 +226,15 @@ App.RollingRestartView = Em.View.extend({
   /**
    * @type {String}
    */
-  batchSizeMessage: Em.computed.i18nFormat('rollingrestart.dialog.msg.componentsAtATime', 'hostComponentDisplayName'),
+  batchSizeMessage : function() {
+    return Em.I18n.t('rollingrestart.dialog.msg.componentsAtATime').format(pluralize(this.get('hostComponentDisplayName')));
+  }.property('hostComponentDisplayName'),
 
   /**
    * @type {String}
    */
-  staleConfigsOnlyMessage: Em.computed.i18nFormat('rollingrestart.dialog.msg.staleConfigsOnly', 'hostComponentDisplayName')
+  staleConfigsOnlyMessage : function() {
+    return Em.I18n.t('rollingrestart.dialog.msg.staleConfigsOnly').format(pluralize(this.get('hostComponentDisplayName')));
+  }.property('hostComponentDisplayName')
 
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/e44b8805/ambari-web/app/views/main/service/item.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/item.js b/ambari-web/app/views/main/service/item.js
index a007e17..fc9c4f3 100644
--- a/ambari-web/app/views/main/service/item.js
+++ b/ambari-web/app/views/main/service/item.js
@@ -146,9 +146,10 @@ App.MainServiceItemView = Em.View.extend({
       allSlaves.concat(allMasters).filter(function (_component) {
         return App.get('components.rollinRestartAllowed').contains(_component);
       }).forEach(function(_component) {
+        var _componentNamePluralized = pluralize(App.format.role(_component, false));
         options.push(self.createOption(actionMap.ROLLING_RESTART, {
           context: _component,
-          label: actionMap.ROLLING_RESTART.label.format(App.format.role(_component, false))
+          label: actionMap.ROLLING_RESTART.label.format(_componentNamePluralized)
         }));
       });
       allMasters.filter(function(master) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e44b8805/ambari-web/app/views/main/service/widgets/create/expression_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/widgets/create/expression_view.js b/ambari-web/app/views/main/service/widgets/create/expression_view.js
index a12bf99..7afe287 100644
--- a/ambari-web/app/views/main/service/widgets/create/expression_view.js
+++ b/ambari-web/app/views/main/service/widgets/create/expression_view.js
@@ -358,7 +358,7 @@ App.AddMetricExpressionView = Em.View.extend({
                 return Em.I18n.t('widget.create.wizard.step2.activeComponents').format(stackComponent.get('displayName'));
               }
             }
-            return Em.I18n.t('widget.create.wizard.step2.allComponents').format(stackComponent.get('displayName'));
+            return Em.I18n.t('widget.create.wizard.step2.allComponents').format(pluralize(stackComponent.get('displayName')));
           }.property('componentName', 'level'),
           count: servicesMap[serviceName].components[componentId].count,
           metrics: servicesMap[serviceName].components[componentId].metrics.uniq().sort(),

http://git-wip-us.apache.org/repos/asf/ambari/blob/e44b8805/ambari-web/brunch-config.js
----------------------------------------------------------------------
diff --git a/ambari-web/brunch-config.js b/ambari-web/brunch-config.js
index 64ac946..d71f8da 100644
--- a/ambari-web/brunch-config.js
+++ b/ambari-web/brunch-config.js
@@ -74,7 +74,8 @@ module.exports.config = {
           'vendor/scripts/spin.js',
           'vendor/scripts/jquery.flexibleArea.js',
           'vendor/scripts/FileSaver.js',
-          'vendor/scripts/Blob.js'
+          'vendor/scripts/Blob.js',
+          'vendor/scripts/pluralize.js'
         ]
       }
     },

http://git-wip-us.apache.org/repos/asf/ambari/blob/e44b8805/ambari-web/test/controllers/main/admin/highAvailability/resourceManager/wizard_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/admin/highAvailability/resourceManager/wizard_controller_test.js b/ambari-web/test/controllers/main/admin/highAvailability/resourceManager/wizard_controller_test.js
index 5a991d3..19fbea6 100644
--- a/ambari-web/test/controllers/main/admin/highAvailability/resourceManager/wizard_controller_test.js
+++ b/ambari-web/test/controllers/main/admin/highAvailability/resourceManager/wizard_controller_test.js
@@ -18,7 +18,6 @@
 
 var App = require('app');
 require('controllers/main/admin/highAvailability/resourceManager/wizard_controller');
-var testHelpers = require('test/helpers');
 
 describe('App.RMHighAvailabilityWizardController', function () {
   var controller;

http://git-wip-us.apache.org/repos/asf/ambari/blob/e44b8805/ambari-web/test/models/cluster_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/models/cluster_test.js b/ambari-web/test/models/cluster_test.js
index a6bafba..604e50a 100644
--- a/ambari-web/test/models/cluster_test.js
+++ b/ambari-web/test/models/cluster_test.js
@@ -29,7 +29,7 @@ describe('App.Cluster', function () {
 
   describe('#isKerberosEnabled', function () {
 
-    var cases = [
+    [
       {
         securityType: 'KERBEROS',
         isKerberosEnabled: true,
@@ -40,9 +40,7 @@ describe('App.Cluster', function () {
         isKerberosEnabled: false,
         title: 'Kerberos disabled'
       }
-    ];
-
-    cases.forEach(function (item) {
+    ].forEach(function (item) {
 
       it(item.title, function () {
         cluster.set('securityType', item.securityType);
@@ -53,7 +51,7 @@ describe('App.Cluster', function () {
 
     describe('#isCredentialStorePersistent', function () {
 
-      var cases = [
+      [
         {
           propertyValue: 'false',
           isCredentialStorePersistent: false,
@@ -69,9 +67,7 @@ describe('App.Cluster', function () {
           isCredentialStorePersistent: true,
           title: 'persistent credential store'
         }
-      ];
-
-      cases.forEach(function (item) {
+      ].forEach(function (item) {
 
         it(item.title, function () {
           cluster.set('credentialStoreProperties', {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e44b8805/ambari-web/test/models/configs/objects/service_config_property_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/models/configs/objects/service_config_property_test.js b/ambari-web/test/models/configs/objects/service_config_property_test.js
index 49613a44..ef0bd61 100644
--- a/ambari-web/test/models/configs/objects/service_config_property_test.js
+++ b/ambari-web/test/models/configs/objects/service_config_property_test.js
@@ -189,9 +189,7 @@ describe('App.ServiceConfigProperty', function () {
   App.TestAliases.testAsComputedAnd(getProperty(), 'hideFinalIcon', ['!isFinal', 'isNotEditable']);
 
   describe('#placeholder', function () {
-    it('should equal foo', function() {
-      serviceConfigProperty.set('isEditable', true);
-      var testCases = [
+      [
         {
           placeholderText: 'foo',
           savedValue: ''
@@ -204,26 +202,19 @@ describe('App.ServiceConfigProperty', function () {
           placeholderText: 'foo',
           savedValue: 'bar'
         }
-      ];
-      testCases.forEach(function (item) {
-        serviceConfigProperty.set('placeholderText', item.placeholderText);
-        serviceConfigProperty.set('savedValue', item.savedValue);
-        expect(serviceConfigProperty.get('placeholder')).to.equal('foo');
-      });
+      ].forEach(function (item) {
+        it('should equal foo, placeholder = ' + JSON.stringify(item.placeholderText), function() {
+          serviceConfigProperty.set('isEditable', true);
+          serviceConfigProperty.set('placeholderText', item.placeholderText);
+          serviceConfigProperty.set('savedValue', item.savedValue);
+          expect(serviceConfigProperty.get('placeholder')).to.equal('foo');
+        });
     });
     it('should equal null', function() {
       serviceConfigProperty.set('isEditable', false);
-      var testCases = [
-        {
-          placeholderText: 'foo',
-          savedValue: 'bar'
-        }
-      ];
-      testCases.forEach(function (item) {
-        serviceConfigProperty.set('placeholderText', item.placeholderText);
-        serviceConfigProperty.set('savedValue', item.savedValue);
-        expect(serviceConfigProperty.get('placeholder')).to.equal(null);
-      });
+      serviceConfigProperty.set('placeholderText', 'foo');
+      serviceConfigProperty.set('savedValue', 'bar');
+      expect(serviceConfigProperty.get('placeholder')).to.equal(null);
     });
   });
   describe('#isPropertyOverridable', function () {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e44b8805/ambari-web/test/models/configs/theme/sub_section_tab_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/models/configs/theme/sub_section_tab_test.js b/ambari-web/test/models/configs/theme/sub_section_tab_test.js
index 6044432..0c3b98c 100644
--- a/ambari-web/test/models/configs/theme/sub_section_tab_test.js
+++ b/ambari-web/test/models/configs/theme/sub_section_tab_test.js
@@ -155,7 +155,7 @@ describe('App.SubSectionTab', function () {
 
     it('should include visible properties with errors', function () {
       subSectionTab.set('configs', configs);
-      expect(subSectionTab.get('errorsCount')).to.eql(8);
+      expect(subSectionTab.get('errorsCount')).to.be.equal(8);
     });
 
   });

http://git-wip-us.apache.org/repos/asf/ambari/blob/e44b8805/ambari-web/test/views/main/host/log_metrics_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/host/log_metrics_test.js b/ambari-web/test/views/main/host/log_metrics_test.js
index a0a3c6c..52f4e55 100644
--- a/ambari-web/test/views/main/host/log_metrics_test.js
+++ b/ambari-web/test/views/main/host/log_metrics_test.js
@@ -17,7 +17,6 @@
  */
 
 var App = require('app');
-var fileUtils = require('utils/file_utils');
 
 describe('App.MainHostLogMetrics', function() {
   var view;

http://git-wip-us.apache.org/repos/asf/ambari/blob/e44b8805/ambari-web/test/views/main/host_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/host_test.js b/ambari-web/test/views/main/host_test.js
index 0b789f6..79cc65c 100644
--- a/ambari-web/test/views/main/host_test.js
+++ b/ambari-web/test/views/main/host_test.js
@@ -773,7 +773,7 @@ describe('App.MainHostView', function () {
     describe("#restartRequiredComponentsMessage", function () {
 
       it("5 components require restart", function() {
-        var content = 'c1, c2, c3, c4, c5' + ' ' + Em.I18n.t('common.components').toLowerCase();
+        var content = 'c1, c2, c3, c4, c5 ' + Em.I18n.t('common.components').toLowerCase();
         hostView.set('content.componentsWithStaleConfigsCount', 5);
         hostView.set('content.componentsWithStaleConfigs', [
           {displayName: 'c1'},
@@ -789,7 +789,7 @@ describe('App.MainHostView', function () {
       });
 
       it("1 component require restart", function() {
-        var content = 'c1' + ' ' + Em.I18n.t('common.component').toLowerCase();
+        var content = 'c1 ' + Em.I18n.t('common.component').toLowerCase();
         hostView.set('content.componentsWithStaleConfigsCount', 1);
         hostView.set('content.componentsWithStaleConfigs', [
           {displayName: 'c1'}

http://git-wip-us.apache.org/repos/asf/ambari/blob/e44b8805/ambari-web/vendor/scripts/pluralize.js
----------------------------------------------------------------------
diff --git a/ambari-web/vendor/scripts/pluralize.js b/ambari-web/vendor/scripts/pluralize.js
new file mode 100644
index 0000000..7246db1
--- /dev/null
+++ b/ambari-web/vendor/scripts/pluralize.js
@@ -0,0 +1,461 @@
+/* global define */
+
+(function (root, pluralize) {
+  /* istanbul ignore else */
+  if (typeof require === 'function' && typeof exports === 'object' && typeof module === 'object') {
+    // Node.
+    module.exports = pluralize();
+  } else if (typeof define === 'function' && define.amd) {
+    // AMD, registers as an anonymous module.
+    define(function () {
+      return pluralize();
+    });
+  } else {
+    // Browser global.
+    root.pluralize = pluralize();
+  }
+})(this, function () {
+  // Rule storage - pluralize and singularize need to be run sequentially,
+  // while other rules can be optimized using an object for instant lookups.
+  var pluralRules = [];
+  var singularRules = [];
+  var uncountables = {};
+  var irregularPlurals = {};
+  var irregularSingles = {};
+
+  /**
+   * Title case a string.
+   *
+   * @param  {string} str
+   * @return {string}
+   */
+  function toTitleCase (str) {
+    return str.charAt(0).toUpperCase() + str.substr(1).toLowerCase();
+  }
+
+  /**
+   * Sanitize a pluralization rule to a usable regular expression.
+   *
+   * @param  {(RegExp|string)} rule
+   * @return {RegExp}
+   */
+  function sanitizeRule (rule) {
+    if (typeof rule === 'string') {
+      return new RegExp('^' + rule + '$', 'i');
+    }
+
+    return rule;
+  }
+
+  /**
+   * Pass in a word token to produce a function that can replicate the case on
+   * another word.
+   *
+   * @param  {string}   word
+   * @param  {string}   token
+   * @return {Function}
+   */
+  function restoreCase (word, token) {
+    // Tokens are an exact match.
+    if (word === token) {
+      return token;
+    }
+
+    // Upper cased words. E.g. "HELLO".
+    if (word === word.toUpperCase()) {
+      return token.toUpperCase();
+    }
+
+    // Title cased words. E.g. "Title".
+    if (word[0] === word[0].toUpperCase()) {
+      return toTitleCase(token);
+    }
+
+    // Lower cased words. E.g. "test".
+    return token.toLowerCase();
+  }
+
+  /**
+   * Interpolate a regexp string.
+   *
+   * @param  {string} str
+   * @param  {Array}  args
+   * @return {string}
+   */
+  function interpolate (str, args) {
+    return str.replace(/\$(\d{1,2})/g, function (match, index) {
+      return args[index] || '';
+    });
+  }
+
+  /**
+   * Sanitize a word by passing in the word and sanitization rules.
+   *
+   * @param  {string}   token
+   * @param  {string}   word
+   * @param  {Array}    collection
+   * @return {string}
+   */
+  function sanitizeWord (token, word, collection) {
+    // Empty string or doesn't need fixing.
+    if (!token.length || uncountables.hasOwnProperty(token)) {
+      return word;
+    }
+
+    var len = collection.length;
+
+    // Iterate over the sanitization rules and use the first one to match.
+    while (len--) {
+      var rule = collection[len];
+
+      // If the rule passes, return the replacement.
+      if (rule[0].test(word)) {
+        return word.replace(rule[0], function (match, index, word) {
+          var result = interpolate(rule[1], arguments);
+
+          if (match === '') {
+            return restoreCase(word[index - 1], result);
+          }
+
+          return restoreCase(match, result);
+        });
+      }
+    }
+
+    return word;
+  }
+
+  /**
+   * Replace a word with the updated word.
+   *
+   * @param  {Object}   replaceMap
+   * @param  {Object}   keepMap
+   * @param  {Array}    rules
+   * @return {Function}
+   */
+  function replaceWord (replaceMap, keepMap, rules) {
+    return function (word) {
+      // Get the correct token and case restoration functions.
+      var token = word.toLowerCase();
+
+      // Check against the keep object map.
+      if (keepMap.hasOwnProperty(token)) {
+        return restoreCase(word, token);
+      }
+
+      // Check against the replacement map for a direct word replacement.
+      if (replaceMap.hasOwnProperty(token)) {
+        return restoreCase(word, replaceMap[token]);
+      }
+
+      // Run all the rules against the word.
+      return sanitizeWord(token, word, rules);
+    };
+  }
+
+  /**
+   * Pluralize or singularize a word based on the passed in count.
+   *
+   * @param  {string}  word
+   * @param  {number}  count
+   * @param  {boolean} inclusive
+   * @return {string}
+   */
+  function pluralize (word, count, inclusive) {
+    var pluralized = count === 1
+      ? pluralize.singular(word) : pluralize.plural(word);
+
+    return (inclusive ? count + ' ' : '') + pluralized;
+  }
+
+  /**
+   * Pluralize a word.
+   *
+   * @type {Function}
+   */
+  pluralize.plural = replaceWord(
+    irregularSingles, irregularPlurals, pluralRules
+  );
+
+  /**
+   * Singularize a word.
+   *
+   * @type {Function}
+   */
+  pluralize.singular = replaceWord(
+    irregularPlurals, irregularSingles, singularRules
+  );
+
+  /**
+   * Add a pluralization rule to the collection.
+   *
+   * @param {(string|RegExp)} rule
+   * @param {string}          replacement
+   */
+  pluralize.addPluralRule = function (rule, replacement) {
+    pluralRules.push([sanitizeRule(rule), replacement]);
+  };
+
+  /**
+   * Add a singularization rule to the collection.
+   *
+   * @param {(string|RegExp)} rule
+   * @param {string}          replacement
+   */
+  pluralize.addSingularRule = function (rule, replacement) {
+    singularRules.push([sanitizeRule(rule), replacement]);
+  };
+
+  /**
+   * Add an uncountable word rule.
+   *
+   * @param {(string|RegExp)} word
+   */
+  pluralize.addUncountableRule = function (word) {
+    if (typeof word === 'string') {
+      uncountables[word.toLowerCase()] = true;
+      return;
+    }
+
+    // Set singular and plural references for the word.
+    pluralize.addPluralRule(word, '$0');
+    pluralize.addSingularRule(word, '$0');
+  };
+
+  /**
+   * Add an irregular word definition.
+   *
+   * @param {string} single
+   * @param {string} plural
+   */
+  pluralize.addIrregularRule = function (single, plural) {
+    plural = plural.toLowerCase();
+    single = single.toLowerCase();
+
+    irregularSingles[single] = plural;
+    irregularPlurals[plural] = single;
+  };
+
+  /**
+   * Irregular rules.
+   */
+  [
+    // Pronouns.
+    ['I', 'we'],
+    ['me', 'us'],
+    ['he', 'they'],
+    ['she', 'they'],
+    ['them', 'them'],
+    ['myself', 'ourselves'],
+    ['yourself', 'yourselves'],
+    ['itself', 'themselves'],
+    ['herself', 'themselves'],
+    ['himself', 'themselves'],
+    ['themself', 'themselves'],
+    ['is', 'are'],
+    ['was', 'were'],
+    ['has', 'have'],
+    ['this', 'these'],
+    ['that', 'those'],
+    // Words ending in with a consonant and `o`.
+    ['echo', 'echoes'],
+    ['dingo', 'dingoes'],
+    ['volcano', 'volcanoes'],
+    ['tornado', 'tornadoes'],
+    ['torpedo', 'torpedoes'],
+    // Ends with `us`.
+    ['genus', 'genera'],
+    ['viscus', 'viscera'],
+    // Ends with `ma`.
+    ['stigma', 'stigmata'],
+    ['stoma', 'stomata'],
+    ['dogma', 'dogmata'],
+    ['lemma', 'lemmata'],
+    ['schema', 'schemata'],
+    ['anathema', 'anathemata'],
+    // Other irregular rules.
+    ['ox', 'oxen'],
+    ['axe', 'axes'],
+    ['die', 'dice'],
+    ['yes', 'yeses'],
+    ['foot', 'feet'],
+    ['eave', 'eaves'],
+    ['goose', 'geese'],
+    ['tooth', 'teeth'],
+    ['quiz', 'quizzes'],
+    ['human', 'humans'],
+    ['proof', 'proofs'],
+    ['carve', 'carves'],
+    ['valve', 'valves'],
+    ['looey', 'looies'],
+    ['thief', 'thieves'],
+    ['groove', 'grooves'],
+    ['pickaxe', 'pickaxes'],
+    ['whiskey', 'whiskies']
+  ].forEach(function (rule) {
+    return pluralize.addIrregularRule(rule[0], rule[1]);
+  });
+
+  /**
+   * Pluralization rules.
+   */
+  [
+    [/s?$/i, 's'],
+    [/([^aeiou]ese)$/i, '$1'],
+    [/(ax|test)is$/i, '$1es'],
+    [/(alias|[^aou]us|tlas|gas|ris)$/i, '$1es'],
+    [/(e[mn]u)s?$/i, '$1s'],
+    [/([^l]ias|[aeiou]las|[emjzr]as|[iu]am)$/i, '$1'],
+    [/(alumn|syllab|octop|vir|radi|nucle|fung|cact|stimul|termin|bacill|foc|uter|loc|strat)(?:us|i)$/i, '$1i'],
+    [/(alumn|alg|vertebr)(?:a|ae)$/i, '$1ae'],
+    [/(seraph|cherub)(?:im)?$/i, '$1im'],
+    [/(her|at|gr)o$/i, '$1oes'],
+    [/(agend|addend|millenni|dat|extrem|bacteri|desiderat|strat|candelabr|errat|ov|symposi|curricul|automat|quor)(?:a|um)$/i, '$1a'],
+    [/(apheli|hyperbat|periheli|asyndet|noumen|phenomen|criteri|organ|prolegomen|hedr|automat)(?:a|on)$/i, '$1a'],
+    [/sis$/i, 'ses'],
+    [/(?:(kni|wi|li)fe|(ar|l|ea|eo|oa|hoo)f)$/i, '$1$2ves'],
+    [/([^aeiouy]|qu)y$/i, '$1ies'],
+    [/([^ch][ieo][ln])ey$/i, '$1ies'],
+    [/(x|ch|ss|sh|zz)$/i, '$1es'],
+    [/(matr|cod|mur|sil|vert|ind|append)(?:ix|ex)$/i, '$1ices'],
+    [/(m|l)(?:ice|ouse)$/i, '$1ice'],
+    [/(pe)(?:rson|ople)$/i, '$1ople'],
+    [/(child)(?:ren)?$/i, '$1ren'],
+    [/eaux$/i, '$0'],
+    [/m[ae]n$/i, 'men'],
+    ['thou', 'you']
+  ].forEach(function (rule) {
+    return pluralize.addPluralRule(rule[0], rule[1]);
+  });
+
+  /**
+   * Singularization rules.
+   */
+  [
+    [/s$/i, ''],
+    [/(ss)$/i, '$1'],
+    [/((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)(?:sis|ses)$/i, '$1sis'],
+    [/(^analy)(?:sis|ses)$/i, '$1sis'],
+    [/(wi|kni|(?:after|half|high|low|mid|non|night|[^\w]|^)li)ves$/i, '$1fe'],
+    [/(ar|(?:wo|[ae])l|[eo][ao])ves$/i, '$1f'],
+    [/ies$/i, 'y'],
+    [/\b([pl]|zomb|(?:neck|cross)?t|coll|faer|food|gen|goon|group|lass|talk|goal|cut)ies$/i, '$1ie'],
+    [/\b(mon|smil)ies$/i, '$1ey'],
+    [/(m|l)ice$/i, '$1ouse'],
+    [/(seraph|cherub)im$/i, '$1'],
+    [/(x|ch|ss|sh|zz|tto|go|cho|alias|[^aou]us|tlas|gas|(?:her|at|gr)o|ris)(?:es)?$/i, '$1'],
+    [/(e[mn]u)s?$/i, '$1'],
+    [/(movie|twelve)s$/i, '$1'],
+    [/(cris|test|diagnos)(?:is|es)$/i, '$1is'],
+    [/(alumn|syllab|octop|vir|radi|nucle|fung|cact|stimul|termin|bacill|foc|uter|loc|strat)(?:us|i)$/i, '$1us'],
+    [/(agend|addend|millenni|dat|extrem|bacteri|desiderat|strat|candelabr|errat|ov|symposi|curricul|quor)a$/i, '$1um'],
+    [/(apheli|hyperbat|periheli|asyndet|noumen|phenomen|criteri|organ|prolegomen|hedr|automat)a$/i, '$1on'],
+    [/(alumn|alg|vertebr)ae$/i, '$1a'],
+    [/(cod|mur|sil|vert|ind)ices$/i, '$1ex'],
+    [/(matr|append)ices$/i, '$1ix'],
+    [/(pe)(rson|ople)$/i, '$1rson'],
+    [/(child)ren$/i, '$1'],
+    [/(eau)x?$/i, '$1'],
+    [/men$/i, 'man']
+  ].forEach(function (rule) {
+    return pluralize.addSingularRule(rule[0], rule[1]);
+  });
+
+  /**
+   * Uncountable rules.
+   */
+  [
+    // Singular words with no plurals.
+    'advice',
+    'adulthood',
+    'agenda',
+    'aid',
+    'alcohol',
+    'ammo',
+    'athletics',
+    'bison',
+    'blood',
+    'bream',
+    'buffalo',
+    'butter',
+    'carp',
+    'cash',
+    'chassis',
+    'chess',
+    'clothing',
+    'commerce',
+    'cod',
+    'cooperation',
+    'corps',
+    'digestion',
+    'debris',
+    'diabetes',
+    'energy',
+    'equipment',
+    'elk',
+    'excretion',
+    'expertise',
+    'flounder',
+    'fun',
+    'gallows',
+    'garbage',
+    'graffiti',
+    'headquarters',
+    'health',
+    'herpes',
+    'highjinks',
+    'homework',
+    'housework',
+    'information',
+    'jeans',
+    'justice',
+    'kudos',
+    'labour',
+    'literature',
+    'machinery',
+    'mackerel',
+    'mail',
+    'media',
+    'mews',
+    'moose',
+    'music',
+    'news',
+    'pike',
+    'plankton',
+    'pliers',
+    'pollution',
+    'premises',
+    'rain',
+    'research',
+    'rice',
+    'salmon',
+    'scissors',
+    'series',
+    'sewage',
+    'shambles',
+    'shrimp',
+    'species',
+    'staff',
+    'swine',
+    'trout',
+    'traffic',
+    'transporation',
+    'tuna',
+    'wealth',
+    'welfare',
+    'whiting',
+    'wildebeest',
+    'wildlife',
+    'you',
+    // Regexes.
+    /pox$/i, // "chickenpox", "smallpox"
+    /ois$/i,
+    /deer$/i, // "deer", "reindeer"
+    /fish$/i, // "fish", "blowfish", "angelfish"
+    /sheep$/i,
+    /measles$/i,
+    /[^aeiou]ese$/i // "chinese", "japanese"
+  ].forEach(pluralize.addUncountableRule);
+
+  return pluralize;
+});
\ No newline at end of file


[09/17] ambari git commit: AMBARI-18474. Kerberos wizard loses request id on server restart (alexantonenko)

Posted by jo...@apache.org.
AMBARI-18474. Kerberos wizard loses request id on server restart (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c265ae62
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c265ae62
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c265ae62

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: c265ae6228342146dc99e9b5681d8a2645a220fa
Parents: 7b92434
Author: Alex Antonenko <hi...@gmail.com>
Authored: Tue Sep 27 18:32:12 2016 +0300
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Tue Sep 27 21:52:49 2016 +0300

----------------------------------------------------------------------
 ambari-web/app/models/cluster_states.js      | 4 +++-
 ambari-web/app/routes/add_kerberos_routes.js | 4 +++-
 2 files changed, 6 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c265ae62/ambari-web/app/models/cluster_states.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/cluster_states.js b/ambari-web/app/models/cluster_states.js
index 7e80c32..cbc134d 100644
--- a/ambari-web/app/models/cluster_states.js
+++ b/ambari-web/app/models/cluster_states.js
@@ -146,7 +146,9 @@ App.clusterStatus = Em.Object.create(App.UserPref, {
         this.set('localdb', response.localdb);
         // restore HAWizard data if process was started
         var isHAWizardStarted = App.isAuthorized('SERVICE.ENABLE_HA') && !App.isEmptyObject(response.localdb.HighAvailabilityWizard);
-        if (params.data.overrideLocaldb || isHAWizardStarted) {
+        // restore Kerberos Wizard data if the process was started
+        var isKerberosWizardStarted = App.isAuthorized('CLUSTER.TOGGLE_KERBEROS') && !App.isEmptyObject(response.localdb.KerberosWizard);
+        if (params.data.overrideLocaldb || isHAWizardStarted || isKerberosWizardStarted) {
           var localdbTables = (App.db.data.app && App.db.data.app.tables) ? App.db.data.app.tables : {};
           var authenticated = Em.get(App, 'db.data.app.authenticated') || false;
           App.db.data = response.localdb;

http://git-wip-us.apache.org/repos/asf/ambari/blob/c265ae62/ambari-web/app/routes/add_kerberos_routes.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/routes/add_kerberos_routes.js b/ambari-web/app/routes/add_kerberos_routes.js
index 4291394..1568577 100644
--- a/ambari-web/app/routes/add_kerberos_routes.js
+++ b/ambari-web/app/routes/add_kerberos_routes.js
@@ -20,7 +20,9 @@ var App = require('app');
 module.exports = App.WizardRoute.extend({
   route: '/enable',
   enter: function (router) {
-    router.get('mainController').dataLoading().done(function () {
+    router.get('mainController').dataLoading().done(function() {
+      return App.clusterStatus.updateFromServer();
+    }).done(function () {
       var kerberosWizardController = router.get('kerberosWizardController');
       App.router.get('updateController').set('isWorking', false);
       var popup = App.ModalPopup.show({


[03/17] ambari git commit: AMBARI-18439 - [Grafana] Add Kafka-Offset dashboard for Storm (prajwal)

Posted by jo...@apache.org.
AMBARI-18439 - [Grafana] Add Kafka-Offset dashboard for Storm (prajwal)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7c8ada18
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7c8ada18
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7c8ada18

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 7c8ada187d9f80ecc93a901b403c4e13821888b5
Parents: 69e8f6f
Author: Prajwal Rao <pr...@gmail.com>
Authored: Mon Sep 26 13:54:08 2016 -0700
Committer: Prajwal Rao <pr...@gmail.com>
Committed: Mon Sep 26 13:54:08 2016 -0700

----------------------------------------------------------------------
 .../ambari-metrics/datasource.js                |  64 +++++
 .../HDF/grafana-storm-kafka-offset.json         | 258 +++++++++++++++++++
 .../HDP/grafana-storm-kafka-offset.json         | 258 +++++++++++++++++++
 3 files changed, 580 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7c8ada18/ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js b/ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js
index fa0cc47..626439d 100644
--- a/ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js
+++ b/ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js
@@ -418,6 +418,21 @@ define([
               }));
             }
 
+            //Templatized Dashboard for Storm Kafka Offset
+            if (templateSrv.variables[0].query === "topologies" && templateSrv.variables[1] &&
+                templateSrv.variables[1].name === "topic") {
+              var selectedTopology = templateSrv._values.topologies;
+              var selectedTopic = templateSrv._values.topic;
+              metricsPromises.push(_.map(options.targets, function(target) {
+                target.sTopology = selectedTopology;
+                target.sTopic = selectedTopic;
+                target.sPartition = options.scopedVars.partition.value;
+                target.sTopoMetric = target.metric.replace('*', target.sTopology).replace('*', target.sTopic)
+                    .replace('*', target.sPartition);
+                return getStormData(target);
+              }));
+            }
+
             // To speed up querying on templatized dashboards.
             if (templateSrv.variables[1] && templateSrv.variables[1].name === "hosts") {
               var allHosts = templateSrv._values.hosts.lastIndexOf('}') > 0 ? templateSrv._values.hosts.slice(1,-1) :
@@ -616,6 +631,55 @@ define([
                   });
                 });
           }
+          var stormEntities = {};
+          AmbariMetricsDatasource.prototype.getStormEntities = function () {
+            return this.initMetricAppidMapping()
+                .then(function () {
+                  var storm = allMetrics["nimbus"];
+                  var extractTopologies = storm.filter(/./.test.bind(new
+                      RegExp("partition", 'g')));
+                  _.map(extractTopologies, function(topology){
+                    topology = topology.split('.').slice(0,5);
+                    var topologyName = topologyN = topology[1]; // Topology
+                    var topologyTopicName = topicN = topology[3]; // Topic
+                    var topologyTopicPartitionName = topology[4]; // Partition
+                    if (stormEntities[topologyName]) {
+                      if (stormEntities[topologyName][topologyTopicName]) {
+                        stormEntities[topologyName][topologyTopicName].push(topologyTopicPartitionName);
+                      } else {
+                        stormEntities[topologyName][topologyTopicName] = [topologyTopicPartitionName];
+                      }
+                    } else {
+                      stormEntities[topologyName] = {};
+                      stormEntities[topologyName][topologyTopicName] = [topologyTopicPartitionName];
+                    }
+                  });
+                });
+          };
+          //Templated Variables for Storm Topics per Topology
+          if (interpolated.includes("stormTopic")) {
+            var topicName = interpolated.substring(0,interpolated.indexOf('.'));
+            return this.getStormEntities().then(function () {
+              var topicNames = Object.keys(stormEntities[topicName]);
+              return _.map(topicNames, function(names){
+                return {
+                  text: names
+                };
+              });
+            });
+          }
+          //Templated Variables for Storm Partitions per Topic
+          if (interpolated.includes("stormPartition")) {
+            var topicN, topologyN;
+            return this.getStormEntities().then(function () {
+              var partitionNames = _.uniq(stormEntities[topologyN][topicN]);
+              return _.map(partitionNames, function(names){
+                return {
+                  text: names
+                };
+              });
+            });
+          }
           // Templated Variable for YARN Queues.
           // It will search the cluster and populate the queues.
           if(interpolated === "yarnqueues") {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c8ada18/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDF/grafana-storm-kafka-offset.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDF/grafana-storm-kafka-offset.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDF/grafana-storm-kafka-offset.json
new file mode 100644
index 0000000..ac1f829
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDF/grafana-storm-kafka-offset.json
@@ -0,0 +1,258 @@
+{
+  "id": null,
+  "title": "Storm - Kafka-Offset",
+  "originalTitle": "Storm - Kafka-Offset",
+  "tags": [
+    "storm",
+    "builtin",
+    "2.4.0.0"
+  ],
+  "style": "dark",
+  "timezone": "browser",
+  "editable": true,
+  "hideControls": false,
+  "sharedCrosshair": false,
+  "rows": [
+    {
+      "collapse": false,
+      "editable": true,
+      "height": "25px",
+      "panels": [
+        {
+          "content": "<h4 align=\"center\">Metrics to see the status for the Storm topics on a per partition level. Click on each row title to expand on demand to look at various metrics. </h4>\n<h6 style=\"color:red;\" align=\"center\">This dashboard is managed by Ambari.  You may lose any changes made to this dashboard.  If you want to customize, make your own copy.</h6>\n<h5 align=\"center\">Note: Period ('.') contained topology names are not supported.</h5>",
+          "editable": true,
+          "error": false,
+          "id": 14,
+          "isNew": true,
+          "links": [],
+          "mode": "html",
+          "span": 12,
+          "style": {},
+          "title": "",
+          "type": "text"
+        }
+      ],
+      "title": "New row"
+    },
+    {
+      "collapse": false,
+      "editable": true,
+      "height": "250px",
+      "panels": [
+        {
+          "aliasColors": {},
+          "bars": false,
+          "datasource": null,
+          "editable": true,
+          "error": false,
+          "fill": 1,
+          "grid": {
+            "leftLogBase": 1,
+            "leftMax": null,
+            "leftMin": null,
+            "rightLogBase": 1,
+            "rightMax": null,
+            "rightMin": null,
+            "threshold1": null,
+            "threshold1Color": "rgba(216, 200, 27, 0.27)",
+            "threshold2": null,
+            "threshold2Color": "rgba(234, 112, 112, 0.22)"
+          },
+          "id": 15,
+          "isNew": true,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 2,
+          "links": [],
+          "minSpan": 4,
+          "nullPointMode": "connected",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "repeat": "partition",
+          "scopedVars": {
+            "partition": {
+              "text": "partition-0",
+              "value": "partition-0",
+              "selected": true
+            }
+          },
+          "seriesOverrides": [],
+          "span": 12,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "aggregator": "avg",
+              "alias": "Kafka Offset",
+              "app": "nimbus",
+              "downsampleAggregator": "avg",
+              "errors": {},
+              "hosts": "",
+              "metric": "topology.*.kafka-topic.*.*.latestTimeOffset",
+              "precision": "default",
+              "refId": "B",
+              "sPartition": "partition-0",
+              "sTopic": "myKafkaTopic",
+              "sTopoMetric": "topology.kafka-topology.kafka-topic.myKafkaTopic.partition-0.latestTimeOffset",
+              "sTopology": "kafka-topology",
+              "seriesAggregator": "none",
+              "transform": "none"
+            },
+            {
+              "aggregator": "avg",
+              "alias": "Storm Offset",
+              "app": "nimbus",
+              "downsampleAggregator": "avg",
+              "errors": {},
+              "metric": "topology.*.kafka-topic.*.*.latestCompletedOffset",
+              "precision": "default",
+              "refId": "A",
+              "sComponent": "--acker",
+              "sPartition": "partition-0",
+              "sTopic": "myKafkaTopic",
+              "sTopoMetric": "topology.kafka-topology.kafka-topic.myKafkaTopic.partition-0.latestCompletedOffset",
+              "sTopology": "kafka-topology",
+              "seriesAggregator": "none",
+              "transform": "none"
+            }
+          ],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "$partition",
+          "tooltip": {
+            "shared": true,
+            "value_type": "cumulative"
+          },
+          "type": "graph",
+          "x-axis": true,
+          "y-axis": true,
+          "y_formats": [
+            "short",
+            "short"
+          ]
+        }
+      ],
+      "title": "New row"
+    }
+  ],
+  "time": {
+    "from": "now-6h",
+    "to": "now"
+  },
+  "timepicker": {
+    "now": true,
+    "refresh_intervals": [
+      "5s",
+      "10s",
+      "30s",
+      "1m",
+      "5m",
+      "15m",
+      "30m",
+      "1h",
+      "2h",
+      "1d"
+    ],
+    "time_options": [
+      "5m",
+      "15m",
+      "1h",
+      "6h",
+      "12h",
+      "24h",
+      "2d",
+      "7d",
+      "30d"
+    ]
+  },
+  "templating": {
+    "list": [
+      {
+        "allFormat": "glob",
+        "current": {
+          "text": "kafka-topology",
+          "value": "kafka-topology"
+        },
+        "datasource": null,
+        "includeAll": false,
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "topologies",
+        "options": [
+        ],
+        "query": "topologies",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
+        "current": {
+          "text": "",
+          "value": ""
+        },
+        "datasource": null,
+        "includeAll": false,
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "topic",
+        "options": [
+          
+        ],
+        "query": "$topologies.stormTopic",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
+        "current": {
+          "text": "partition-0",
+          "value": "partition-0"
+        },
+        "datasource": null,
+        "includeAll": true,
+        "multi": true,
+        "multiFormat": "glob",
+        "name": "partition",
+        "options": [
+          {
+            "text": "partition-0",
+            "value": "partition-0",
+            "selected": true
+          }
+        ],
+        "query": "$topic.stormPartition",
+        "refresh": true,
+        "type": "query"
+      }
+    ]
+  },
+  "annotations": {
+    "list": []
+  },
+  "refresh": false,
+  "schemaVersion": 8,
+  "version": 23,
+  "links": [
+    {
+      "asDropdown": true,
+      "icon": "external link",
+      "tags": [
+        "storm"
+      ],
+      "title": "Storm Dashboards",
+      "type": "dashboards"
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c8ada18/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-storm-kafka-offset.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-storm-kafka-offset.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-storm-kafka-offset.json
new file mode 100644
index 0000000..ac1f829
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-storm-kafka-offset.json
@@ -0,0 +1,258 @@
+{
+  "id": null,
+  "title": "Storm - Kafka-Offset",
+  "originalTitle": "Storm - Kafka-Offset",
+  "tags": [
+    "storm",
+    "builtin",
+    "2.4.0.0"
+  ],
+  "style": "dark",
+  "timezone": "browser",
+  "editable": true,
+  "hideControls": false,
+  "sharedCrosshair": false,
+  "rows": [
+    {
+      "collapse": false,
+      "editable": true,
+      "height": "25px",
+      "panels": [
+        {
+          "content": "<h4 align=\"center\">Metrics to see the status for the Storm topics on a per partition level. Click on each row title to expand on demand to look at various metrics. </h4>\n<h6 style=\"color:red;\" align=\"center\">This dashboard is managed by Ambari.  You may lose any changes made to this dashboard.  If you want to customize, make your own copy.</h6>\n<h5 align=\"center\">Note: Period ('.') contained topology names are not supported.</h5>",
+          "editable": true,
+          "error": false,
+          "id": 14,
+          "isNew": true,
+          "links": [],
+          "mode": "html",
+          "span": 12,
+          "style": {},
+          "title": "",
+          "type": "text"
+        }
+      ],
+      "title": "New row"
+    },
+    {
+      "collapse": false,
+      "editable": true,
+      "height": "250px",
+      "panels": [
+        {
+          "aliasColors": {},
+          "bars": false,
+          "datasource": null,
+          "editable": true,
+          "error": false,
+          "fill": 1,
+          "grid": {
+            "leftLogBase": 1,
+            "leftMax": null,
+            "leftMin": null,
+            "rightLogBase": 1,
+            "rightMax": null,
+            "rightMin": null,
+            "threshold1": null,
+            "threshold1Color": "rgba(216, 200, 27, 0.27)",
+            "threshold2": null,
+            "threshold2Color": "rgba(234, 112, 112, 0.22)"
+          },
+          "id": 15,
+          "isNew": true,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 2,
+          "links": [],
+          "minSpan": 4,
+          "nullPointMode": "connected",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "repeat": "partition",
+          "scopedVars": {
+            "partition": {
+              "text": "partition-0",
+              "value": "partition-0",
+              "selected": true
+            }
+          },
+          "seriesOverrides": [],
+          "span": 12,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "aggregator": "avg",
+              "alias": "Kafka Offset",
+              "app": "nimbus",
+              "downsampleAggregator": "avg",
+              "errors": {},
+              "hosts": "",
+              "metric": "topology.*.kafka-topic.*.*.latestTimeOffset",
+              "precision": "default",
+              "refId": "B",
+              "sPartition": "partition-0",
+              "sTopic": "myKafkaTopic",
+              "sTopoMetric": "topology.kafka-topology.kafka-topic.myKafkaTopic.partition-0.latestTimeOffset",
+              "sTopology": "kafka-topology",
+              "seriesAggregator": "none",
+              "transform": "none"
+            },
+            {
+              "aggregator": "avg",
+              "alias": "Storm Offset",
+              "app": "nimbus",
+              "downsampleAggregator": "avg",
+              "errors": {},
+              "metric": "topology.*.kafka-topic.*.*.latestCompletedOffset",
+              "precision": "default",
+              "refId": "A",
+              "sComponent": "--acker",
+              "sPartition": "partition-0",
+              "sTopic": "myKafkaTopic",
+              "sTopoMetric": "topology.kafka-topology.kafka-topic.myKafkaTopic.partition-0.latestCompletedOffset",
+              "sTopology": "kafka-topology",
+              "seriesAggregator": "none",
+              "transform": "none"
+            }
+          ],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "$partition",
+          "tooltip": {
+            "shared": true,
+            "value_type": "cumulative"
+          },
+          "type": "graph",
+          "x-axis": true,
+          "y-axis": true,
+          "y_formats": [
+            "short",
+            "short"
+          ]
+        }
+      ],
+      "title": "New row"
+    }
+  ],
+  "time": {
+    "from": "now-6h",
+    "to": "now"
+  },
+  "timepicker": {
+    "now": true,
+    "refresh_intervals": [
+      "5s",
+      "10s",
+      "30s",
+      "1m",
+      "5m",
+      "15m",
+      "30m",
+      "1h",
+      "2h",
+      "1d"
+    ],
+    "time_options": [
+      "5m",
+      "15m",
+      "1h",
+      "6h",
+      "12h",
+      "24h",
+      "2d",
+      "7d",
+      "30d"
+    ]
+  },
+  "templating": {
+    "list": [
+      {
+        "allFormat": "glob",
+        "current": {
+          "text": "kafka-topology",
+          "value": "kafka-topology"
+        },
+        "datasource": null,
+        "includeAll": false,
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "topologies",
+        "options": [
+        ],
+        "query": "topologies",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
+        "current": {
+          "text": "",
+          "value": ""
+        },
+        "datasource": null,
+        "includeAll": false,
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "topic",
+        "options": [
+          
+        ],
+        "query": "$topologies.stormTopic",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
+        "current": {
+          "text": "partition-0",
+          "value": "partition-0"
+        },
+        "datasource": null,
+        "includeAll": true,
+        "multi": true,
+        "multiFormat": "glob",
+        "name": "partition",
+        "options": [
+          {
+            "text": "partition-0",
+            "value": "partition-0",
+            "selected": true
+          }
+        ],
+        "query": "$topic.stormPartition",
+        "refresh": true,
+        "type": "query"
+      }
+    ]
+  },
+  "annotations": {
+    "list": []
+  },
+  "refresh": false,
+  "schemaVersion": 8,
+  "version": 23,
+  "links": [
+    {
+      "asDropdown": true,
+      "icon": "external link",
+      "tags": [
+        "storm"
+      ],
+      "title": "Storm Dashboards",
+      "type": "dashboards"
+    }
+  ]
+}
\ No newline at end of file


[16/17] ambari git commit: AMBARI-18472 Can't switch to not current version for not default config group. (ababiichuk)

Posted by jo...@apache.org.
AMBARI-18472 Can't switch to not current version for not default config group. (ababiichuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/aad2133f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/aad2133f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/aad2133f

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: aad2133f857e6067f37a01cce8870f27a72d933b
Parents: e44b880
Author: ababiichuk <ab...@hortonworks.com>
Authored: Wed Sep 28 10:50:10 2016 +0300
Committer: ababiichuk <ab...@hortonworks.com>
Committed: Wed Sep 28 15:30:43 2016 +0300

----------------------------------------------------------------------
 .../app/mixins/common/configs/configs_loader.js |  2 +-
 .../views/common/configs/config_history_flow.js | 46 ++++++++++----------
 .../common/configs/configs_loader_test.js       | 10 ++---
 3 files changed, 29 insertions(+), 29 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/aad2133f/ambari-web/app/mixins/common/configs/configs_loader.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/common/configs/configs_loader.js b/ambari-web/app/mixins/common/configs/configs_loader.js
index 87dec7f..666cef8 100644
--- a/ambari-web/app/mixins/common/configs/configs_loader.js
+++ b/ambari-web/app/mixins/common/configs/configs_loader.js
@@ -71,6 +71,7 @@ App.ConfigsLoader = Em.Mixin.create(App.GroupsMappingMixin, {
     if (!this.get('preSelectedConfigVersion')) {
       this.set('selectedVersion', this.get('currentDefaultVersion'));
     }
+    this.set('preSelectedConfigVersion', null);
   },
 
   /**
@@ -92,7 +93,6 @@ App.ConfigsLoader = Em.Mixin.create(App.GroupsMappingMixin, {
       });
       self.set('selectedConfigGroup', selectedGroup);
       self.loadSelectedVersion(preSelectedVersion.get('version'), selectedGroup);
-      self.set('preSelectedConfigVersion', null);
       preSelectedVersion = null;
     });
   },

http://git-wip-us.apache.org/repos/asf/ambari/blob/aad2133f/ambari-web/app/views/common/configs/config_history_flow.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/configs/config_history_flow.js b/ambari-web/app/views/common/configs/config_history_flow.js
index 07f111a..5ad65ac 100644
--- a/ambari-web/app/views/common/configs/config_history_flow.js
+++ b/ambari-web/app/views/common/configs/config_history_flow.js
@@ -173,6 +173,7 @@ App.ConfigHistoryFlowView = Em.View.extend({
       placement: 'top'
     });
     this.$(".version-info-bar-wrapper").stick_in_parent({parent: '#serviceConfig', offset_top: 10});
+    this.onChangeConfigGroup();
   },
 
   willDestroyElement: function() {
@@ -211,38 +212,37 @@ App.ConfigHistoryFlowView = Em.View.extend({
   onChangeConfigGroup: function () {
     var serviceVersions = this.get('serviceVersions');
     var selectedGroupName = this.get('controller.selectedConfigGroup.name');
+    var preselectedVersion = this.get('controller.selectedVersion');
     var startIndex = 0;
     var currentIndex = 0;
+    var isCurrentInDefaultGroupIndex = null;
+
 
     serviceVersions.setEach('isDisplayed', false);
-    //display the version belongs to current group
-    if (this.get('controller.selectedConfigGroup.isDefault')) {
-      // display current in default group
-      serviceVersions.forEach(function (serviceVersion, index) {
-        // find current in default group
-        if (serviceVersion.get('isCurrent') && serviceVersion.get('groupName') === App.ServiceConfigGroup.defaultGroupName) {
-          serviceVersion.set('isDisplayed', true);
-          currentIndex = index + 1;
-        }
-      });
-    } else {
-      // display current in selected group
+    // display selected version from config history
+    serviceVersions.forEach(function (serviceVersion, index) {
+      // find selected version in group
+      if (serviceVersion.get('version') === preselectedVersion && serviceVersion.get('groupName') === selectedGroupName) {
+        serviceVersion.set('isDisplayed', true);
+        currentIndex = index + 1;
+      }
+    });
+    // display current in selected group
+    if (!currentIndex) {
       serviceVersions.forEach(function (serviceVersion, index) {
         // find current in selected group
         if (serviceVersion.get('isCurrent') && serviceVersion.get('groupName') === selectedGroupName) {
           serviceVersion.set('isDisplayed', true);
           currentIndex = index + 1;
         }
+        if (serviceVersion.get('isCurrent') && serviceVersion.get('groupName') === App.ServiceConfigGroup.defaultGroupName) {
+          isCurrentInDefaultGroupIndex = index;
+        }
       });
-      // no current version for selected group, show default group current version
-      if (currentIndex == 0) {
-        serviceVersions.forEach(function (serviceVersion, index) {
-          // find current in default group
-          if (serviceVersion.get('isCurrent') && serviceVersion.get('groupName') === App.ServiceConfigGroup.defaultGroupName) {
-            currentIndex = index + 1;
-            serviceVersion.set('isDisplayed', true);
-          }
-        });
+      // if there is no current version in selected group show current version from default group
+      if (!currentIndex) {
+        serviceVersions[isCurrentInDefaultGroupIndex].set('isDisplayed', true);
+        currentIndex = isCurrentInDefaultGroupIndex + 1;
       }
     }
     // show current version as the last one
@@ -251,7 +251,7 @@ App.ConfigHistoryFlowView = Em.View.extend({
     }
     this.set('startIndex', startIndex);
     this.adjustFlowView();
-  }.observes('controller.selectedConfigGroup.name'),
+  }.observes('controller.selectedConfigGroup'),
 
   /**
    *  define the first element in viewport
@@ -641,4 +641,4 @@ App.ConfigHistoryDropdownSubMenuView = Em.View.extend({
       $("#config_version_popup").removeAttr('style');
     }
   })
-});
\ No newline at end of file
+});

http://git-wip-us.apache.org/repos/asf/ambari/blob/aad2133f/ambari-web/test/mixins/common/configs/configs_loader_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/mixins/common/configs/configs_loader_test.js b/ambari-web/test/mixins/common/configs/configs_loader_test.js
index e718bbe..8f22b44 100644
--- a/ambari-web/test/mixins/common/configs/configs_loader_test.js
+++ b/ambari-web/test/mixins/common/configs/configs_loader_test.js
@@ -103,6 +103,11 @@ describe('App.ConfigsLoader', function() {
       mixin.loadServiceConfigVersionsSuccess({items: []});
       expect(mixin.get('selectedVersion')).to.be.equal('v1');
     });
+
+    it("preSelectedConfigVersion should be null", function() {
+      mixin.loadServiceConfigVersionsSuccess();
+      expect(mixin.get('preSelectedConfigVersion')).to.be.null;
+    });
   });
 
   describe("#loadPreSelectedConfigVersion()", function () {
@@ -213,11 +218,6 @@ describe('App.ConfigsLoader', function() {
       mixin.loadPreSelectedConfigVersion();
       expect(mixin.loadSelectedVersion.calledOnce).to.be.true;
     });
-
-    it("preSelectedConfigVersion should be null", function() {
-      mixin.loadPreSelectedConfigVersion();
-      expect(mixin.get('preSelectedConfigVersion')).to.be.null;
-    });
   });
 
   describe("#loadCurrentVersions()", function () {