Posted to commits@ambari.apache.org by jo...@apache.org on 2016/09/29 17:25:46 UTC

[01/14] ambari git commit: AMBARI-18469. Fix custom jdbc functionality for ambari-server setup.(vbrodetskyi)

Repository: ambari
Updated Branches:
  refs/heads/branch-feature-AMBARI-18456 f0da4fa49 -> 817aed4bd


AMBARI-18469. Fix custom jdbc functionality for ambari-server setup.(vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/05e65e29
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/05e65e29
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/05e65e29

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 05e65e297a09e1498a8ee75fd55d059c00f99976
Parents: 20ce57b
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Wed Sep 28 09:26:32 2016 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Wed Sep 28 09:26:32 2016 +0300

----------------------------------------------------------------------
 .../python/ambari_server/dbConfiguration.py     |  52 +++++++--
 .../src/test/python/TestAmbariServer.py         | 107 +++++++------------
 2 files changed, 79 insertions(+), 80 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/05e65e29/ambari-server/src/main/python/ambari_server/dbConfiguration.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/dbConfiguration.py b/ambari-server/src/main/python/ambari_server/dbConfiguration.py
index ede257f..1d54dce 100644
--- a/ambari-server/src/main/python/ambari_server/dbConfiguration.py
+++ b/ambari-server/src/main/python/ambari_server/dbConfiguration.py
@@ -19,17 +19,19 @@ limitations under the License.
 '''
 import glob
 import os
+import shutil
 
 from ambari_commons import OSConst, OSCheck
 from ambari_commons.exceptions import FatalException
 from ambari_commons.logging_utils import get_silent, print_error_msg, print_info_msg, print_warning_msg, set_silent
 from ambari_commons.os_family_impl import OsFamilyImpl
 from ambari_commons.str_utils import cbool
+from ambari_server.serverClassPath import JDBC_DRIVER_PATH_PROPERTY
 from ambari_server.serverConfiguration import decrypt_password_for_alias, get_ambari_properties, get_is_secure, \
   get_resources_location, get_value_from_properties, is_alias_string, \
   JDBC_PASSWORD_PROPERTY, JDBC_RCA_PASSWORD_ALIAS, PRESS_ENTER_MSG, DEFAULT_DBMS_PROPERTY, JDBC_DATABASE_PROPERTY, \
-  PERSISTENCE_TYPE_PROPERTY
-from ambari_server.userInput import get_validated_string_input
+  PERSISTENCE_TYPE_PROPERTY, update_properties, configDefaults
+from ambari_server.userInput import get_YN_input, get_validated_string_input
 
 
 #Database settings
@@ -160,14 +162,46 @@ class DBMSConfig(object):
     pass
 
   def ensure_jdbc_driver_installed(self, properties):
-    (result, msg) = self._prompt_jdbc_driver_install(properties)
-    if result == -1:
-      print_error_msg(msg)
-      raise FatalException(-1, msg)
+    server_jdbc_path = properties.get_property(JDBC_DRIVER_PATH_PROPERTY)
+    if server_jdbc_path and os.path.isfile(server_jdbc_path):
+      return True
+
+    default_driver_path = self._get_default_driver_path(properties)
+    if default_driver_path and os.path.isfile(default_driver_path):
+      ambari_should_use_existing_default_jdbc = get_YN_input("Should ambari use existing default jdbc {0} [y/n] (y)? ".format(default_driver_path), True)
+      if ambari_should_use_existing_default_jdbc:
+        properties.process_pair(JDBC_DRIVER_PATH_PROPERTY, default_driver_path)
+        update_properties(properties)
+        return True
+
+    path_to_custom_jdbc_driver = get_validated_string_input("Enter full path to custom jdbc driver: ", None, None, None, False, False)
+    if path_to_custom_jdbc_driver and os.path.isfile(path_to_custom_jdbc_driver):
+      try:
+        custom_jdbc_name = os.path.basename(path_to_custom_jdbc_driver)
+        if not path_to_custom_jdbc_driver == os.path.join(configDefaults.JAVA_SHARE_PATH, custom_jdbc_name):
+          if os.path.isfile(os.path.join(configDefaults.JAVA_SHARE_PATH, custom_jdbc_name)):
+            replace_jdbc_in_share_dir = get_YN_input("You already have file {0} in /usr/share/java/. Should it be replaced? [y/n] (y)? ".format(custom_jdbc_name), True)
+            if replace_jdbc_in_share_dir:
+              try:
+                os.remove(os.path.join(configDefaults.JAVA_SHARE_PATH, custom_jdbc_name))
+              except Exception, ee:
+                err = 'ERROR: Could not remove jdbc file. %s' % os.path.join(configDefaults.JAVA_SHARE_PATH, custom_jdbc_name)
+                raise FatalException(1, err)
+          shutil.copy(path_to_custom_jdbc_driver, configDefaults.JAVA_SHARE_PATH)
+          print "Copying {0} to {1}".format(path_to_custom_jdbc_driver, configDefaults.JAVA_SHARE_PATH)
+      except Exception, e:
+        err = "Can not copy file {0} to {1} due to: {2} . Please check file " \
+          "permissions and free disk space.".format(path_to_custom_jdbc_driver, configDefaults.JAVA_SHARE_PATH, str(e))
+        raise FatalException(1, err)
+
+      properties.process_pair(JDBC_DRIVER_PATH_PROPERTY, path_to_custom_jdbc_driver)
+      update_properties(properties)
+      return True
+    else:
+      print_error_msg("Custom jdbc connector path is unavailable. Please put correct path to jdbc connector.")
+
+    return False
 
-    if result != 1:
-      result = self._install_jdbc_driver(properties, result)
-    return cbool(result)
 
   def change_db_files_owner(self):
     if self._is_local_database():
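
For orientation, the rewritten ensure_jdbc_driver_installed() above resolves the
driver in three steps: reuse the path already stored under server.jdbc.driver.path
if that file still exists, otherwise offer the default driver found on the host,
otherwise prompt for a custom path and copy it into /usr/share/java before
recording it. The standalone Python 3 sketch below illustrates that precedence
only; it is not Ambari code: prompt_yes_no() stands in for get_YN_input(),
JAVA_SHARE_PATH for configDefaults.JAVA_SHARE_PATH, the default_driver_path
argument for _get_default_driver_path(), and FatalException handling is omitted.

import os
import shutil

JDBC_DRIVER_PATH_PROPERTY = "server.jdbc.driver.path"
JAVA_SHARE_PATH = "/usr/share/java"  # stand-in for configDefaults.JAVA_SHARE_PATH

def prompt_yes_no(question):
    # Stand-in for Ambari's get_YN_input(); an empty answer defaults to "yes".
    return input(question).strip().lower() in ("", "y", "yes")

def ensure_jdbc_driver_installed(properties, default_driver_path=None):
    # 1. A configured driver that still exists on disk wins outright.
    configured = properties.get(JDBC_DRIVER_PATH_PROPERTY)
    if configured and os.path.isfile(configured):
        return True

    # 2. Offer the default driver, if one was detected on the host.
    if default_driver_path and os.path.isfile(default_driver_path):
        if prompt_yes_no("Use existing default jdbc {0} [y/n] (y)? ".format(default_driver_path)):
            properties[JDBC_DRIVER_PATH_PROPERTY] = default_driver_path
            return True

    # 3. Fall back to a user-supplied path, copied into the shared java dir.
    custom = input("Enter full path to custom jdbc driver: ").strip()
    if custom and os.path.isfile(custom):
        target = os.path.join(JAVA_SHARE_PATH, os.path.basename(custom))
        if custom != target:
            if os.path.isfile(target) and prompt_yes_no(
                    "{0} already exists. Replace it? [y/n] (y)? ".format(target)):
                os.remove(target)
            shutil.copy(custom, JAVA_SHARE_PATH)
        properties[JDBC_DRIVER_PATH_PROPERTY] = custom
        return True

    return False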

http://git-wip-us.apache.org/repos/asf/ambari/blob/05e65e29/ambari-server/src/test/python/TestAmbariServer.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestAmbariServer.py b/ambari-server/src/test/python/TestAmbariServer.py
index 5746503..ed200b8 100644
--- a/ambari-server/src/test/python/TestAmbariServer.py
+++ b/ambari-server/src/test/python/TestAmbariServer.py
@@ -125,7 +125,7 @@ with patch.object(platform, "linux_distribution", return_value = MagicMock(retur
                 from ambari_server.userInput import get_YN_input, get_choice_string_input, get_validated_string_input, \
                   read_password
                 from ambari_server_main import get_ulimit_open_files, ULIMIT_OPEN_FILES_KEY, ULIMIT_OPEN_FILES_DEFAULT
-                from ambari_server.serverClassPath import ServerClassPath
+                from ambari_server.serverClassPath import JDBC_DRIVER_PATH_PROPERTY, ServerClassPath
                 from ambari_server.hostUpdate import update_host_names
                 from ambari_server.checkDatabase import check_database
                 from ambari_server import serverConfiguration
@@ -6459,7 +6459,12 @@ class TestAmbariServer(TestCase):
   @patch("os.remove")
   @patch("os.symlink")
   @patch("shutil.copy")
-  def test_ensure_jdbc_drivers_installed(self, shutil_copy_mock, os_symlink_mock, os_remove_mock, lexists_mock, isdir_mock, glob_mock,
+  @patch("os.path.isfile")
+  @patch("ambari_server.dbConfiguration.get_YN_input")
+  @patch("ambari_server.dbConfiguration.update_properties")
+  @patch("ambari_server.dbConfiguration.get_validated_string_input")
+  def test_ensure_jdbc_drivers_installed(self, get_valid_str_in_mock, update_properties_mock, getYN_mock, isfile_mock, shutil_copy_mock,
+                              os_symlink_mock, os_remove_mock, lexists_mock, isdir_mock, glob_mock,
                               raw_input_mock, print_warning_msg, print_error_msg_mock, print_error_msg_2_mock,
                               get_ambari_properties_mock, get_ambari_properties_2_mock):
     out = StringIO.StringIO()
@@ -6472,7 +6477,11 @@ class TestAmbariServer(TestCase):
       print_error_msg_mock.reset_mock()
       print_warning_msg.reset_mock()
       raw_input_mock.reset_mock()
-
+      isfile_mock.reset_mock()
+      os_remove_mock.reset_mock()
+      getYN_mock.reset_mock()
+      get_valid_str_in_mock.reset_mock()
+      update_properties_mock.reset_mock()
       args = MagicMock()
 
       del args.database_index
@@ -6485,12 +6494,13 @@ class TestAmbariServer(TestCase):
 
       return args
 
-    # Check positive scenario
+    # Check scenario when default jdbc was found and used
     drivers_list = [os.path.join(os.sep,'usr','share','java','ojdbc6.jar')]
     resources_dir = os.sep + 'tmp'
 
     props = Properties()
     props.process_pair(RESOURCES_DIR_PROPERTY, resources_dir)
+    props.process_pair(JDBC_DRIVER_PATH_PROPERTY, "/some/test/path/to/oracle-jdbc.jar")
     get_ambari_properties_2_mock.return_value = get_ambari_properties_mock.return_value = props
 
     factory = DBMSConfigFactory()
@@ -6500,94 +6510,49 @@ class TestAmbariServer(TestCase):
     isdir_mock.return_value = True
 
     lexists_mock.return_value = True
+    isfile_mock.return_value = True
+    getYN_mock.return_value = True
 
     dbms = factory.create(args, props)
     rcode = dbms.ensure_jdbc_driver_installed(props)
 
-    self.assertEquals(os_symlink_mock.call_count, 1)
-    self.assertEquals(os_symlink_mock.call_args_list[0][0][0], os.path.join(os.sep,'tmp','ojdbc6.jar'))
-    self.assertEquals(os_symlink_mock.call_args_list[0][0][1], os.path.join(os.sep,'tmp','oracle-jdbc-driver.jar'))
+    self.assertEquals(update_properties_mock.call_count, 0)
     self.assertTrue(rcode)
-    self.assertEquals(shutil_copy_mock.call_count, 1)
-    self.assertEquals(shutil_copy_mock.call_args_list[0][0][0], drivers_list[0])
-    self.assertEquals(shutil_copy_mock.call_args_list[0][0][1], resources_dir)
-
-    # Check negative scenarios
-    # Silent option, no drivers
-    set_silent(True)
 
     args = reset_mocks()
-    glob_mock.return_value = []
-
-    failed = False
-
-    try:
-      dbms = factory.create(args, props)
-      rcode = dbms.ensure_jdbc_driver_installed(props)
-    except FatalException:
-      failed = True
-
-    self.assertTrue(print_error_msg_mock.called)
-    self.assertTrue(failed)
-
-    # Non-Silent option, no drivers
-    set_silent(False)
-
-    args = reset_mocks()
-    glob_mock.return_value = []
-
-    failed = False
-
-    try:
-      dbms = factory.create(args, props)
-      rcode = dbms.ensure_jdbc_driver_installed(props)
-    except FatalException:
-      failed = True
-
-    self.assertTrue(failed)
-    self.assertTrue(print_error_msg_mock.called)
-
-    # Non-Silent option, no drivers at first ask, present drivers after that
-    args = reset_mocks()
-
-    glob_mock.side_effect = [[], drivers_list, drivers_list]
+    isfile_mock.side_effect = [False, True]
+    getYN_mock.return_value = True
 
     dbms = factory.create(args, props)
     rcode = dbms.ensure_jdbc_driver_installed(props)
 
+    self.assertEquals(update_properties_mock.call_count, 1)
+    self.assertEquals(getYN_mock.call_count, 1)
     self.assertTrue(rcode)
-    self.assertEquals(shutil_copy_mock.call_count, 1)
-    self.assertEquals(shutil_copy_mock.call_args_list[0][0][0], drivers_list[0])
-    self.assertEquals(shutil_copy_mock.call_args_list[0][0][1], resources_dir)
 
-    # Non-Silent option, no drivers at first ask, no drivers after that
+    # check scenario when user entered valid jdbc full path
     args = reset_mocks()
-    glob_mock.side_effect = [[], []]
+    isfile_mock.side_effect = [False, False, True, True]
+    get_valid_str_in_mock.return_value = '/test/full/path/to/oracle_jdbc.jar'
 
-    failed = False
-
-    try:
-      dbms = factory.create(args, props)
-      rcode = dbms.ensure_jdbc_driver_installed(props)
-    except FatalException:
-      failed = True
+    rcode = dbms.ensure_jdbc_driver_installed(props)
 
-    self.assertTrue(failed)
-    self.assertTrue(print_error_msg_mock.called)
+    self.assertEquals(update_properties_mock.call_count, 1)
+    self.assertTrue(rcode)
+    self.assertEquals(props['server.jdbc.driver.path'], '/test/full/path/to/oracle_jdbc.jar')
+    self.assertEquals(shutil_copy_mock.call_count, 1)
+    self.assertEquals(os_remove_mock.call_count, 1)
 
-    # Failed to copy_files
+    # check scenario when no default jdbc and user entered incorrect full jdbc path
     args = reset_mocks()
-    glob_mock.side_effect = [[], drivers_list, drivers_list]
+    isfile_mock.side_effect = [False, False, False, False]
 
-    try:
-      dbms = factory.create(args, props)
-      rcode = dbms.ensure_jdbc_driver_installed(props)
-    except FatalException:
-      failed = True
+    rcode = dbms.ensure_jdbc_driver_installed(props)
 
-    self.assertTrue(failed)
+    self.assertFalse(rcode)
+    print_error_msg_mock.assert_called_once_with("Custom jdbc connector path is unavailable. Please put correct path to jdbc connector.")
 
-    sys.stdout = sys.__stdout__
     pass
 
 


[03/14] ambari git commit: AMBARI-18051 - Services should be able to provide their own pre-req checks by supplying a jar file

Posted by jo...@apache.org.
AMBARI-18051 - Services should be able to provide their own pre-req checks by supplying a jar file


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/20ce57b7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/20ce57b7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/20ce57b7

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 20ce57b7bb578dd337007d4411c695c95d6bf287
Parents: aad2133
Author: Tim Thorpe <tt...@apache.org>
Authored: Wed Sep 28 09:28:06 2016 -0700
Committer: Tim Thorpe <tt...@apache.org>
Committed: Wed Sep 28 09:28:06 2016 -0700

----------------------------------------------------------------------
 ambari-server/pom.xml | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/20ce57b7/ambari-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index d507b82..e37accd 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -578,6 +578,25 @@
       </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <version>1.7</version>
+        <executions>
+          <execution>
+            <id>clean-sample-upgrade-check-jar</id>
+            <phase>process-test-classes</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <target>
+                <delete dir="target/test-classes/checks" includeemptydirs="true"/>
+              </target>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-jar-plugin</artifactId>
         <version>3.0.2</version>
         <executions>
@@ -603,9 +622,9 @@
               <goal>run</goal>
             </goals>
             <configuration>
-              <tasks>
+              <target>
                 <mkdir dir="target/test-classes/extensions/EXT/0.1/services/OOZIE2/checks/tmp"/>
-              </tasks>
+              </target>
             </configuration>
           </execution>
         </executions>


[04/14] ambari git commit: AMBARI-18470. RU/EU cannot start because ServiceCheckValidityCheck incorrectly calculates Service Checks that ran (alejandro)

Posted by jo...@apache.org.
AMBARI-18470. RU/EU cannot start because ServiceCheckValidityCheck incorrectly calculates Service Checks that ran (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/84ecbb40
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/84ecbb40
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/84ecbb40

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 84ecbb404280b541f15e2b23068ecd449bdd930c
Parents: 05e65e2
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Mon Sep 26 14:55:47 2016 -0700
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Wed Sep 28 10:55:19 2016 -0700

----------------------------------------------------------------------
 .../server/checks/ServiceCheckValidityCheck.java    | 16 +++++++++-------
 .../checks/ServiceCheckValidityCheckTest.java       |  4 ++--
 2 files changed, 11 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/84ecbb40/ambari-server/src/main/java/org/apache/ambari/server/checks/ServiceCheckValidityCheck.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/ServiceCheckValidityCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServiceCheckValidityCheck.java
index 2bc22c9..b77e1d5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/ServiceCheckValidityCheck.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServiceCheckValidityCheck.java
@@ -19,6 +19,7 @@ package org.apache.ambari.server.checks;
 
 import java.text.SimpleDateFormat;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.Date;
 import java.util.HashMap;
 import java.util.LinkedHashSet;
@@ -31,9 +32,12 @@ import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.controller.PrereqCheckRequest;
 import org.apache.ambari.server.controller.internal.PageRequestImpl;
 import org.apache.ambari.server.controller.internal.RequestImpl;
+import org.apache.ambari.server.controller.internal.SortRequestImpl;
 import org.apache.ambari.server.controller.internal.TaskResourceProvider;
 import org.apache.ambari.server.controller.spi.PageRequest;
 import org.apache.ambari.server.controller.spi.Predicate;
+import org.apache.ambari.server.controller.spi.SortRequest;
+import org.apache.ambari.server.controller.spi.SortRequestProperty;
 import org.apache.ambari.server.controller.utilities.PredicateBuilder;
 import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
 import org.apache.ambari.server.orm.dao.ServiceConfigDAO;
@@ -65,8 +69,11 @@ public class ServiceCheckValidityCheck extends AbstractCheckDescriptor {
   private static final Logger LOG = LoggerFactory.getLogger(ServiceCheckValidityCheck.class);
 
   private static final SimpleDateFormat DATE_FORMAT = new SimpleDateFormat("MM-dd-yyyy hh:mm:ss");
+  private static List<SortRequestProperty> sortRequestProperties =
+      Collections.singletonList(new SortRequestProperty(TaskResourceProvider.TASK_START_TIME_PROPERTY_ID, SortRequest.Order.DESC));
+  private static SortRequest sortRequest = new SortRequestImpl(sortRequestProperties);
   private static final PageRequestImpl PAGE_REQUEST = new PageRequestImpl(PageRequest.StartingPoint.End, 1000, 0, null, null);
-  private static final RequestImpl REQUEST = new RequestImpl(null, null, null, null, null, PAGE_REQUEST);
+  private static final RequestImpl REQUEST = new RequestImpl(null, null, null, null, sortRequest, PAGE_REQUEST);
   private static final Predicate PREDICATE = new PredicateBuilder().property(TaskResourceProvider.TASK_COMMAND_PROPERTY_ID)
       .equals(RoleCommand.SERVICE_CHECK.name()).toPredicate();
 
@@ -117,14 +124,9 @@ public class ServiceCheckValidityCheck extends AbstractCheckDescriptor {
     for (HostRoleCommandEntity command : commands) {
       Role role = command.getRole();
 
+      // Because results are already sorted by start_time desc, first occurrence is guaranteed to have max(start_time).
       if (!latestTimestamps.containsKey(role)) {
         latestTimestamps.put(role, command);
-      } else {
-        Long latest = latestTimestamps.get(role).getStartTime();
-
-        if (command.getStartTime() > latest) {
-          latestTimestamps.put(role, command);
-        }
       }
     }
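
Why this works: the request now carries a SortRequestImpl ordering tasks by
start_time descending, so (as the new comment says) the first command seen for a
role is guaranteed to have that role's maximum start_time among the returned rows,
and the manual max(start_time) comparison becomes unnecessary. A small Python
illustration of that invariant (not Ambari code):

def latest_per_role(commands):
    # Assumes the rows arrive sorted by start_time descending, as the sort
    # request in the Java change guarantees; the first occurrence per role wins.
    latest = {}
    for cmd in sorted(commands, key=lambda c: c["start_time"], reverse=True):
        latest.setdefault(cmd["role"], cmd)
    return latest

commands = [
    {"role": "HDFS_SERVICE_CHECK", "start_time": 100},
    {"role": "HDFS_SERVICE_CHECK", "start_time": 300},
    {"role": "YARN_SERVICE_CHECK", "start_time": 200},
]
assert latest_per_role(commands)["HDFS_SERVICE_CHECK"]["start_time"] == 300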
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/84ecbb40/ambari-server/src/test/java/org/apache/ambari/server/checks/ServiceCheckValidityCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServiceCheckValidityCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServiceCheckValidityCheckTest.java
index 8d72b96..dd843a8 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServiceCheckValidityCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServiceCheckValidityCheckTest.java
@@ -169,7 +169,7 @@ public class ServiceCheckValidityCheckTest {
     HostRoleCommandEntity hostRoleCommandEntity2 = new HostRoleCommandEntity();
     hostRoleCommandEntity2.setRoleCommand(RoleCommand.SERVICE_CHECK);
     hostRoleCommandEntity2.setCommandDetail(COMMAND_DETAIL);
-    hostRoleCommandEntity2.setStartTime(SERVICE_CHECK_START_TIME + 3000L);
+    hostRoleCommandEntity2.setStartTime(CONFIG_CREATE_TIMESTAMP - 1L);
     hostRoleCommandEntity2.setRole(Role.HDFS_SERVICE_CHECK);
 
     when(serviceConfigDAO.getLastServiceConfig(eq(CLUSTER_ID), eq(SERVICE_NAME))).thenReturn(serviceConfigEntity);
@@ -177,6 +177,6 @@ public class ServiceCheckValidityCheckTest {
 
     PrerequisiteCheck check = new PrerequisiteCheck(null, CLUSTER_NAME);
     serviceCheckValidityCheck.perform(check, new PrereqCheckRequest(CLUSTER_NAME));
-    Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus());
+    Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus());
   }
 }
\ No newline at end of file


[14/14] ambari git commit: Merge branch 'trunk' into branch-feature-AMBARI-18456

Posted by jo...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-18456


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/817aed4b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/817aed4b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/817aed4b

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 817aed4bdd800801cc9c21cc265ce577f4b16476
Parents: f0da4fa 5fa2d9b
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Thu Sep 29 13:21:14 2016 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Thu Sep 29 13:21:14 2016 -0400

----------------------------------------------------------------------
 ambari-server/pom.xml                           |  23 +-
 .../checks/ServiceCheckValidityCheck.java       |  16 +-
 .../internal/HostResourceProvider.java          | 130 ++++--
 .../server/controller/internal/RequestImpl.java |   2 +-
 .../python/ambari_server/dbConfiguration.py     |  52 ++-
 .../AMBARI_METRICS/0.1.0/metainfo.xml           |  17 +-
 .../stacks/HDP/2.5/services/ATLAS/metainfo.xml  |   2 +-
 .../stacks/HDP/2.5/services/SPARK/kerberos.json |   2 +-
 .../stacks/HDP/2.5/services/YARN/metainfo.xml   |   5 +
 .../main/resources/stacks/HDP/2.6/metainfo.xml  |  23 +
 .../resources/stacks/HDP/2.6/repos/repoinfo.xml | 132 ++++++
 .../HDP/2.6/services/ACCUMULO/metainfo.xml      |  26 ++
 .../stacks/HDP/2.6/services/ATLAS/metainfo.xml  |  26 ++
 .../stacks/HDP/2.6/services/FALCON/metainfo.xml |  26 ++
 .../stacks/HDP/2.6/services/FLUME/metainfo.xml  |  26 ++
 .../stacks/HDP/2.6/services/HBASE/metainfo.xml  |  26 ++
 .../stacks/HDP/2.6/services/HDFS/metainfo.xml   |  26 ++
 .../stacks/HDP/2.6/services/HIVE/metainfo.xml   |  26 ++
 .../stacks/HDP/2.6/services/KAFKA/metainfo.xml  |  26 ++
 .../HDP/2.6/services/KERBEROS/metainfo.xml      |  25 ++
 .../stacks/HDP/2.6/services/KNOX/metainfo.xml   |  26 ++
 .../stacks/HDP/2.6/services/MAHOUT/metainfo.xml |  26 ++
 .../stacks/HDP/2.6/services/OOZIE/metainfo.xml  |  25 ++
 .../stacks/HDP/2.6/services/PIG/metainfo.xml    |  26 ++
 .../stacks/HDP/2.6/services/RANGER/metainfo.xml |  29 ++
 .../HDP/2.6/services/RANGER_KMS/metainfo.xml    |  29 ++
 .../stacks/HDP/2.6/services/SLIDER/metainfo.xml |  26 ++
 .../stacks/HDP/2.6/services/SPARK/metainfo.xml  |  29 ++
 .../stacks/HDP/2.6/services/SPARK2/metainfo.xml |  29 ++
 .../stacks/HDP/2.6/services/SQOOP/metainfo.xml  |  26 ++
 .../stacks/HDP/2.6/services/STORM/metainfo.xml  |  27 ++
 .../stacks/HDP/2.6/services/TEZ/metainfo.xml    |  26 ++
 .../stacks/HDP/2.6/services/YARN/metainfo.xml   |  27 ++
 .../HDP/2.6/services/ZEPPELIN/metainfo.xml      |  27 ++
 .../HDP/2.6/services/ZOOKEEPER/metainfo.xml     |  26 ++
 .../stacks/HDP/2.6/services/stack_advisor.py    |  21 +
 .../server/agent/TestHeartbeatMonitor.java      |  32 +-
 .../server/api/services/AmbariMetaInfoTest.java |  14 +-
 .../checks/ServiceCheckValidityCheckTest.java   |   4 +-
 .../server/checks/UpgradeCheckOrderTest.java    |   4 +-
 .../AmbariManagementControllerTest.java         | 106 +++++
 .../ConfigGroupResourceProviderTest.java        | 229 +++++-----
 .../internal/HostResourceProviderTest.java      |  15 +-
 ...ThreadPoolExecutorCompletionServiceTest.java |   2 +-
 .../metadata/AgentAlertDefinitionsTest.java     |   2 +-
 .../apache/ambari/server/orm/OrmTestHelper.java |  12 +-
 .../apache/ambari/server/orm/TestOrmImpl.java   |  94 ++---
 .../server/orm/dao/AlertDispatchDAOTest.java    | 419 ++++++-------------
 .../security/authorization/TestUsers.java       | 369 ++++++----------
 .../upgrades/ConfigureActionTest.java           | 104 ++---
 .../upgrades/UpgradeActionTest.java             | 150 ++-----
 .../apache/ambari/server/state/ServiceTest.java | 106 ++---
 .../svccomphost/ServiceComponentHostTest.java   | 167 +++-----
 .../server/upgrade/UpgradeCatalog210Test.java   |  34 +-
 .../src/test/python/TestAmbariServer.py         | 107 ++---
 55 files changed, 1745 insertions(+), 1287 deletions(-)
----------------------------------------------------------------------



[02/14] ambari git commit: AMBARI-18483. Revert db optimization in UT.(vbrodetskyi)

Posted by jo...@apache.org.
AMBARI-18483. Revert db optimization in UT.(vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/392a752d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/392a752d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/392a752d

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 392a752d27ce3d0dd7e044ce8c4ab5e9d10f6e74
Parents: 06282e0
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Wed Sep 28 14:02:38 2016 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Wed Sep 28 14:03:08 2016 +0300

----------------------------------------------------------------------
 .../server/agent/TestHeartbeatMonitor.java      |  32 +-
 ...ThreadPoolExecutorCompletionServiceTest.java |   2 +-
 .../apache/ambari/server/orm/OrmTestHelper.java |  12 +-
 .../apache/ambari/server/orm/TestOrmImpl.java   |  94 ++---
 .../server/orm/dao/AlertDispatchDAOTest.java    | 419 ++++++-------------
 .../security/authorization/TestUsers.java       | 369 ++++++----------
 .../upgrades/ConfigureActionTest.java           | 104 ++---
 .../upgrades/UpgradeActionTest.java             | 150 ++-----
 .../apache/ambari/server/state/ServiceTest.java | 106 ++---
 .../svccomphost/ServiceComponentHostTest.java   | 167 +++-----
 .../server/upgrade/UpgradeCatalog210Test.java   |  34 +-
 11 files changed, 456 insertions(+), 1033 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/392a752d/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
index 63460a2..d121fcb 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
@@ -57,9 +57,7 @@ import org.apache.ambari.server.state.svccomphost.ServiceComponentHostInstallEve
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpSucceededEvent;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartedEvent;
 import org.junit.After;
-import org.junit.AfterClass;
 import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
 import org.slf4j.Logger;
@@ -68,7 +66,6 @@ import org.slf4j.LoggerFactory;
 import com.google.inject.Guice;
 import com.google.inject.Injector;
 import com.google.inject.persist.PersistService;
-import com.google.inject.persist.UnitOfWork;
 
 public class TestHeartbeatMonitor {
 
@@ -86,44 +83,19 @@ public class TestHeartbeatMonitor {
   private static final Logger LOG =
           LoggerFactory.getLogger(TestHeartbeatMonitor.class);
 
-  @BeforeClass
-  public static void classSetUp() {
+  @Before
+  public void setup() throws Exception {
     injector = Guice.createInjector(new InMemoryDefaultTestModule());
     injector.getInstance(GuiceJpaInitializer.class);
     helper = injector.getInstance(OrmTestHelper.class);
     ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
   }
 
-  @Before
-  public void setup() throws Exception {
-    cleanup();
-    injector.getInstance(UnitOfWork.class).begin();
-  }
-
   @After
   public void teardown() {
-    injector.getInstance(UnitOfWork.class).end();
-  }
-
-  @AfterClass
-  public static void afterClass() throws Exception {
     injector.getInstance(PersistService.class).stop();
   }
 
-  private void cleanup() throws AmbariException {
-    Clusters clusters = injector.getInstance(Clusters.class);
-    Map<String, Cluster> clusterMap = clusters.getClusters();
-
-
-    for (String clusterName : clusterMap.keySet()) {
-      clusters.deleteCluster(clusterName);
-    }
-
-    for (Host host : clusters.getHosts()) {
-      clusters.deleteHost(host.getHostName());
-    }
-  }
-
   private void setOsFamily(Host host, String osFamily, String osVersion) {
     Map<String, String> hostAttributes = new HashMap<String, String>();
     hostAttributes.put("os_family", osFamily);
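
The same revert pattern repeats across the test classes in this commit: shared
@BeforeClass/@AfterClass fixtures with hand-rolled cleanup() methods go back to a
fresh injector per test via @Before/@After, trading some setup time for isolation,
since no test can observe state leaked by another. A compact Python unittest
analogy of that per-test-fixture shape (an illustration, not Ambari code):

import unittest

class PerTestFixture(unittest.TestCase):
    def setUp(self):
        # Analogue of @Before: build fresh state for every test, so no manual
        # cleanup() pass between tests is needed.
        self.store = {"clusters": []}

    def tearDown(self):
        # Analogue of @After: tear everything down, as PersistService.stop()
        # does for the in-memory persistence unit.
        self.store.clear()

    def test_write_is_isolated(self):
        self.store["clusters"].append("c1")
        self.assertEqual(self.store["clusters"], ["c1"])

if __name__ == "__main__":
    unittest.main()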

http://git-wip-us.apache.org/repos/asf/ambari/blob/392a752d/ambari-server/src/test/java/org/apache/ambari/server/controller/test/BufferedThreadPoolExecutorCompletionServiceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/test/BufferedThreadPoolExecutorCompletionServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/test/BufferedThreadPoolExecutorCompletionServiceTest.java
index ec99ab0..50ed33b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/test/BufferedThreadPoolExecutorCompletionServiceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/test/BufferedThreadPoolExecutorCompletionServiceTest.java
@@ -31,7 +31,7 @@ import org.junit.Test;
 public class BufferedThreadPoolExecutorCompletionServiceTest {
 
   private void longOp() throws InterruptedException {
-    Thread.sleep(1000);
+    Thread.sleep(700);
     System.out.println("Completed " + Thread.currentThread());
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/392a752d/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
index 1bfcfb9..de2d292 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
@@ -18,6 +18,8 @@
 
 package org.apache.ambari.server.orm;
 
+import javax.persistence.EntityManager;
+import junit.framework.Assert;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
@@ -33,8 +35,6 @@ import java.util.Map;
 import java.util.Set;
 import java.util.UUID;
 
-import javax.persistence.EntityManager;
-
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
@@ -96,8 +96,6 @@ import com.google.inject.Provider;
 import com.google.inject.Singleton;
 import com.google.inject.persist.Transactional;
 
-import junit.framework.Assert;
-
 @Singleton
 public class OrmTestHelper {
 
@@ -501,7 +499,7 @@ public class OrmTestHelper {
     target.setTargetName("Target Name " + System.currentTimeMillis());
 
     alertDispatchDAO.create(target);
-    return alertDispatchDAO.findTargetById(target.getTargetId());
+    return target;
   }
 
   /**
@@ -518,7 +516,7 @@ public class OrmTestHelper {
     target.setGlobal(true);
 
     alertDispatchDAO.create(target);
-    return alertDispatchDAO.findTargetById(target.getTargetId());
+    return target;
   }
 
   /**
@@ -563,7 +561,7 @@ public class OrmTestHelper {
     group.setAlertTargets(targets);
 
     alertDispatchDAO.create(group);
-    return alertDispatchDAO.findGroupById(group.getGroupId());
+    return group;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/392a752d/ambari-server/src/test/java/org/apache/ambari/server/orm/TestOrmImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/TestOrmImpl.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/TestOrmImpl.java
index e72692a..0f6d2e2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/TestOrmImpl.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/TestOrmImpl.java
@@ -48,98 +48,57 @@ import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.orm.entities.StageEntity;
 import org.apache.ambari.server.security.authorization.ResourceType;
 import org.junit.After;
-import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.inject.Guice;
+import com.google.inject.Inject;
 import com.google.inject.Injector;
 import com.google.inject.persist.PersistService;
-import com.google.inject.persist.UnitOfWork;
 
 public class TestOrmImpl extends Assert {
   private static final Logger log = LoggerFactory.getLogger(TestOrmImpl.class);
+  @Inject
+  private Injector injector;
+  @Inject
+  private StackDAO stackDAO;
+  @Inject
+  private ResourceTypeDAO resourceTypeDAO;
+  @Inject
+  private ClusterDAO clusterDAO;
+  @Inject
+  private OrmTestHelper ormTestHelper;
+  @Inject
+  private ClusterServiceDAO clusterServiceDAO;
+  @Inject
+  private HostRoleCommandDAO hostRoleCommandDAO;
+  @Inject
+  private HostDAO hostDAO;
+  @Inject
+  private StageDAO stageDAO;
+  @Inject
+  private EntityManager entityManager;
+  @Inject
+  private RequestDAO requestDAO;
 
-  private static Injector injector;
-  private static StackDAO stackDAO;
-  private static ResourceTypeDAO resourceTypeDAO;
-  private static ClusterDAO clusterDAO;
-  private static OrmTestHelper ormTestHelper;
-  private static ClusterServiceDAO clusterServiceDAO;
-  private static HostRoleCommandDAO hostRoleCommandDAO;
-  private static HostDAO hostDAO;
-  private static StageDAO stageDAO;
-  private static EntityManager entityManager;
-  private static RequestDAO requestDAO;
-
-  @BeforeClass
-  public static void classSetUp() throws Exception {
+  @Before
+  public void setup() {
     injector = Guice.createInjector(new InMemoryDefaultTestModule());
     injector.getInstance(GuiceJpaInitializer.class);
-
+    injector.injectMembers(this);
     // required to load stack information into the DB
     injector.getInstance(AmbariMetaInfo.class);
-    stackDAO = injector.getInstance(StackDAO.class);
-    resourceTypeDAO = injector.getInstance(ResourceTypeDAO.class);
-    clusterDAO = injector.getInstance(ClusterDAO.class);
-    ormTestHelper = injector.getInstance(OrmTestHelper.class);
-    clusterServiceDAO = injector.getInstance(ClusterServiceDAO.class);
-    hostRoleCommandDAO = injector.getInstance(HostRoleCommandDAO.class);
-    hostDAO = injector.getInstance(HostDAO.class);
-    stageDAO = injector.getInstance(StageDAO.class);
-    entityManager = injector.getInstance(EntityManager.class);
-    requestDAO = injector.getInstance(RequestDAO.class);
-
     ormTestHelper.createDefaultData();
-    injector.getInstance(UnitOfWork.class).end();
-  }
-
-  @Before
-  public void setup() {
-    injector.getInstance(UnitOfWork.class).begin();
   }
 
   @After
   public void teardown() {
-    injector.getInstance(UnitOfWork.class).end();
-    cleanup();
-  }
-
-  @AfterClass
-  public static void afterClass() throws Exception {
     injector.getInstance(PersistService.class).stop();
   }
 
-  private void cleanup() {
-
-
-    List<HostRoleCommandEntity> hostRoleCommandEntities = hostRoleCommandDAO.findAll();
-    if (hostRoleCommandEntities != null) {
-      for (HostRoleCommandEntity hostRoleCommandEntity : hostRoleCommandEntities) {
-        hostRoleCommandDAO.remove(hostRoleCommandEntity);
-      }
-    }
-
-    List<StageEntity> stageEntities = stageDAO.findAll();
-    if (stageEntities != null) {
-      for (StageEntity stageEntity : stageEntities) {
-        stageDAO.remove(stageEntity);
-      }
-    }
-
-    List<RequestEntity> requestEntities = requestDAO.findAll();
-    if (requestEntities != null) {
-      for (RequestEntity requestEntity : requestEntities) {
-        requestDAO.remove(requestEntity);
-      }
-    }
-  }
-
   /**
    * persistence provider is responsible for returning empty collection if relation doesn't exists
    */
@@ -333,7 +292,6 @@ public class TestOrmImpl extends Assert {
     assertEquals(1L, stageDAO.getLastRequestId());
   }
 
-  @Ignore
   @Test
   public void testConcurrentModification() throws InterruptedException {
     final StackEntity stackEntity = stackDAO.find("HDP", "2.2.0");

http://git-wip-us.apache.org/repos/asf/ambari/blob/392a752d/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/AlertDispatchDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/AlertDispatchDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/AlertDispatchDAOTest.java
index 5697ba0..87afb38 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/AlertDispatchDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/AlertDispatchDAOTest.java
@@ -52,12 +52,9 @@ import org.apache.ambari.server.orm.entities.AlertGroupEntity;
 import org.apache.ambari.server.orm.entities.AlertHistoryEntity;
 import org.apache.ambari.server.orm.entities.AlertNoticeEntity;
 import org.apache.ambari.server.orm.entities.AlertTargetEntity;
-import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
 import org.apache.ambari.server.state.AlertState;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.NotificationState;
 import org.apache.ambari.server.state.ServiceComponentFactory;
 import org.apache.ambari.server.state.ServiceComponentHostFactory;
@@ -66,9 +63,7 @@ import org.apache.ambari.server.state.alert.Scope;
 import org.apache.ambari.server.state.alert.SourceType;
 import org.apache.ambari.server.utils.EventBusSynchronizer;
 import org.junit.After;
-import org.junit.AfterClass;
 import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
 
 import com.google.inject.Guice;
@@ -83,25 +78,30 @@ public class AlertDispatchDAOTest {
 
   private final static String HOSTNAME = "c6401.ambari.apache.org";
 
-  private static Clusters m_clusters;
-  private static Cluster m_cluster;
-  private static Injector m_injector;
-  private static AlertDispatchDAO m_dao;
-  private static AlertDefinitionDAO m_definitionDao;
-  private static AlertsDAO m_alertsDao;
-  private static OrmTestHelper m_helper;
-  private static HostComponentDesiredStateDAO hostComponentDesiredStateDAO;
-  private static HostComponentStateDAO hostComponentStateDAO;
-
-  private static ServiceFactory m_serviceFactory;
-  private static ServiceComponentFactory m_componentFactory;
-  private static ServiceComponentHostFactory m_schFactory;
-  private static AlertDaoHelper m_alertHelper;
-
-  @BeforeClass
-  public static void classSetUp() throws Exception {
+  private Clusters m_clusters;
+  private Cluster m_cluster;
+  private Injector m_injector;
+  private AlertDispatchDAO m_dao;
+  private AlertDefinitionDAO m_definitionDao;
+  private AlertsDAO m_alertsDao;
+  private OrmTestHelper m_helper;
+  private HostComponentDesiredStateDAO hostComponentDesiredStateDAO;
+  private HostComponentStateDAO hostComponentStateDAO;
+
+  private ServiceFactory m_serviceFactory;
+  private ServiceComponentFactory m_componentFactory;
+  private ServiceComponentHostFactory m_schFactory;
+  private AlertDaoHelper m_alertHelper;
+
+  /**
+   *
+   */
+  @Before
+  public void setup() throws Exception {
     m_injector = Guice.createInjector(new InMemoryDefaultTestModule());
     m_injector.getInstance(GuiceJpaInitializer.class);
+    m_injector.getInstance(UnitOfWork.class).begin();
+
 
     m_dao = m_injector.getInstance(AlertDispatchDAO.class);
     m_alertsDao = m_injector.getInstance(AlertsDAO.class);
@@ -112,8 +112,7 @@ public class AlertDispatchDAOTest {
     m_schFactory = m_injector.getInstance(ServiceComponentHostFactory.class);
     m_clusters = m_injector.getInstance(Clusters.class);
     m_alertHelper = m_injector.getInstance(AlertDaoHelper.class);
-    hostComponentDesiredStateDAO = m_injector.getInstance(HostComponentDesiredStateDAO.class);
-    hostComponentStateDAO = m_injector.getInstance(HostComponentStateDAO.class);
+
 
     // !!! need a synchronous op for testing
     EventBusSynchronizer.synchronizeAmbariEventPublisher(m_injector);
@@ -123,77 +122,18 @@ public class AlertDispatchDAOTest {
   }
 
   /**
-   *
-   */
-  @Before
-  public void setup() throws Exception {
-    m_injector.getInstance(UnitOfWork.class).begin();
-  }
-
-  /**
    * @throws Exception
    */
   @After
   public void teardown() throws Exception {
     m_injector.getInstance(UnitOfWork.class).end();
-    cleanup();
-  }
-
-  @AfterClass
-  public static void afterClass() throws Exception {
     m_injector.getInstance(PersistService.class).stop();
   }
 
-  private void cleanup() throws AmbariException {
-    List<AlertGroupEntity> alertGroupEntities = m_dao.findAllGroups();
-    if (alertGroupEntities != null) {
-      for (AlertGroupEntity alertGroupEntity : alertGroupEntities) {
-        m_dao.remove(alertGroupEntity);
-      }
-    }
-
-    List<AlertTargetEntity> alertTargetEntities = m_dao.findAllTargets();
-    if (alertTargetEntities != null) {
-      for (AlertTargetEntity alertTargetEntity : alertTargetEntities) {
-        m_dao.remove(alertTargetEntity);
-      }
-    }
-
-    List<HostComponentDesiredStateEntity> hostComponentDesiredStateEntities = hostComponentDesiredStateDAO.findAll();
-    if (hostComponentDesiredStateEntities != null) {
-      for (HostComponentDesiredStateEntity hcdse : hostComponentDesiredStateEntities) {
-        hostComponentDesiredStateDAO.remove(hcdse);
-      }
-    }
-
-    List<HostComponentStateEntity> hostComponentStateEntities = hostComponentStateDAO.findAll();
-    if (hostComponentStateEntities != null) {
-      for (HostComponentStateEntity hcse : hostComponentStateEntities) {
-        hostComponentStateDAO.remove(hcse);
-      }
-    }
-
-    List<AlertDefinitionEntity> alertDefinitionEntities = m_definitionDao.findAll();
-    if (alertDefinitionEntities != null) {
-      for (AlertDefinitionEntity alertDefinitionEntity : alertDefinitionEntities) {
-        m_definitionDao.remove(alertDefinitionEntity);
-      }
-    }
-
-    m_cluster.deleteAllServices();
-
-    List<Host> hosts = m_clusters.getHosts();
-    if (hosts != null) {
-      for (Host host : hosts) {
-        m_clusters.deleteHost(host.getHostName());
-      }
-    }
-  }
-
   private void initTestData() throws Exception {
     Set<AlertTargetEntity> targets = createTargets();
 
-    for (int i = 0; i < 10; i++) {
+    for (int i = 0; i < 2; i++) {
       AlertGroupEntity group = new AlertGroupEntity();
       group.setDefault(false);
       group.setGroupName("Group Name " + i);
@@ -210,28 +150,26 @@ public class AlertDispatchDAOTest {
    *
    */
   @Test
-  public void testFindAllTargets() throws Exception {
+  public void testFindTargets() throws Exception {
     initTestData();
+    // find all targets
     List<AlertTargetEntity> targets = m_dao.findAllTargets();
     assertNotNull(targets);
-    assertEquals(5, targets.size());
-  }
-
-  /**
-   * @throws Exception
-   */
-  public void testFindTargetsByIds() throws Exception {
-    List<AlertTargetEntity> targets = m_dao.findAllTargets();
-    assertNotNull(targets);
-    assertEquals(5, targets.size());
+    assertEquals(1, targets.size());
 
+    // find by ids
     List<Long> ids = new ArrayList<Long>();
     ids.add(targets.get(0).getTargetId());
-    ids.add(targets.get(1).getTargetId());
     ids.add(99999L);
 
     targets = m_dao.findTargetsById(ids);
-    assertEquals(2, targets.size());
+    assertEquals(1, targets.size());
+
+    //find by name
+    AlertTargetEntity target = targets.get(0);
+
+    AlertTargetEntity actual = m_dao.findTargetByName(target.getTargetName());
+    assertEquals(target, actual);
   }
 
   /**
@@ -259,53 +197,22 @@ public class AlertDispatchDAOTest {
    *
    */
   @Test
-  public void testFindTargetByName() throws Exception {
-    initTestData();
-    List<AlertTargetEntity> targets = m_dao.findAllTargets();
-    assertNotNull(targets);
-    AlertTargetEntity target = targets.get(3);
-
-    AlertTargetEntity actual = m_dao.findTargetByName(target.getTargetName());
-    assertEquals(target, actual);
-  }
-
-  /**
-   *
-   */
-  @Test
-  public void testFindAllGroups() throws Exception {
+  public void testFindGroups() throws Exception {
     initTestData();
+    // find all
     List<AlertGroupEntity> groups = m_dao.findAllGroups();
     assertNotNull(groups);
-    assertEquals(10, groups.size());
-  }
+    assertEquals(2, groups.size());
 
-  /**
-   *
-   */
-  @Test
-  public void testFindGroupByName() throws Exception {
-    initTestData();
-    List<AlertGroupEntity> groups = m_dao.findAllGroups();
-    assertNotNull(groups);
-    AlertGroupEntity group = groups.get(3);
+    //find by name
+    AlertGroupEntity group = groups.get(1);
 
     AlertGroupEntity actual = m_dao.findGroupByName(group.getClusterId(),
-        group.getGroupName());
+            group.getGroupName());
 
     assertEquals(group, actual);
-  }
-
-  /**
-   * @throws Exception
-   */
-  @Test
-  public void testFindGroupsByIds() throws Exception {
-    initTestData();
-    List<AlertGroupEntity> groups = m_dao.findAllGroups();
-    assertNotNull(groups);
-    assertEquals(10, groups.size());
 
+    //find by id
     List<Long> ids = new ArrayList<Long>();
     ids.add(groups.get(0).getGroupId());
     ids.add(groups.get(1).getGroupId());
@@ -313,13 +220,28 @@ public class AlertDispatchDAOTest {
 
     groups = m_dao.findGroupsById(ids);
     assertEquals(2, groups.size());
+
+    // find default group
+    for (AlertGroupEntity alertGroupEntity : groups) {
+      assertFalse(alertGroupEntity.isDefault());
+    }
+
+    Cluster cluster = m_helper.buildNewCluster(m_clusters, m_serviceFactory,
+            m_componentFactory, m_schFactory, HOSTNAME);
+
+    AlertGroupEntity hdfsGroup = m_dao.findDefaultServiceGroup(
+            cluster.getClusterId(), "HDFS");
+
+    assertNotNull(hdfsGroup);
+    assertTrue(hdfsGroup.isDefault());
   }
 
   /**
    *
    */
   @Test
-  public void testCreateGroup() throws Exception {
+  public void testCreateUpdateRemoveGroup() throws Exception {
+    // create group
     AlertTargetEntity target = m_helper.createAlertTarget();
     Set<AlertTargetEntity> targets = new HashSet<AlertTargetEntity>();
     targets.add(target);
@@ -333,15 +255,47 @@ public class AlertDispatchDAOTest {
     assertEquals(group.isDefault(), actual.isDefault());
     assertEquals(group.getAlertTargets(), actual.getAlertTargets());
     assertEquals(group.getAlertDefinitions(), actual.getAlertDefinitions());
-  }
 
+    // update group
+    AlertGroupEntity group1 = m_helper.createAlertGroup(
+            m_cluster.getClusterId(), null);
+
+    String groupName = group1.getGroupName();
+
+    group1 = m_dao.findGroupById(group1.getGroupId());
+    group1.setGroupName(groupName + "FOO");
+    group1.setDefault(true);
+
+    m_dao.merge(group1);
+    group = m_dao.findGroupById(group1.getGroupId());
+
+    assertEquals(groupName + "FOO", group1.getGroupName());
+    assertEquals(true, group1.isDefault());
+    assertEquals(0, group1.getAlertDefinitions().size());
+    assertEquals(0, group1.getAlertTargets().size());
+
+    group1.addAlertTarget(target);
+    m_dao.merge(group);
+
+    group1 = m_dao.findGroupById(group1.getGroupId());
+    assertEquals(targets, group1.getAlertTargets());
+
+    // delete group
+    m_dao.remove(group);
+    group = m_dao.findGroupById(group.getGroupId());
+    assertNull(group);
 
+    target = m_dao.findTargetById(target.getTargetId());
+    assertNotNull(target);
+    assertEquals(1, m_dao.findAllTargets().size());
+  }
 
   /**
    *
    */
   @Test
-  public void testCreateTarget() throws Exception {
+  public void testCreateAndRemoveTarget() throws Exception {
+    // create target
     int targetCount = m_dao.findAllTargets().size();
 
     AlertTargetEntity target = m_helper.createAlertTarget();
@@ -367,6 +321,13 @@ public class AlertDispatchDAOTest {
     assertEquals(group, actualGroup);
 
     assertEquals(targetCount + 1, m_dao.findAllTargets().size());
+
+    // remove target
+    m_dao.remove(target);
+
+    target = m_dao.findTargetById(target.getTargetId());
+    assertNull(target);
+
   }
 
   /**
@@ -426,57 +387,6 @@ public class AlertDispatchDAOTest {
   }
 
   /**
-   *
-   */
-  @Test
-  public void testDeleteGroup() throws Exception {
-    int targetCount = m_dao.findAllTargets().size();
-
-    AlertGroupEntity group = m_helper.createAlertGroup(
-        m_cluster.getClusterId(), null);
-    AlertTargetEntity target = m_helper.createAlertTarget();
-    assertEquals(targetCount + 1, m_dao.findAllTargets().size());
-
-    group = m_dao.findGroupById(group.getGroupId());
-    assertNotNull(group);
-    assertNotNull(group.getAlertTargets());
-    assertEquals(0, group.getAlertTargets().size());
-
-    group.addAlertTarget(target);
-    m_dao.merge(group);
-
-    group = m_dao.findGroupById(group.getGroupId());
-    assertNotNull(group);
-    assertNotNull(group.getAlertTargets());
-    assertEquals(1, group.getAlertTargets().size());
-
-    m_dao.remove(group);
-    group = m_dao.findGroupById(group.getGroupId());
-    assertNull(group);
-
-    target = m_dao.findTargetById(target.getTargetId());
-    assertNotNull(target);
-    assertEquals(targetCount + 1, m_dao.findAllTargets().size());
-  }
-
-  /**
-   *
-   */
-  @Test
-  public void testDeleteTarget() throws Exception {
-    AlertTargetEntity target = m_helper.createAlertTarget();
-    target = m_dao.findTargetById(target.getTargetId());
-    assertTrue(target.getAlertStates().size() > 0);
-
-    assertNotNull(target);
-
-    m_dao.remove(target);
-
-    target = m_dao.findTargetById(target.getTargetId());
-    assertNull(target);
-  }
-
-  /**
   *
   */
   @Test
@@ -544,39 +454,6 @@ public class AlertDispatchDAOTest {
   }
 
   /**
-   *
-   */
-  @Test
-  public void testUpdateGroup() throws Exception {
-    AlertTargetEntity target = m_helper.createAlertTarget();
-    Set<AlertTargetEntity> targets = new HashSet<AlertTargetEntity>();
-    targets.add(target);
-
-    String groupName = "Group Name " + System.currentTimeMillis();
-
-    AlertGroupEntity group = m_helper.createAlertGroup(
-        m_cluster.getClusterId(), null);
-
-    group = m_dao.findGroupById(group.getGroupId());
-    group.setGroupName(groupName + "FOO");
-    group.setDefault(true);
-
-    m_dao.merge(group);
-    group = m_dao.findGroupById(group.getGroupId());
-
-    assertEquals(groupName + "FOO", group.getGroupName());
-    assertEquals(true, group.isDefault());
-    assertEquals(0, group.getAlertDefinitions().size());
-    assertEquals(0, group.getAlertTargets().size());
-
-    group.addAlertTarget(target);
-    m_dao.merge(group);
-
-    group = m_dao.findGroupById(group.getGroupId());
-    assertEquals(targets, group.getAlertTargets());
-  }
-
-  /**
   * Tests finding groups by a definition ID that they are associated with.
    *
    * @throws Exception
@@ -686,10 +563,11 @@ public class AlertDispatchDAOTest {
    */
   @Test
   public void testAlertNoticePredicate() throws Exception {
-    Cluster cluster = m_helper.buildNewCluster(m_clusters, m_serviceFactory,
-        m_componentFactory, m_schFactory, HOSTNAME);
+    m_helper.addHost(m_clusters, m_cluster, HOSTNAME);
+    m_helper.installHdfsService(m_cluster, m_serviceFactory, m_componentFactory, m_schFactory, HOSTNAME);
+    m_helper.installYarnService(m_cluster, m_serviceFactory, m_componentFactory, m_schFactory, HOSTNAME);
 
-    m_alertHelper.populateData(cluster);
+    m_alertHelper.populateData(m_cluster);
 
     Predicate clusterPredicate = null;
     Predicate hdfsPredicate = null;
@@ -764,10 +642,11 @@ public class AlertDispatchDAOTest {
    */
   @Test
   public void testAlertNoticePagination() throws Exception {
-    Cluster cluster = m_helper.buildNewCluster(m_clusters, m_serviceFactory,
-        m_componentFactory, m_schFactory, HOSTNAME);
+    m_helper.addHost(m_clusters, m_cluster, HOSTNAME);
+    m_helper.installHdfsService(m_cluster, m_serviceFactory, m_componentFactory, m_schFactory, HOSTNAME);
+    m_helper.installYarnService(m_cluster, m_serviceFactory, m_componentFactory, m_schFactory, HOSTNAME);
 
-    m_alertHelper.populateData(cluster);
+    m_alertHelper.populateData(m_cluster);
 
     AlertNoticeRequest request = new AlertNoticeRequest();
     request.Pagination = null;
@@ -805,10 +684,11 @@ public class AlertDispatchDAOTest {
    */
   @Test
   public void testAlertNoticeSorting() throws Exception {
-    Cluster cluster = m_helper.buildNewCluster(m_clusters, m_serviceFactory,
-        m_componentFactory, m_schFactory, HOSTNAME);
+    m_helper.addHost(m_clusters, m_cluster, HOSTNAME);
+    m_helper.installHdfsService(m_cluster, m_serviceFactory, m_componentFactory, m_schFactory, HOSTNAME);
+    m_helper.installYarnService(m_cluster, m_serviceFactory, m_componentFactory, m_schFactory, HOSTNAME);
 
-    m_alertHelper.populateData(cluster);
+    m_alertHelper.populateData(m_cluster);
 
     List<SortRequestProperty> sortProperties = new ArrayList<SortRequestProperty>();
     SortRequest sortRequest = new SortRequestImpl(sortProperties);
@@ -864,57 +744,25 @@ public class AlertDispatchDAOTest {
   }
 
   /**
-   *
-   */
-  @Test
-  public void testFindDefaultGroup() throws Exception {
-    initTestData();
-    List<AlertGroupEntity> groups = m_dao.findAllGroups();
-    assertNotNull(groups);
-    assertEquals(10, groups.size());
-
-    for (AlertGroupEntity group : groups) {
-      assertFalse(group.isDefault());
-    }
-
-    Cluster cluster = m_helper.buildNewCluster(m_clusters, m_serviceFactory,
-        m_componentFactory, m_schFactory, HOSTNAME);
-
-    AlertGroupEntity hdfsGroup = m_dao.findDefaultServiceGroup(
-        cluster.getClusterId(), "HDFS");
-
-    assertNotNull(hdfsGroup);
-    assertTrue(hdfsGroup.isDefault());
-  }
-
-  /**
    * Tests that when creating a new {@link AlertDefinitionEntity}, if the group
    * for its service does not exist, then it will be created.
    */
   @Test
   public void testDefaultGroupAutomaticCreation() throws Exception {
-    initTestData();
-    List<AlertGroupEntity> groups = m_dao.findAllGroups();
-    assertNotNull(groups);
-    assertEquals(10, groups.size());
-
-    for (AlertGroupEntity group : groups) {
-      assertFalse(group.isDefault());
-    }
-
-    Cluster cluster = m_helper.buildNewCluster(m_clusters, m_serviceFactory,
-        m_componentFactory, m_schFactory, HOSTNAME);
+    m_helper.addHost(m_clusters, m_cluster, HOSTNAME);
+    m_helper.installHdfsService(m_cluster, m_serviceFactory, m_componentFactory, m_schFactory, HOSTNAME);
+    //m_helper.installYarnService(m_cluster, m_serviceFactory, m_componentFactory, m_schFactory, HOSTNAME);
 
     AlertGroupEntity hdfsGroup = m_dao.findDefaultServiceGroup(
-        cluster.getClusterId(), "HDFS");
+            m_cluster.getClusterId(), "HDFS");
 
     // remove the HDFS default group
     m_dao.remove(hdfsGroup);
-    hdfsGroup = m_dao.findDefaultServiceGroup(cluster.getClusterId(), "HDFS");
+    hdfsGroup = m_dao.findDefaultServiceGroup(m_cluster.getClusterId(), "HDFS");
     assertNull(hdfsGroup);
 
     AlertDefinitionEntity datanodeProcess = new AlertDefinitionEntity();
-    datanodeProcess.setClusterId(cluster.getClusterId());
+    datanodeProcess.setClusterId(m_cluster.getClusterId());
     datanodeProcess.setDefinitionName("datanode_process");
     datanodeProcess.setServiceName("HDFS");
     datanodeProcess.setComponentName("DATANODE");
@@ -926,7 +774,7 @@ public class AlertDispatchDAOTest {
     m_definitionDao.create(datanodeProcess);
 
     // the group should be created and should be default
-    hdfsGroup = m_dao.findDefaultServiceGroup(cluster.getClusterId(), "HDFS");
+    hdfsGroup = m_dao.findDefaultServiceGroup(m_cluster.getClusterId(), "HDFS");
     assertNotNull(hdfsGroup);
     assertTrue(hdfsGroup.isDefault());
   }
@@ -939,22 +787,15 @@ public class AlertDispatchDAOTest {
   @Test(expected = AmbariException.class)
   public void testDefaultGroupInvalidServiceNoCreation() throws Exception {
     initTestData();
-    List<AlertGroupEntity> groups = m_dao.findAllGroups();
-    assertNotNull(groups);
-    assertEquals(10, groups.size());
-
-    for (AlertGroupEntity group : groups) {
-      assertFalse(group.isDefault());
-    }
-
-    Cluster cluster = m_helper.buildNewCluster(m_clusters, m_serviceFactory,
-        m_componentFactory, m_schFactory, HOSTNAME);
+    m_helper.addHost(m_clusters, m_cluster, HOSTNAME);
+    m_helper.installHdfsService(m_cluster, m_serviceFactory, m_componentFactory, m_schFactory, HOSTNAME);
+    //m_helper.installYarnService(m_cluster, m_serviceFactory, m_componentFactory, m_schFactory, HOSTNAME);
 
-    assertEquals(12, m_dao.findAllGroups().size());
+    assertEquals(3, m_dao.findAllGroups().size());
 
     // create a definition with an invalid service
     AlertDefinitionEntity datanodeProcess = new AlertDefinitionEntity();
-    datanodeProcess.setClusterId(cluster.getClusterId());
+    datanodeProcess.setClusterId(m_cluster.getClusterId());
     datanodeProcess.setDefinitionName("datanode_process");
     datanodeProcess.setServiceName("INVALID");
     datanodeProcess.setComponentName("DATANODE");
@@ -968,8 +809,7 @@ public class AlertDispatchDAOTest {
       m_definitionDao.create(datanodeProcess);
     } finally {
       // assert no group was added
-      assertEquals(12, m_dao.findAllGroups().size());
-      teardown();
+      assertEquals(3, m_dao.findAllGroups().size());
     }
   }
 
@@ -985,7 +825,9 @@ public class AlertDispatchDAOTest {
     m_helper.installYarnService(m_cluster, m_serviceFactory,
         m_componentFactory, m_schFactory, HOSTNAME);
 
-    for (int i = 0; i < 8; i++) {
+    List<AlertDefinitionEntity> alertDefinitionEntities = new ArrayList<>();
+
+    for (int i = 0; i < 2; i++) {
       AlertDefinitionEntity definition = new AlertDefinitionEntity();
       definition.setDefinitionName("Alert Definition " + i);
       definition.setServiceName("YARN");
@@ -997,11 +839,10 @@ public class AlertDispatchDAOTest {
       definition.setSource("{\"type\" : \"SCRIPT\"}");
       definition.setSourceType(SourceType.SCRIPT);
       m_definitionDao.create(definition);
+      alertDefinitionEntities.add(definition);
     }
 
-    List<AlertDefinitionEntity> alertDefinitions = m_definitionDao.findAll();
-    assertEquals(8, alertDefinitions.size());
-    return alertDefinitions;
+    return alertDefinitionEntities;
   }
 
   /**
@@ -1010,7 +851,7 @@ public class AlertDispatchDAOTest {
    */
   private Set<AlertTargetEntity> createTargets() throws Exception {
     Set<AlertTargetEntity> targets = new HashSet<AlertTargetEntity>();
-    for (int i = 0; i < 5; i++) {
+    for (int i = 0; i < 1; i++) {
       AlertTargetEntity target = new AlertTargetEntity();
       target.setDescription("Target Description " + i);
       target.setNotificationType("EMAIL");

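The recurring change in the AlertDispatchDAOTest hunks above is the replacement of a per-test buildNewCluster(...) call with a shared m_cluster fixture that each test populates through the ORM helper. A minimal sketch of the resulting test shape, using only calls that appear in the patch (the m_helper, m_clusters, m_cluster, m_alertHelper fields and factories are assumed to be wired in the class setup):

  @Test
  public void testAgainstSharedCluster() throws Exception {
    // Register the host and install services on the shared fixture
    // rather than building a brand-new cluster for this test alone.
    m_helper.addHost(m_clusters, m_cluster, HOSTNAME);
    m_helper.installHdfsService(m_cluster, m_serviceFactory, m_componentFactory, m_schFactory, HOSTNAME);
    m_helper.installYarnService(m_cluster, m_serviceFactory, m_componentFactory, m_schFactory, HOSTNAME);

    m_alertHelper.populateData(m_cluster);

    // ... assertions keyed off m_cluster.getClusterId() ...
  }
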
http://git-wip-us.apache.org/repos/asf/ambari/blob/392a752d/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/TestUsers.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/TestUsers.java b/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/TestUsers.java
index a80cd03..f54ac5c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/TestUsers.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/TestUsers.java
@@ -43,9 +43,7 @@ import org.apache.ambari.server.orm.entities.ResourceEntity;
 import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
 import org.apache.ambari.server.orm.entities.UserEntity;
 import org.junit.After;
-import org.junit.AfterClass;
 import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -55,102 +53,77 @@ import org.springframework.security.core.context.SecurityContextHolder;
 import org.springframework.security.crypto.password.PasswordEncoder;
 
 import com.google.inject.Guice;
+import com.google.inject.Inject;
 import com.google.inject.Injector;
 import com.google.inject.persist.PersistService;
 
 public class TestUsers {
-  private static Injector injector;
-
-
-  protected static Users users;
-  protected static UserDAO userDAO;
-  protected static GroupDAO groupDAO;
-  protected static PermissionDAO permissionDAO;
-  protected static ResourceDAO resourceDAO;
-  protected static ResourceTypeDAO resourceTypeDAO;
-  protected static PrincipalTypeDAO principalTypeDAO;
-  protected static PrincipalDAO principalDAO;
-  protected static PasswordEncoder passwordEncoder;
-
-
-  @BeforeClass
-  public static void classSetUp() {
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-    users = injector.getInstance(Users.class);
-    userDAO = injector.getInstance(UserDAO.class);
-    groupDAO = injector.getInstance(GroupDAO.class);
-    permissionDAO = injector.getInstance(PermissionDAO.class);
-    resourceDAO = injector.getInstance(ResourceDAO.class);
-    resourceTypeDAO = injector.getInstance(ResourceTypeDAO.class);
-    principalTypeDAO = injector.getInstance(PrincipalTypeDAO.class);
-    principalDAO = injector.getInstance(PrincipalDAO.class);
-    passwordEncoder = injector.getInstance(PasswordEncoder.class);
-  }
+  private Injector injector;
+
+  @Inject
+  protected Users users;
+  @Inject
+  protected UserDAO userDAO;
+  @Inject
+  protected GroupDAO groupDAO;
+  @Inject
+  protected PermissionDAO permissionDAO;
+  @Inject
+  protected ResourceDAO resourceDAO;
+  @Inject
+  protected ResourceTypeDAO resourceTypeDAO;
+  @Inject
+  protected PrincipalTypeDAO principalTypeDAO;
+  @Inject
+  protected PrincipalDAO principalDAO;
+  @Inject
+  protected PasswordEncoder passwordEncoder;
 
   @Before
   public void setup() throws AmbariException {
-    //injector.injectMembers(this);
+    InMemoryDefaultTestModule module = new InMemoryDefaultTestModule();
+    injector = Guice.createInjector(module);
+    injector.getInstance(GuiceJpaInitializer.class);
+    injector.injectMembers(this);
     Authentication auth = new UsernamePasswordAuthenticationToken("admin", null);
     SecurityContextHolder.getContext().setAuthentication(auth);
 
-    if (resourceTypeDAO.findByName(ResourceType.AMBARI.name()) == null) {
-      // create admin permission
-      ResourceTypeEntity resourceTypeEntity = new ResourceTypeEntity();
-      resourceTypeEntity.setId(ResourceType.AMBARI.getId());
-      resourceTypeEntity.setName(ResourceType.AMBARI.name());
-      resourceTypeDAO.create(resourceTypeEntity);
-
-      ResourceEntity resourceEntity = new ResourceEntity();
-      resourceEntity.setId(ResourceEntity.AMBARI_RESOURCE_ID);
-      resourceEntity.setResourceType(resourceTypeEntity);
-      resourceDAO.create(resourceEntity);
-
-      PrincipalTypeEntity principalTypeEntity = new PrincipalTypeEntity();
-      principalTypeEntity.setName("ROLE");
-      principalTypeEntity = principalTypeDAO.merge(principalTypeEntity);
-
-      PrincipalEntity principalEntity = new PrincipalEntity();
-      principalEntity.setPrincipalType(principalTypeEntity);
-      principalEntity = principalDAO.merge(principalEntity);
-
-      PermissionEntity adminPermissionEntity = new PermissionEntity();
-      adminPermissionEntity.setId(PermissionEntity.AMBARI_ADMINISTRATOR_PERMISSION);
-      adminPermissionEntity.setPermissionName(PermissionEntity.AMBARI_ADMINISTRATOR_PERMISSION_NAME);
-      adminPermissionEntity.setPrincipal(principalEntity);
-      adminPermissionEntity.setResourceType(resourceTypeEntity);
-      permissionDAO.create(adminPermissionEntity);
-
-      users.createUser("admin", "admin", UserType.LOCAL, true, true);
-    }
-    cleanup();
-  }
+    // create admin permission
+    ResourceTypeEntity resourceTypeEntity = new ResourceTypeEntity();
+    resourceTypeEntity.setId(ResourceType.AMBARI.getId());
+    resourceTypeEntity.setName(ResourceType.AMBARI.name());
+    resourceTypeDAO.create(resourceTypeEntity);
 
-  @After
-  public void tearDown() throws AmbariException {
+    ResourceEntity resourceEntity = new ResourceEntity();
+    resourceEntity.setId(ResourceEntity.AMBARI_RESOURCE_ID);
+    resourceEntity.setResourceType(resourceTypeEntity);
+    resourceDAO.create(resourceEntity);
+
+    PrincipalTypeEntity principalTypeEntity = new PrincipalTypeEntity();
+    principalTypeEntity.setName("ROLE");
+    principalTypeEntity = principalTypeDAO.merge(principalTypeEntity);
+
+    PrincipalEntity principalEntity = new PrincipalEntity();
+    principalEntity.setPrincipalType(principalTypeEntity);
+    principalEntity = principalDAO.merge(principalEntity);
 
+    PermissionEntity adminPermissionEntity = new PermissionEntity();
+    adminPermissionEntity.setId(PermissionEntity.AMBARI_ADMINISTRATOR_PERMISSION);
+    adminPermissionEntity.setPermissionName(PermissionEntity.AMBARI_ADMINISTRATOR_PERMISSION_NAME);
+    adminPermissionEntity.setPrincipal(principalEntity);
+    adminPermissionEntity.setResourceType(resourceTypeEntity);
+    permissionDAO.create(adminPermissionEntity);
   }
 
-  @AfterClass
-  public static void afterClass() throws Exception {
+  @After
+  public void tearDown() throws AmbariException {
     injector.getInstance(PersistService.class).stop();
   }
 
-  private void cleanup() throws AmbariException {
-    for (User user : users.getAllUsers()) {
-      if (!user.getUserName().equals("admin")) {
-        users.removeUser(user);
-      }
-    }
-
-    for (Group group : users.getAllGroups()) {
-      users.removeGroup(group);
-    }
-  }
 
   @Test
   public void testIsUserCanBeRemoved() throws Exception {
-    //users.createUser("admin", "admin", UserType.LOCAL, true, true);
+    users.createUser("admin", "admin", UserType.LOCAL, true, true);
     users.createUser("admin222", "admin222", UserType.LOCAL, true, true);
 
     Assert.assertTrue(users.isUserCanBeRemoved(userDAO.findUserByName("admin")));
@@ -168,19 +141,8 @@ public class TestUsers {
   }
 
   @Test
-  public void testGetUserIfUnique() throws Exception {
-    users.createUser("user333", "user333", UserType.LOCAL, true, false);
-
-    Assert.assertNotNull(users.getUserIfUnique("user333"));
-
-    users.createUser("user333", "user333", UserType.LDAP, true, false);
-
-    Assert.assertNull(users.getUserIfUnique("user333"));
-  }
-
-  @Test
   public void testModifyPassword_UserByAdmin() throws Exception {
-    users.createUser("new_admin", "new_admin", UserType.LOCAL, true, true);
+    users.createUser("admin", "admin", UserType.LOCAL, true, true);
     users.createUser("user", "user");
 
     UserEntity userEntity = userDAO.findUserByName("user");
@@ -213,80 +175,81 @@ public class TestUsers {
   }
 
   @Test
-  public void testGetGroupMembers() throws Exception {
-    final String groupNameTwoMembers = "engineering";
-    final String groupNameZeroMembers = "management";
-    users.createGroup(groupNameTwoMembers);
-    users.createGroup(groupNameZeroMembers);
+  public void testCreateGetRemoveUser() throws Exception {
     users.createUser("user1", "user1");
-    users.createUser("user2", "user2");
-    users.addMemberToGroup(groupNameTwoMembers, "user1");
-    users.addMemberToGroup(groupNameTwoMembers, "user2");
-
-    assertEquals(users.getGroupMembers(groupNameTwoMembers).size(), 2);
-    assertEquals(users.getGroupMembers(groupNameZeroMembers).size(), 0);
-  }
+    users.createUser("user", "user", UserType.LOCAL, false, false);
+    users.createUser("user_ldap", "user_ldap", UserType.LDAP, true, true);
+    User createdUser = users.getUser("user", UserType.LOCAL);
+    User createdUser1 = users.getAnyUser("user1");
+    User createdLdapUser = users.getUser("user_ldap", UserType.LDAP);
 
-  @Test
-  public void testGetAllUsers() throws Exception {
-    Authentication auth = new UsernamePasswordAuthenticationToken("user", null);
-    SecurityContextHolder.getContext().setAuthentication(auth);
+    Assert.assertEquals("user1", createdUser1.getUserName());
+    Assert.assertEquals(true, createdUser1.isActive());
+    Assert.assertEquals(false, createdUser1.isLdapUser());
+    Assert.assertEquals(false, createdUser1.isAdmin());
 
-    users.createUser("user", "user");
+    Assert.assertEquals("user", createdUser.getUserName());
+    Assert.assertEquals(false, createdUser.isActive());
+    Assert.assertEquals(false, createdUser.isLdapUser());
+    Assert.assertEquals(false, createdUser.isAdmin());
 
-    List<User> userList = users.getAllUsers();
+    Assert.assertEquals("user_ldap", createdLdapUser.getUserName());
+    Assert.assertEquals(true, createdLdapUser.isActive());
+    Assert.assertEquals(true, createdLdapUser.isLdapUser());
+    Assert.assertEquals(true, createdLdapUser.isAdmin());
 
-    assertEquals(2, userList.size());
+    assertEquals("user", users.getAnyUser("user").getUserName());
+    assertEquals("user_ldap", users.getAnyUser("user_ldap").getUserName());
+    Assert.assertNull(users.getAnyUser("non_existing"));
 
-    for (User user : userList) {
-      assertEquals(false, user.isLdapUser());
+    // creating a duplicate user must fail
+    try {
+      users.createUser("user1", "user1");
+      Assert.fail("It shouldn't be possible to create duplicate user");
+    } catch (AmbariException e) {
     }
 
-    assertEquals(2, userDAO.findAll().size());
-
-    UserEntity userEntity = userDAO.findUserByName("user");
-    assertNotNull("user", userEntity.getUserPassword());
-
-    users.modifyPassword("user", "user", "resu");
-
-    assertNotSame(userEntity.getUserPassword(), userDAO.findUserByName("user").getUserPassword());
-  }
-
-  @Test
-  public void testGetAnyUser() throws Exception {
-    users.createUser("user", "user", UserType.LOCAL, true, false);
-    users.createUser("user_ldap", "user_ldap", UserType.LDAP, true, false);
+    try {
+      users.createUser("USER1", "user1");
+      Assert.fail("It shouldn't be possible to create duplicate user");
+    } catch (AmbariException e) {
+    }
 
-    assertEquals("user", users.getAnyUser("user").getUserName());
-    assertEquals("user_ldap", users.getAnyUser("user_ldap").getUserName());
-    Assert.assertNull(users.getAnyUser("non_existing"));
-  }
+    // test get all users
+    List<User> userList = users.getAllUsers();
 
-  @Test
-  public void testGetAnyUserCaseInsensitive() throws Exception {
-    users.createUser("user", "user", UserType.LOCAL, true, false);
-    users.createUser("user_ldap", "user_ldap", UserType.LDAP, true, false);
+    Assert.assertEquals(3, userList.size());
 
+    // check that getAnyUser is case-insensitive
     assertEquals("user", users.getAnyUser("USER").getUserName());
     assertEquals("user_ldap", users.getAnyUser("USER_LDAP").getUserName());
     Assert.assertNull(users.getAnyUser("non_existing"));
-  }
 
-  @Test
-  public void testGetUserById() throws Exception {
-    users.createUser("user", "user", UserType.LOCAL, true, false);
-    User createdUser = users.getUser("user", UserType.LOCAL);
+    // get user by id
     User userById = users.getUser(createdUser.getUserId());
 
     assertNotNull(userById);
     assertEquals(createdUser.getUserId(), userById.getUserId());
-  }
 
-  @Test
-  public void testGetUserByInvalidId() throws Exception {
-    User userById = users.getUser(-1);
+    // get user by invalid id
+    User userByInvalidId = users.getUser(-1);
+
+    assertNull(userByInvalidId);
+
+    // get user if unique
+    Assert.assertNotNull(users.getUserIfUnique("user"));
+
+    users.createUser("user", "user", UserType.LDAP, true, false);
+
+    Assert.assertNull(users.getUserIfUnique("user"));
+
+    // remove user
+    Assert.assertEquals(4, users.getAllUsers().size());
+
+    users.removeUser(users.getAnyUser("user1"));
 
-    assertNull(userById);
+    Assert.assertNull(users.getAnyUser("user1"));
+    Assert.assertEquals(3, users.getAllUsers().size());
   }
 
   @Test
@@ -336,16 +299,11 @@ public class TestUsers {
   }
 
   @Test
-  public void testCreateGroup() throws Exception {
-    final String groupName = "engineering";
-    users.createGroup(groupName);
-    assertNotNull(groupDAO.findGroupByName(groupName));
-  }
-
-  @Test
-  public void testGetGroup() throws Exception {
-    final String groupName = "engineering";
+  public void testCreateGetRemoveGroup() throws Exception {
+    final String groupName = "engineering1";
+    final String groupName2 = "engineering2";
     users.createGroup(groupName);
+    users.createGroup(groupName2);
 
     final Group group = users.getGroup(groupName);
     assertNotNull(group);
@@ -353,71 +311,46 @@ public class TestUsers {
     assertEquals(groupName, group.getGroupName());
 
     assertNotNull(groupDAO.findGroupByName(groupName));
-  }
-
-  @Test
-  public void testGetAllGroups() throws Exception {
-    users.createGroup("one");
-    users.createGroup("two");
 
+    // get all groups
     final List<Group> groupList = users.getAllGroups();
 
     assertEquals(2, groupList.size());
     assertEquals(2, groupDAO.findAll().size());
-  }
 
-  @Test
-  public void testRemoveGroup() throws Exception {
-    final String groupName = "engineering";
-    users.createGroup(groupName);
-    final Group group = users.getGroup(groupName);
-    assertEquals(1, users.getAllGroups().size());
+    // remove group
     users.removeGroup(group);
-    assertEquals(0, users.getAllGroups().size());
-  }
-
-  @Test
-  public void testAddMemberToGroup() throws Exception {
-    final String groupName = "engineering";
-    users.createGroup(groupName);
-    users.createUser("user", "user");
-    users.addMemberToGroup(groupName, "user");
-    assertEquals(1, groupDAO.findGroupByName(groupName).getMemberEntities().size());
+    assertNull(users.getGroup(group.getGroupName()));
+    assertEquals(1, users.getAllGroups().size());
   }
 
   @Test
-  public void testGetAllMembers() throws Exception {
+  public void testMembers() throws Exception {
     final String groupName = "engineering";
+    final String groupName2 = "engineering2";
     users.createGroup(groupName);
+    users.createGroup(groupName2);
     users.createUser("user1", "user1");
     users.createUser("user2", "user2");
     users.createUser("user3", "user3");
     users.addMemberToGroup(groupName, "user1");
     users.addMemberToGroup(groupName, "user2");
     assertEquals(2, users.getAllMembers(groupName).size());
+    assertEquals(0, users.getAllMembers(groupName2).size());
 
     try {
       users.getAllMembers("non existing");
       Assert.fail("It shouldn't be possible to call getAllMembers() on non-existing group");
     } catch (Exception ex) {
     }
-  }
-
-  @Test
-  public void testRemoveMemberFromGroup() throws Exception {
-    final String groupName = "engineering";
-    users.createGroup(groupName);
-    users.createUser("user", "user");
-    users.addMemberToGroup(groupName, "user");
-    assertEquals(1, groupDAO.findGroupByName(groupName).getMemberEntities().size());
-    users.removeMemberFromGroup(groupName, "user");
-    assertEquals(0, groupDAO.findGroupByName(groupName).getMemberEntities().size());
-  }
 
+    // get members from a non-existing group
+    assertEquals(users.getGroupMembers("unexisting"), null);
 
-  @Test
-  public void testGetGroupMembersUnexistingGroup() throws Exception {
-   assertEquals(users.getGroupMembers("unexisting"), null);
+    // remove member from group
+    users.removeMemberFromGroup(groupName, "user1");
+    assertEquals(1, groupDAO.findGroupByName(groupName).getMemberEntities().size());
+    assertEquals("user2", groupDAO.findGroupByName(groupName).getMemberEntities().iterator().next().getUser().getUserName());
   }
 
   @Test
@@ -477,17 +410,6 @@ public class TestUsers {
   }
 
   @Test
-  public void testCreateUserTwoParams() throws Exception {
-    users.createUser("user", "user");
-
-    final User createdUser = users.getAnyUser("user");
-    Assert.assertEquals("user", createdUser.getUserName());
-    Assert.assertEquals(true, createdUser.isActive());
-    Assert.assertEquals(false, createdUser.isLdapUser());
-    Assert.assertEquals(false, createdUser.isAdmin());
-  }
-
-  @Test
   @Ignore // TODO @Transactional annotation breaks this test
   public void testCreateUserDefaultParams() throws Exception {
     final Users spy = Mockito.spy(users);
@@ -496,47 +418,4 @@ public class TestUsers {
   }
 
 
-  @Test(expected = AmbariException.class)
-  public void testCreateUserDuplicate() throws Exception {
-    users.createUser("user", "user");
-    users.createUser("user", "user");
-  }
-
-  @Test(expected = AmbariException.class)
-  public void testCreateUserDuplicateCaseInsensitive() throws Exception {
-    users.createUser("user", "user");
-    users.createUser("USER", "user");
-  }
-
-  @Test
-  public void testRemoveUser() throws Exception {
-    users.createUser("user1", "user1");
-    users.createUser("user2", "user2");
-    users.createUser("user3", "user3");
-    Assert.assertEquals(4, users.getAllUsers().size());
-
-    users.removeUser(users.getAnyUser("user1"));
-
-    Assert.assertNull(users.getAnyUser("user1"));
-    Assert.assertEquals(3, users.getAllUsers().size());
-  }
-
-  @Test
-  public void testCreateUserFiveParams() throws Exception {
-    users.createUser("user", "user", UserType.LOCAL, false, false);
-
-    final User createdUser = users.getAnyUser("user");
-    Assert.assertEquals("user", createdUser.getUserName());
-    Assert.assertEquals(false, createdUser.isActive());
-    Assert.assertEquals(false, createdUser.isLdapUser());
-    Assert.assertEquals(false, createdUser.isAdmin());
-
-    users.createUser("user2", "user2", UserType.LDAP, true, true);
-    final User createdUser2 = users.getAnyUser("user2");
-    Assert.assertEquals("user2", createdUser2.getUserName());
-    Assert.assertEquals(true, createdUser2.isActive());
-    Assert.assertEquals(true, createdUser2.isLdapUser());
-    Assert.assertEquals(true, createdUser2.isAdmin());
-  }
-
 }

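The TestUsers rewrite above is one instance of the pattern this whole commit applies: static @BeforeClass service location is replaced by Guice field injection with a fresh injector per test method. A minimal sketch of that lifecycle (SomeDao is a hypothetical stand-in for UserDAO, GroupDAO, etc.; the Ambari-specific imports for InMemoryDefaultTestModule and GuiceJpaInitializer are elided):

  import com.google.inject.Guice;
  import com.google.inject.Inject;
  import com.google.inject.Injector;
  import com.google.inject.persist.PersistService;
  import org.junit.After;
  import org.junit.Before;
  import org.junit.Test;

  public class PerTestInjectorSketch {
    private Injector injector;

    @Inject
    private SomeDao someDao; // hypothetical; stands in for the @Inject DAOs above

    @Before
    public void setup() {
      // A fresh in-memory module per test means no state leaks between
      // test methods, which is why the cleanup() helper could be deleted.
      injector = Guice.createInjector(new InMemoryDefaultTestModule());
      injector.getInstance(GuiceJpaInitializer.class);
      injector.injectMembers(this); // populates the @Inject fields
    }

    @After
    public void tearDown() {
      injector.getInstance(PersistService.class).stop();
    }

    @Test
    public void testUsesInjectedDao() {
      // someDao is ready here without any static class-level setup
    }
  }
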
http://git-wip-us.apache.org/repos/asf/ambari/blob/392a752d/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
index bf72e0c..5462f7b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
@@ -43,9 +43,7 @@ import org.apache.ambari.server.orm.dao.HostDAO;
 import org.apache.ambari.server.orm.dao.HostVersionDAO;
 import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
 import org.apache.ambari.server.orm.dao.StackDAO;
-import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
 import org.apache.ambari.server.orm.entities.HostVersionEntity;
-import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.serveraction.ServerAction;
 import org.apache.ambari.server.state.Cluster;
@@ -66,13 +64,12 @@ import org.apache.ambari.server.state.stack.upgrade.PropertyKeyState;
 import org.apache.ambari.server.state.stack.upgrade.TransferCoercionType;
 import org.apache.ambari.server.state.stack.upgrade.TransferOperation;
 import org.junit.After;
-import org.junit.AfterClass;
 import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
 
 import com.google.gson.Gson;
 import com.google.inject.Guice;
+import com.google.inject.Inject;
 import com.google.inject.Injector;
 import com.google.inject.persist.PersistService;
 
@@ -86,84 +83,45 @@ public class ConfigureActionTest {
   private static final StackId HDP_211_STACK = new StackId("HDP-2.1.1");
   private static final StackId HDP_220_STACK = new StackId("HDP-2.2.0");
 
-  private static Injector m_injector;
-  private static OrmTestHelper m_helper;
-  private static RepositoryVersionDAO repoVersionDAO;
-  private static HostVersionDAO hostVersionDAO;
-  private static HostRoleCommandFactory hostRoleCommandFactory;
-  private static ServiceFactory serviceFactory;
-  private static ConfigHelper m_configHelper;
-  private static Clusters clusters;
-  private static ClusterVersionDAO clusterVersionDAO;
-  private static ConfigFactory cf;
-  private static ConfigureAction action;
-  private static HostDAO hostDAO;
-
-  @BeforeClass
-  public static void classSetUp() throws Exception {
-    m_injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    m_injector.getInstance(GuiceJpaInitializer.class);
-
-    m_helper = m_injector.getInstance(OrmTestHelper.class);
-    repoVersionDAO = m_injector.getInstance(RepositoryVersionDAO.class);
-    hostVersionDAO = m_injector.getInstance(HostVersionDAO.class);
-    hostRoleCommandFactory = m_injector.getInstance(HostRoleCommandFactory.class);
-    serviceFactory = m_injector.getInstance(ServiceFactory.class);
-    m_configHelper = m_injector.getInstance(ConfigHelper.class);
-    clusters = m_injector.getInstance(Clusters.class);
-    clusterVersionDAO = m_injector.getInstance(ClusterVersionDAO.class);
-    cf = m_injector.getInstance(ConfigFactory.class);
-    action = m_injector.getInstance(ConfigureAction.class);
-    hostDAO = m_injector.getInstance(HostDAO.class);
-  }
+  @Inject
+  private Injector m_injector;
+  @Inject
+  private OrmTestHelper m_helper;
+  @Inject
+  private RepositoryVersionDAO repoVersionDAO;
+  @Inject
+  private HostVersionDAO hostVersionDAO;
+  @Inject
+  private HostRoleCommandFactory hostRoleCommandFactory;
+  @Inject
+  private ServiceFactory serviceFactory;
+  @Inject
+  private ConfigHelper m_configHelper;
+  @Inject
+  private Clusters clusters;
+  @Inject
+  private ClusterVersionDAO clusterVersionDAO;
+  @Inject
+  private ConfigFactory cf;
+  @Inject
+  private ConfigureAction action;
+  @Inject
+  private HostDAO hostDAO;
+  @Inject
+  private StackDAO stackDAO;
 
   @Before
   public void setup() throws Exception {
-
+    m_injector = Guice.createInjector(new InMemoryDefaultTestModule());
+    m_injector.getInstance(GuiceJpaInitializer.class);
+    m_injector.injectMembers(this);
   }
 
   @After
   public void teardown() throws Exception {
-    cleanup();
-  }
-
-  @AfterClass
-  public static void afterClass() throws Exception {
     m_injector.getInstance(PersistService.class).stop();
   }
 
-  private void cleanup() throws AmbariException {
-    Map<String, Cluster> clusterMap = clusters.getClusters();
-
-    List<ClusterVersionEntity> clusterVersionEntities = clusterVersionDAO.findAll();
-    if (clusterVersionEntities != null) {
-      for (ClusterVersionEntity cve : clusterVersionEntities) {
-        clusterVersionDAO.remove(cve);
-      }
-    }
-
-    List<RepositoryVersionEntity> repositoryVersionEntities = repoVersionDAO.findAll();
-    if (repositoryVersionEntities != null) {
-      for (RepositoryVersionEntity rve : repositoryVersionEntities) {
-        repoVersionDAO.remove(rve);
-      }
-    }
-
-    List<HostVersionEntity> hostVersionEntities = hostVersionDAO.findAll();
-    if (clusterVersionEntities != null) {
-      for (HostVersionEntity hve : hostVersionEntities) {
-        hostVersionDAO.remove(hve);
-      }
-    }
-
-    for (String clusterName : clusterMap.keySet()) {
-      clusters.deleteCluster(clusterName);
-    }
-
-    for (Host host : clusters.getHosts()) {
-      clusters.deleteHost(host.getHostName());
-    }
-  }
 
   @Test
   public void testConfigActionUpgradeAcrossStack() throws Exception {
@@ -1703,7 +1661,7 @@ public class ConfigureActionTest {
 
     clusters.addCluster(clusterName, HDP_220_STACK);
 
-    StackDAO stackDAO = m_injector.getInstance(StackDAO.class);
+
     StackEntity stackEntity = stackDAO.find(HDP_220_STACK.getStackName(),
         HDP_220_STACK.getStackVersion());
 

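One detail worth noting in the ConfigureActionTest hunks: an in-test service lookup moves to a class-level @Inject field, so every test sees the same wiring path. In sketch form (both shapes taken from the patch):

  // before: looked up inside the test body
  StackDAO stackDAO = m_injector.getInstance(StackDAO.class);

  // after: declared once and populated by injector.injectMembers(this) in setup()
  @Inject
  private StackDAO stackDAO;
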
http://git-wip-us.apache.org/repos/asf/ambari/blob/392a752d/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
index f1013c2..50ce7b4 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
@@ -57,8 +57,6 @@ import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
 import org.apache.ambari.server.orm.dao.StackDAO;
 import org.apache.ambari.server.orm.dao.UpgradeDAO;
 import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
-import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
 import org.apache.ambari.server.orm.entities.HostVersionEntity;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.orm.entities.RequestEntity;
@@ -85,10 +83,8 @@ import org.apache.ambari.server.state.stack.UpgradePack;
 import org.apache.ambari.server.state.stack.upgrade.Direction;
 import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
 import org.junit.After;
-import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
 
 import com.google.gson.Gson;
@@ -96,6 +92,7 @@ import com.google.gson.JsonArray;
 import com.google.gson.JsonElement;
 import com.google.gson.JsonObject;
 import com.google.inject.Guice;
+import com.google.inject.Inject;
 import com.google.inject.Injector;
 import com.google.inject.persist.PersistService;
 import com.google.inject.persist.UnitOfWork;
@@ -117,122 +114,67 @@ public class UpgradeActionTest {
 
   private static final String HDP_211_CENTOS6_REPO_URL = "http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.1.1.0-118";
 
-  private static Injector m_injector;
-  private static AmbariManagementController amc;
-  private static OrmTestHelper m_helper;
-  private static RepositoryVersionDAO repoVersionDAO;
-  private static Clusters clusters;
-  private static ClusterVersionDAO clusterVersionDAO;
-  private static HostVersionDAO hostVersionDAO;
-  private static HostDAO hostDAO;
-  private static HostRoleCommandFactory hostRoleCommandFactory;
-  private static ServiceFactory serviceFactory;
-  private static ServiceComponentFactory serviceComponentFactory;
-  private static ServiceComponentHostFactory serviceComponentHostFactory;
-  private static RequestDAO requestDAO;
-  private static UpgradeDAO upgradeDAO;
-  private static ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO;
-  private static HostComponentDesiredStateDAO hostComponentDesiredStateDAO;
-  private static HostComponentStateDAO hostComponentStateDAO;
-  private static StackDAO stackDAO;
-  private static AmbariMetaInfo ambariMetaInfo;
-  private static FinalizeUpgradeAction finalizeUpgradeAction;
-
-  @BeforeClass
-  public static void classSetUp() throws NoSuchFieldException, IllegalAccessException {
+  private Injector m_injector;
+
+  private AmbariManagementController amc;
+  @Inject
+  private OrmTestHelper m_helper;
+  @Inject
+  private RepositoryVersionDAO repoVersionDAO;
+  @Inject
+  private Clusters clusters;
+  @Inject
+  private ClusterVersionDAO clusterVersionDAO;
+  @Inject
+  private HostVersionDAO hostVersionDAO;
+  @Inject
+  private HostDAO hostDAO;
+  @Inject
+  private HostRoleCommandFactory hostRoleCommandFactory;
+  @Inject
+  private ServiceFactory serviceFactory;
+  @Inject
+  private ServiceComponentFactory serviceComponentFactory;
+  @Inject
+  private ServiceComponentHostFactory serviceComponentHostFactory;
+  @Inject
+  private RequestDAO requestDAO;
+  @Inject
+  private UpgradeDAO upgradeDAO;
+  @Inject
+  private ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO;
+  @Inject
+  private HostComponentDesiredStateDAO hostComponentDesiredStateDAO;
+  @Inject
+  private HostComponentStateDAO hostComponentStateDAO;
+  @Inject
+  private StackDAO stackDAO;
+  @Inject
+  private AmbariMetaInfo ambariMetaInfo;
+  @Inject
+  private FinalizeUpgradeAction finalizeUpgradeAction;
+
+  @Before
+  public void setup() throws Exception {
     m_injector = Guice.createInjector(new InMemoryDefaultTestModule());
     m_injector.getInstance(GuiceJpaInitializer.class);
+    m_injector.injectMembers(this);
+    m_injector.getInstance(UnitOfWork.class).begin();
 
-    m_helper = m_injector.getInstance(OrmTestHelper.class);
-    repoVersionDAO = m_injector.getInstance(RepositoryVersionDAO.class);
-    clusters = m_injector.getInstance(Clusters.class);
-    clusterVersionDAO = m_injector.getInstance(ClusterVersionDAO.class);
-    hostVersionDAO = m_injector.getInstance(HostVersionDAO.class);
-    hostDAO = m_injector.getInstance(HostDAO.class);
-    hostRoleCommandFactory = m_injector.getInstance(HostRoleCommandFactory.class);
-    serviceFactory = m_injector.getInstance(ServiceFactory.class);
-    serviceComponentFactory = m_injector.getInstance(ServiceComponentFactory.class);
-    serviceComponentHostFactory = m_injector.getInstance(ServiceComponentHostFactory.class);
-    requestDAO = m_injector.getInstance(RequestDAO.class);
-    upgradeDAO = m_injector.getInstance(UpgradeDAO.class);
-    serviceComponentDesiredStateDAO = m_injector.getInstance(ServiceComponentDesiredStateDAO.class);
-    hostComponentDesiredStateDAO = m_injector.getInstance(HostComponentDesiredStateDAO.class);
-    hostComponentStateDAO = m_injector.getInstance(HostComponentStateDAO.class);
+    // Initialize AmbariManagementController
     amc = m_injector.getInstance(AmbariManagementController.class);
-    stackDAO = m_injector.getInstance(StackDAO.class);
-    ambariMetaInfo = m_injector.getInstance(AmbariMetaInfo.class);
-    finalizeUpgradeAction = m_injector.getInstance(FinalizeUpgradeAction.class);
 
     Field field = AmbariServer.class.getDeclaredField("clusterController");
     field.setAccessible(true);
     field.set(null, amc);
   }
 
-  @Before
-  public void setup() throws Exception {
-    cleanup();
-    m_injector.getInstance(UnitOfWork.class).begin();
-  }
-
   @After
   public void teardown() throws Exception {
     m_injector.getInstance(UnitOfWork.class).end();
-    cleanup();
-  }
-
-  @AfterClass
-  public static void afterClass() throws Exception {
     m_injector.getInstance(PersistService.class).stop();
   }
 
-  private void cleanup() throws AmbariException {
-    Map<String, Cluster> clusterMap = clusters.getClusters();
-
-    List<ClusterVersionEntity> clusterVersionEntities = clusterVersionDAO.findAll();
-    if (clusterVersionEntities != null) {
-      for (ClusterVersionEntity cve : clusterVersionEntities) {
-        clusterVersionDAO.remove(cve);
-      }
-    }
-
-    List<RepositoryVersionEntity> repositoryVersionEntities = repoVersionDAO.findAll();
-    if (repositoryVersionEntities != null) {
-      for (RepositoryVersionEntity rve : repositoryVersionEntities) {
-        repoVersionDAO.remove(rve);
-      }
-    }
-
-    List<HostVersionEntity> hostVersionEntities = hostVersionDAO.findAll();
-    if (clusterVersionEntities != null) {
-      for (HostVersionEntity hve : hostVersionEntities) {
-        hostVersionDAO.remove(hve);
-      }
-    }
-
-    List<HostComponentDesiredStateEntity> hostComponentDesiredStateEntities = hostComponentDesiredStateDAO.findAll();
-    if (hostComponentDesiredStateEntities != null) {
-      for (HostComponentDesiredStateEntity hcdse : hostComponentDesiredStateEntities) {
-        hostComponentDesiredStateDAO.remove(hcdse);
-      }
-    }
-
-    List<HostComponentStateEntity> hostComponentStateEntities = hostComponentStateDAO.findAll();
-    if (hostComponentStateEntities != null) {
-      for (HostComponentStateEntity hcse : hostComponentStateEntities) {
-        hostComponentStateDAO.remove(hcse);
-      }
-    }
-
-    for (String clusterName : clusterMap.keySet()) {
-      clusters.deleteCluster(clusterName);
-    }
-
-    for (Host host : clusters.getHosts()) {
-      clusters.deleteHost(host.getHostName());
-    }
-
-  }
-
   private void makeDowngradeCluster(StackId sourceStack, String sourceRepo, StackId targetStack, String targetRepo) throws Exception {
     String hostName = "h1";
 

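UpgradeActionTest adds one extra wrinkle to the shared pattern: a Guice persist UnitOfWork is opened and closed around every test, pairing begin() in setup with end() in teardown before the PersistService stops. The lifecycle, extracted from the patch:

  @Before
  public void setup() throws Exception {
    m_injector = Guice.createInjector(new InMemoryDefaultTestModule());
    m_injector.getInstance(GuiceJpaInitializer.class);
    m_injector.injectMembers(this);
    m_injector.getInstance(UnitOfWork.class).begin(); // scope a session to this test
  }

  @After
  public void teardown() throws Exception {
    m_injector.getInstance(UnitOfWork.class).end();      // close the session first...
    m_injector.getInstance(PersistService.class).stop(); // ...then stop persistence
  }
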
http://git-wip-us.apache.org/repos/asf/ambari/blob/392a752d/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java
index 897e955..8f00b72 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java
@@ -20,8 +20,6 @@ package org.apache.ambari.server.state;
 
 import junit.framework.Assert;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.util.HashMap;
@@ -35,9 +33,7 @@ import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.dao.ClusterServiceDAO;
 import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
 import org.junit.After;
-import org.junit.AfterClass;
 import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
 
 import com.google.inject.Guice;
@@ -46,17 +42,17 @@ import com.google.inject.persist.PersistService;
 
 public class ServiceTest {
 
-  private static Clusters clusters;
-  private static Cluster cluster;
-  private static String clusterName;
-  private static Injector injector;
-  private static ServiceFactory serviceFactory;
-  private static ServiceComponentFactory serviceComponentFactory;
-  private static ServiceComponentHostFactory serviceComponentHostFactory;
-  private static AmbariMetaInfo metaInfo;
-
-  @BeforeClass
-  public static void classSetUp() throws Exception {
+  private Clusters clusters;
+  private Cluster cluster;
+  private String clusterName;
+  private Injector injector;
+  private ServiceFactory serviceFactory;
+  private ServiceComponentFactory serviceComponentFactory;
+  private ServiceComponentHostFactory serviceComponentHostFactory;
+  private AmbariMetaInfo metaInfo;
+
+  @Before
+  public void setup() throws Exception {
     injector = Guice.createInjector(new InMemoryDefaultTestModule());
     injector.getInstance(GuiceJpaInitializer.class);
     clusters = injector.getInstance(Clusters.class);
@@ -72,26 +68,11 @@ public class ServiceTest {
     Assert.assertNotNull(cluster);
   }
 
-  @Before
-  public void setup() throws Exception {
-
-  }
-
   @After
   public void teardown() throws AmbariException {
-    //injector.getInstance(PersistService.class).stop();
-    cleanup();
-  }
-
-  @AfterClass
-  public static void afterClass() throws Exception {
     injector.getInstance(PersistService.class).stop();
   }
 
-  private void cleanup() throws AmbariException {
-    cluster.deleteAllServices();
-  }
-
   @Test
   public void testCanBeRemoved() throws Exception{
     Service service = cluster.addService("HDFS");
@@ -134,26 +115,6 @@ public class ServiceTest {
   }
 
   @Test
-  public void testCreateService() throws AmbariException {
-    String serviceName = "HDFS";
-    Service s = serviceFactory.createNew(cluster, serviceName);
-    cluster.addService(s);
-    s.persist();
-    Service service = cluster.getService(serviceName);
-
-    Assert.assertNotNull(service);
-    Assert.assertEquals(serviceName, service.getName());
-    Assert.assertEquals(cluster.getClusterId(),
-        service.getCluster().getClusterId());
-    Assert.assertEquals(cluster.getClusterName(),
-        service.getCluster().getClusterName());
-    Assert.assertEquals(State.INIT, service.getDesiredState());
-    Assert.assertEquals(SecurityState.UNSECURED, service.getSecurityState());
-    Assert.assertFalse(
-        service.getDesiredStackVersion().getStackId().isEmpty());
-  }
-
-  @Test
   public void testGetAndSetServiceInfo() throws AmbariException {
     String serviceName = "HDFS";
     Service s = serviceFactory.createNew(cluster, serviceName);
@@ -176,7 +137,7 @@ public class ServiceTest {
 
 
   @Test
-  public void testAddAndGetServiceComponents() throws AmbariException {
+  public void testAddGetDeleteServiceComponents() throws AmbariException {
     String serviceName = "HDFS";
     Service s = serviceFactory.createNew(cluster, serviceName);
     cluster.addService(s);
@@ -185,6 +146,15 @@ public class ServiceTest {
     Service service = cluster.getService(serviceName);
 
     Assert.assertNotNull(service);
+    Assert.assertEquals(serviceName, service.getName());
+    Assert.assertEquals(cluster.getClusterId(),
+            service.getCluster().getClusterId());
+    Assert.assertEquals(cluster.getClusterName(),
+            service.getCluster().getClusterName());
+    Assert.assertEquals(State.INIT, service.getDesiredState());
+    Assert.assertEquals(SecurityState.UNSECURED, service.getSecurityState());
+    Assert.assertFalse(
+            service.getDesiredStackVersion().getStackId().isEmpty());
 
     Assert.assertTrue(s.getServiceComponents().isEmpty());
 
@@ -240,6 +210,10 @@ public class ServiceTest {
     Assert.assertEquals(State.INSTALLING,
         s.getServiceComponent("HDFS_CLIENT").getDesiredState());
 
+    // delete service component
+    s.deleteServiceComponent("NAMENODE");
+
+    assertEquals(3, s.getServiceComponents().size());
   }
 
   @Test
@@ -289,36 +263,6 @@ public class ServiceTest {
   }
 
   @Test
-  public void testDeleteServiceComponent() throws Exception {
-    Service hdfs = cluster.addService("HDFS");
-    Service mapReduce = cluster.addService("MAPREDUCE");
-
-    hdfs.persist();
-
-    ServiceComponent nameNode = hdfs.addServiceComponent("NAMENODE");
-    nameNode.persist();
-    ServiceComponent jobTracker = mapReduce.addServiceComponent("JOBTRACKER");
-
-    assertEquals(2, cluster.getServices().size());
-    assertEquals(1, hdfs.getServiceComponents().size());
-    assertEquals(1, mapReduce.getServiceComponents().size());
-    assertTrue(hdfs.isPersisted());
-    assertFalse(mapReduce.isPersisted());
-
-    hdfs.deleteServiceComponent("NAMENODE");
-
-    assertEquals(0, hdfs.getServiceComponents().size());
-    assertEquals(1, mapReduce.getServiceComponents().size());
-
-    mapReduce.deleteServiceComponent("JOBTRACKER");
-
-    assertEquals(0, hdfs.getServiceComponents().size());
-    assertEquals(0, mapReduce.getServiceComponents().size());
-
-  }
-
-
-  @Test
   public void testServiceMaintenance() throws Exception {
     String serviceName = "HDFS";
     Service s = serviceFactory.createNew(cluster, serviceName);

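Since ServiceTest's create/get/delete coverage is now folded into the single testAddGetDeleteServiceComponents above, the delete step relies on a component count: removing NAMENODE leaves three service components, which implies the earlier (unshown) portion of the test registers four. A hedged reading of that assertion:

  // delete one of the four components registered earlier in the test
  s.deleteServiceComponent("NAMENODE");
  assertEquals(3, s.getServiceComponents().size());
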
http://git-wip-us.apache.org/repos/asf/ambari/blob/392a752d/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
index bf58967..243cb4f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
@@ -65,117 +65,74 @@ import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
 import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
 import org.junit.After;
-import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.inject.Guice;
+import com.google.inject.Inject;
 import com.google.inject.Injector;
 import com.google.inject.persist.PersistService;
 
 public class ServiceComponentHostTest {
   private static Logger LOG = LoggerFactory.getLogger(ServiceComponentHostTest.class);
-
-  private static Injector injector;
-  private static Clusters clusters;
-  private static ServiceFactory serviceFactory;
-  private static ServiceComponentFactory serviceComponentFactory;
-  private static ServiceComponentHostFactory serviceComponentHostFactory;
-  private static ConfigFactory configFactory;
-  private static ConfigGroupFactory configGroupFactory;
-  private static OrmTestHelper helper;
-  private static ClusterDAO clusterDAO;
-  private static HostDAO hostDAO;
-  private static HostComponentDesiredStateDAO hostComponentDesiredStateDAO;
-  private static HostComponentStateDAO hostComponentStateDAO;
+  @Inject
+  private Injector injector;
+  @Inject
+  private Clusters clusters;
+  @Inject
+  private ServiceFactory serviceFactory;
+  @Inject
+  private ServiceComponentFactory serviceComponentFactory;
+  @Inject
+  private ServiceComponentHostFactory serviceComponentHostFactory;
+  @Inject
+  private ConfigFactory configFactory;
+  @Inject
+  private ConfigGroupFactory configGroupFactory;
+  @Inject
+  private OrmTestHelper helper;
+  @Inject
+  private ClusterDAO clusterDAO;
+  @Inject
+  private HostDAO hostDAO;
+  @Inject
+  private HostComponentDesiredStateDAO hostComponentDesiredStateDAO;
+  @Inject
+  private HostComponentStateDAO hostComponentStateDAO;
 
   private String clusterName = "c1";
   private String hostName1 = "h1";
   private Map<String, String> hostAttributes = new HashMap<String, String>();
 
-  @BeforeClass
-  public static void classSetUp() {
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-    clusters = injector.getInstance(Clusters.class);
-    serviceFactory = injector.getInstance(ServiceFactory.class);
-    serviceComponentFactory = injector.getInstance(ServiceComponentFactory.class);
-    serviceComponentHostFactory = injector.getInstance(ServiceComponentHostFactory.class);
-    configFactory = injector.getInstance(ConfigFactory.class);
-    configGroupFactory = injector.getInstance(ConfigGroupFactory.class);
-    helper = injector.getInstance(OrmTestHelper.class);
-    clusterDAO = injector.getInstance(ClusterDAO.class);
-    hostDAO = injector.getInstance(HostDAO.class);
-    hostComponentDesiredStateDAO = injector.getInstance(HostComponentDesiredStateDAO.class);
-    hostComponentStateDAO = injector.getInstance(HostComponentStateDAO.class);
-  }
 
   @Before
   public void setup() throws Exception {
-    cleanup();
-
-    if (clusters.getClusters().size() == 0) {
-      StackId stackId = new StackId("HDP-0.1");
-      createCluster(stackId, clusterName);
-      hostAttributes.put("os_family", "redhat");
-      hostAttributes.put("os_release_version", "5.9");
-
-      Set<String> hostNames = new HashSet<String>();
-      hostNames.add(hostName1);
-      addHostsToCluster(clusterName, hostAttributes, hostNames);
-
-      Cluster c1 = clusters.getCluster(clusterName);
-      helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
-      c1.createClusterVersion(stackId, stackId.getStackVersion(), "admin",
-          RepositoryVersionState.INSTALLING);
-    }
-  }
-
-  @After
-  public void teardown() throws AmbariException {
-    cleanup();
-  }
-
-  @AfterClass
-  public static void afterClass() throws Exception {
-    injector.getInstance(PersistService.class).stop();
-  }
-
-  private void cleanup() throws AmbariException {
-    try {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      List<HostComponentDesiredStateEntity> hostComponentDesiredStateEntities = hostComponentDesiredStateDAO.findAll();
-      if (hostComponentDesiredStateEntities != null) {
-        for (HostComponentDesiredStateEntity hcdse : hostComponentDesiredStateEntities) {
-          hostComponentDesiredStateDAO.remove(hcdse);
-        }
-      }
+    injector = Guice.createInjector(new InMemoryDefaultTestModule());
+    injector.getInstance(GuiceJpaInitializer.class);
+    injector.injectMembers(this);
 
-      List<HostComponentStateEntity> hostComponentStateEntities = hostComponentStateDAO.findAll();
-      if (hostComponentStateEntities != null) {
-        for (HostComponentStateEntity hcse : hostComponentStateEntities) {
-          hostComponentStateDAO.remove(hcse);
-        }
-      }
+    StackId stackId = new StackId("HDP-0.1");
+    createCluster(stackId, clusterName);
+    hostAttributes.put("os_family", "redhat");
+    hostAttributes.put("os_release_version", "5.9");
 
-      for (String clusterName : clusterMap.keySet()) {
-        clusters.deleteCluster(clusterName);
-      }
+    Set<String> hostNames = new HashSet<String>();
+    hostNames.add(hostName1);
+    addHostsToCluster(clusterName, hostAttributes, hostNames);
 
-      for (Host host : clusters.getHosts()) {
-        clusters.deleteHost(host.getHostName());
-      }
+    Cluster c1 = clusters.getCluster(clusterName);
+    helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
+    c1.createClusterVersion(stackId, stackId.getStackVersion(), "admin",
+            RepositoryVersionState.INSTALLING);
+  }
 
-      for (String clusterName : clusterMap.keySet()) {
-        clusters.deleteCluster(clusterName);
-      }
-    } catch (IllegalStateException ise) {}
+  @After
+  public void teardown() {
+    injector.getInstance(PersistService.class).stop();
   }
 
   private ClusterEntity createCluster(StackId stackId, String clusterName) throws AmbariException {
@@ -263,12 +220,6 @@ public class ServiceComponentHostTest {
     return impl;
   }
 
-  @Test
-  public void testNewServiceComponentHost() throws AmbariException{
-    createNewServiceComponentHost(clusterName, "HDFS", "NAMENODE", hostName1, false);
-    createNewServiceComponentHost(clusterName, "HDFS", "HDFS_CLIENT", hostName1, true);
-  }
-
   private ServiceComponentHostEvent createEvent(ServiceComponentHostImpl impl,
       long timestamp, ServiceComponentHostEventType eventType)
       throws AmbariException {
@@ -521,6 +472,18 @@ public class ServiceComponentHostTest {
         State.WIPING_OUT,
         State.INIT);
 
+    // check can be removed
+    for (State state : State.values()) {
+      impl.setState(state);
+
+      if (state.isRemovableState()) {
+        Assert.assertTrue(impl.canBeRemoved());
+      }
+      else {
+        Assert.assertFalse(impl.canBeRemoved());
+      }
+    }
+
   }
 
   @Test
@@ -753,22 +716,6 @@ public class ServiceComponentHostTest {
     }
   }
 
-  @Test
-  public void testCanBeRemoved() throws Exception{
-    ServiceComponentHostImpl impl = (ServiceComponentHostImpl)
-        createNewServiceComponentHost(clusterName, "HDFS", "HDFS_CLIENT", hostName1, true);
-
-    for (State state : State.values()) {
-      impl.setState(state);
-
-      if (state.isRemovableState()) {
-        Assert.assertTrue(impl.canBeRemoved());
-      }
-      else {
-        Assert.assertFalse(impl.canBeRemoved());
-      }
-    }
-  }
 
   @Test
   public void testStaleConfigs() throws Exception {
@@ -1124,8 +1071,8 @@ public class ServiceComponentHostTest {
     Assert.assertNotNull(hostEntity);
 
     ServiceComponentHost sch1 = createNewServiceComponentHost(cluster, "HDFS", "NAMENODE", hostName);
-    ServiceComponentHost sch2 = createNewServiceComponentHost(cluster, "HDFS", "DATANODE", hostName);
-    ServiceComponentHost sch3 = createNewServiceComponentHost(cluster, "MAPREDUCE2", "HISTORYSERVER", hostName);
+    //ServiceComponentHost sch2 = createNewServiceComponentHost(cluster, "HDFS", "DATANODE", hostName);
+    //ServiceComponentHost sch3 = createNewServiceComponentHost(cluster, "MAPREDUCE2", "HISTORYSERVER", hostName);
 
     HostComponentDesiredStateEntityPK pk = new HostComponentDesiredStateEntityPK();
     pk.setClusterId(Long.valueOf(cluster.getClusterId()));

http://git-wip-us.apache.org/repos/asf/ambari/blob/392a752d/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
index e151d73..3bff060 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
@@ -33,7 +33,6 @@ import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
 import static org.easymock.EasyMock.verify;
 
 import java.io.File;
@@ -88,11 +87,7 @@ import org.apache.ambari.server.state.stack.OsFamily;
 import org.easymock.Capture;
 import org.easymock.CaptureType;
 import org.easymock.EasyMockSupport;
-import org.junit.After;
-import org.junit.AfterClass;
 import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
 
 import com.google.inject.AbstractModule;
@@ -107,14 +102,16 @@ import com.google.inject.persist.PersistService;
  * {@link org.apache.ambari.server.upgrade.UpgradeCatalog210} unit tests.
  */
 public class UpgradeCatalog210Test {
-  private static Injector injector;
+  private Injector injector;
   private Provider<EntityManager> entityManagerProvider = createStrictMock(Provider.class);
   private EntityManager entityManager = createNiceMock(EntityManager.class);
-  private static UpgradeCatalogHelper upgradeCatalogHelper;
-  private static StackEntity desiredStackEntity;
+  private UpgradeCatalogHelper upgradeCatalogHelper;
+  private StackEntity desiredStackEntity;
 
-  @BeforeClass
-  public static void classSetUp() {
+  public void initData() {
+    //reset(entityManagerProvider);
+    //expect(entityManagerProvider.get()).andReturn(entityManager).anyTimes();
+    //replay(entityManagerProvider);
     injector = Guice.createInjector(new InMemoryDefaultTestModule());
     injector.getInstance(GuiceJpaInitializer.class);
 
@@ -126,22 +123,7 @@ public class UpgradeCatalog210Test {
     desiredStackEntity = stackDAO.find("HDP", "2.2.0");
   }
 
-  @Before
-  public void init() {
-    reset(entityManagerProvider);
-    expect(entityManagerProvider.get()).andReturn(entityManager).anyTimes();
-    replay(entityManagerProvider);
-
-
-  }
-
-  @After
   public void tearDown() {
-
-  }
-
-  @AfterClass
-  public static void afterClass() throws Exception {
     injector.getInstance(PersistService.class).stop();
   }
 
@@ -821,6 +803,7 @@ public class UpgradeCatalog210Test {
 
   @Test
   public void testDeleteStormRestApiServiceComponent() throws Exception {
+    initData();
     ClusterEntity clusterEntity = upgradeCatalogHelper.createCluster(injector,
       "c1", desiredStackEntity);
     ClusterServiceEntity clusterServiceEntity = upgradeCatalogHelper.createService(
@@ -879,6 +862,7 @@ public class UpgradeCatalog210Test {
 
     Assert.assertNull(componentDesiredStateDAO.findByName(clusterEntity.getClusterId(), "STORM",
         "STORM_REST_API"));
+    tearDown();
   }
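
The lifecycle change above trades JUnit's static @BeforeClass/@AfterClass hooks for explicit per-test initData()/tearDown() calls, so each test that touches the database builds and disposes of its own Guice injector instead of sharing static state across the class. A minimal sketch of the resulting pattern, with package names for the Ambari test helpers assumed and testSomething as a hypothetical placeholder:

import org.apache.ambari.server.orm.GuiceJpaInitializer;
import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
import org.junit.Test;

import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.persist.PersistService;

public class UpgradeCatalog210Test {
  private Injector injector;

  // Called explicitly at the start of a test that needs database state;
  // replaces the former static @BeforeClass setup.
  public void initData() {
    injector = Guice.createInjector(new InMemoryDefaultTestModule());
    injector.getInstance(GuiceJpaInitializer.class);
  }

  // Called explicitly at the end of such a test; replaces @AfterClass.
  public void tearDown() {
    injector.getInstance(PersistService.class).stop();
  }

  @Test
  public void testSomething() throws Exception {
    initData();
    // ... exercise the upgrade catalog against the in-memory database ...
    tearDown();
  }
}
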
 
 


[12/14] ambari git commit: AMBARI-18480 Ubuntu16 Requires libsnappy1v5 As a Dependency For Metrics (dsen)

Posted by jo...@apache.org.
AMBARI-18480 Ubuntu16 Requires libsnappy1v5 As a Dependency For Metrics (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f503655e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f503655e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f503655e

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: f503655e1fb94607c30f5bfe5b3c9a8636757424
Parents: 563124b
Author: Dmytro Sen <ds...@apache.org>
Authored: Thu Sep 29 17:25:26 2016 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Thu Sep 29 17:25:26 2016 +0300

----------------------------------------------------------------------
 .../AMBARI_METRICS/0.1.0/metainfo.xml              | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f503655e/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
index 86f4fac..d2f5862 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
@@ -147,7 +147,7 @@
           </packages>
         </osSpecific>
         <osSpecific>
-          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <osFamily>debian7,ubuntu12,ubuntu14</osFamily>
           <packages>
             <package>
               <name>ambari-metrics-assembly</name>
@@ -162,6 +162,21 @@
           </packages>
         </osSpecific>
         <osSpecific>
+          <osFamily>ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>ambari-metrics-assembly</name>
+              <skipUpgrade>true</skipUpgrade>
+            </package>
+            <package>
+              <name>gcc</name>
+            </package>
+            <package>
+              <name>libsnappy1v5*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
           <osFamily>winsrv6</osFamily>
           <packages>
             <package>


[11/14] ambari git commit: AMBARI-18491. Spark user principal name is not always lowercase in HDP 2.5 (aonishuk)

Posted by jo...@apache.org.
AMBARI-18491. Spark user principal name is not always lowercase in HDP 2.5 (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/563124ba
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/563124ba
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/563124ba

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 563124baeef663967ed214fb88f4f6006c5b2623
Parents: 481a542
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Thu Sep 29 12:55:25 2016 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Thu Sep 29 12:55:25 2016 +0300

----------------------------------------------------------------------
 .../src/main/resources/stacks/HDP/2.5/services/SPARK/kerberos.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/563124ba/ambari-server/src/main/resources/stacks/HDP/2.5/services/SPARK/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/SPARK/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.5/services/SPARK/kerberos.json
index 2d52fb3..934f3c6 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/SPARK/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/SPARK/kerberos.json
@@ -9,7 +9,7 @@
         {
           "name": "sparkuser",
           "principal": {
-            "value": "${spark-env/spark_user}-${cluster_name}@${realm}",
+            "value": "${spark-env/spark_user}-${cluster_name|toLower()}@${realm}",
             "type" : "user",
             "configuration": "spark-defaults/spark.history.kerberos.principal",
             "local_username" : "${spark-env/spark_user}"


[13/14] ambari git commit: AMBARI-18481. Fix Authentication data is not available error in ConfigGroupResourceProviderTest (rlevas)

Posted by jo...@apache.org.
AMBARI-18481. Fix Authentication data is not available error in ConfigGroupResourceProviderTest (rlevas)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5fa2d9b2
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5fa2d9b2
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5fa2d9b2

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 5fa2d9b22b0ae23b1c8b22a54c51fac76633045e
Parents: f503655
Author: Robert Levas <rl...@hortonworks.com>
Authored: Thu Sep 29 11:09:52 2016 -0400
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Thu Sep 29 11:09:52 2016 -0400

----------------------------------------------------------------------
 .../ConfigGroupResourceProviderTest.java        | 229 +++++++++----------
 1 file changed, 112 insertions(+), 117 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/5fa2d9b2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java
index 4dec990..eb2e0e8 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -19,7 +19,6 @@ package org.apache.ambari.server.controller.internal;
 
 import com.google.inject.Binder;
 import com.google.inject.Guice;
-import com.google.inject.Injector;
 import com.google.inject.Module;
 import com.google.inject.util.Modules;
 import org.apache.ambari.server.controller.AmbariManagementController;
@@ -51,7 +50,6 @@ import org.easymock.Capture;
 import org.easymock.IAnswer;
 import org.junit.Assert;
 import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
 import org.springframework.security.core.Authentication;
 import org.springframework.security.core.context.SecurityContextHolder;
@@ -73,33 +71,28 @@ import static org.easymock.EasyMock.capture;
 import static org.easymock.EasyMock.createMock;
 import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.newCapture;
 import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.verify;
 import static org.easymock.EasyMock.createStrictMock;
 
 public class ConfigGroupResourceProviderTest {
 
-  private Injector injector;
-
   private HostDAO hostDAO = null;
 
-  @BeforeClass
-  public static void setupAuthentication() {
+  @Before
+  public void setup() throws Exception {
     // Clear authenticated user so that authorization checks will pass
     SecurityContextHolder.getContext().setAuthentication(null);
-  }
 
-  @Before
-  public void setup() throws Exception {
     hostDAO = createStrictMock(HostDAO.class);
 
     // Create injector after all mocks have been initialized
-    injector = Guice.createInjector(Modules.override(
+    Guice.createInjector(Modules.override(
         new InMemoryDefaultTestModule()).with(new MockModule()));
   }
 
-  ConfigGroupResourceProvider getConfigGroupResourceProvider
+  private ConfigGroupResourceProvider getConfigGroupResourceProvider
       (AmbariManagementController managementController) {
     Resource.Type type = Resource.Type.ConfigGroup;
 
@@ -171,23 +164,22 @@ public class ConfigGroupResourceProviderTest {
     expect(hostEntity1.getHostId()).andReturn(1L).atLeastOnce();
     expect(hostEntity2.getHostId()).andReturn(2L).atLeastOnce();
 
-    Capture<Cluster> clusterCapture = new Capture<Cluster>();
-    Capture<String> captureName = new Capture<String>();
-    Capture<String> captureDesc = new Capture<String>();
-    Capture<String> captureTag = new Capture<String>();
-    Capture<Map<String, Config>> captureConfigs = new Capture<Map<String,
-      Config>>();
-    Capture<Map<Long, Host>> captureHosts = new Capture<Map<Long, Host>>();
+    Capture<Cluster> clusterCapture = newCapture();
+    Capture<String> captureName = newCapture();
+    Capture<String> captureDesc = newCapture();
+    Capture<String> captureTag = newCapture();
+    Capture<Map<String, Config>> captureConfigs = newCapture();
+    Capture<Map<Long, Host>> captureHosts = newCapture();
 
     expect(configGroupFactory.createNew(capture(clusterCapture),
-      capture(captureName), capture(captureTag), capture(captureDesc),
-      capture(captureConfigs), capture(captureHosts))).andReturn(configGroup);
+        capture(captureName), capture(captureTag), capture(captureDesc),
+        capture(captureConfigs), capture(captureHosts))).andReturn(configGroup);
 
     replay(managementController, clusters, cluster, configGroupFactory,
-      configGroup, response, hostDAO, hostEntity1, hostEntity2);
+        configGroup, response, hostDAO, hostEntity1, hostEntity2);
 
     ResourceProvider provider = getConfigGroupResourceProvider
-      (managementController);
+        (managementController);
 
     Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
     Map<String, Object> properties = new LinkedHashMap<String, Object>();
@@ -210,15 +202,15 @@ public class ConfigGroupResourceProviderTest {
     configSet.add(configs);
 
     properties.put(ConfigGroupResourceProvider
-      .CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
+        .CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
     properties.put(ConfigGroupResourceProvider.CONFIGGROUP_NAME_PROPERTY_ID,
-      "test-1");
+        "test-1");
     properties.put(ConfigGroupResourceProvider.CONFIGGROUP_TAG_PROPERTY_ID,
-      "tag-1");
+        "tag-1");
     properties.put(ConfigGroupResourceProvider.CONFIGGROUP_HOSTS_PROPERTY_ID,
-      hostSet);
+        hostSet);
     properties.put(ConfigGroupResourceProvider.CONFIGGROUP_CONFIGS_PROPERTY_ID,
-      configSet);
+        configSet);
 
     propertySet.add(properties);
 
@@ -229,10 +221,10 @@ public class ConfigGroupResourceProviderTest {
     provider.createResources(request);
 
     verify(managementController, clusters, cluster, configGroupFactory,
-      configGroup, response, hostDAO, hostEntity1, hostEntity2);
+        configGroup, response, hostDAO, hostEntity1, hostEntity2);
 
     assertEquals("version100", captureConfigs.getValue().get("core-site")
-      .getTag());
+        .getTag());
     assertTrue(captureHosts.getValue().containsKey(1L));
     assertTrue(captureHosts.getValue().containsKey(2L));
   }
@@ -280,33 +272,33 @@ public class ConfigGroupResourceProviderTest {
     expect(managementController.getClusters()).andReturn(clusters).anyTimes();
     expect(clusters.getCluster("Cluster100")).andReturn(cluster).anyTimes();
     expect(managementController.getConfigGroupFactory()).andReturn
-      (configGroupFactory).anyTimes();
+        (configGroupFactory).anyTimes();
     expect(managementController.getAuthName()).andReturn("admin").anyTimes();
     expect(cluster.getConfigGroups()).andReturn(configGroupMap);
 
     expect(configGroupFactory.createNew((Cluster) anyObject(), (String) anyObject(),
-      (String) anyObject(), (String) anyObject(), (HashMap) anyObject(),
-      (HashMap) anyObject())).andReturn(configGroup).anyTimes();
+        (String) anyObject(), (String) anyObject(), (HashMap) anyObject(),
+        (HashMap) anyObject())).andReturn(configGroup).anyTimes();
 
     expect(configGroup.getClusterName()).andReturn("Cluster100").anyTimes();
     expect(configGroup.getName()).andReturn("test-1").anyTimes();
     expect(configGroup.getTag()).andReturn("tag-1").anyTimes();
 
     replay(managementController, clusters, cluster, configGroupFactory,
-      configGroup, response);
+        configGroup, response);
 
     ResourceProvider provider = getConfigGroupResourceProvider
-      (managementController);
+        (managementController);
 
     Map<String, Object> properties = new LinkedHashMap<String, Object>();
     Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
 
     properties.put(ConfigGroupResourceProvider
-      .CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
+        .CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
     properties.put(ConfigGroupResourceProvider.CONFIGGROUP_NAME_PROPERTY_ID,
-      "test-1");
+        "test-1");
     properties.put(ConfigGroupResourceProvider.CONFIGGROUP_TAG_PROPERTY_ID,
-      "tag-1");
+        "tag-1");
 
     propertySet.add(properties);
     Request request = PropertyHelper.getCreateRequest(propertySet, null);
@@ -316,14 +308,14 @@ public class ConfigGroupResourceProviderTest {
     Exception exception = null;
     try {
       provider.createResources(request);
-    } catch (AuthorizationException e){
+    } catch (AuthorizationException e) {
       throw e;
     } catch (Exception e) {
       exception = e;
     }
 
     verify(managementController, clusters, cluster, configGroupFactory,
-      configGroup, response);
+        configGroup, response);
 
     assertNotNull(exception);
     assertTrue(exception instanceof ResourceAlreadyExistsException);
@@ -409,7 +401,7 @@ public class ConfigGroupResourceProviderTest {
     properties.put(ConfigGroupResourceProvider.CONFIGGROUP_TAG_PROPERTY_ID,
         "tag-1");
     properties.put(ConfigGroupResourceProvider.CONFIGGROUP_HOSTS_PROPERTY_ID,
-        hostSet );
+        hostSet);
     properties.put(ConfigGroupResourceProvider.CONFIGGROUP_CONFIGS_PROPERTY_ID,
         configSet);
 
@@ -429,8 +421,7 @@ public class ConfigGroupResourceProviderTest {
     SystemException systemException = null;
     try {
       provider.updateResources(request, predicate);
-    }
-    catch (SystemException e){
+    } catch (SystemException e) {
       systemException = e;
     }
     assertNotNull(systemException);
@@ -481,7 +472,7 @@ public class ConfigGroupResourceProviderTest {
 
     final ConfigGroup configGroup = createNiceMock(ConfigGroup.class);
     ConfigGroupResponse configGroupResponse = createNiceMock
-      (ConfigGroupResponse.class);
+        (ConfigGroupResponse.class);
 
     expect(cluster.isConfigTypeExists("core-site")).andReturn(true).anyTimes();
     expect(managementController.getClusters()).andReturn(clusters).anyTimes();
@@ -516,10 +507,10 @@ public class ConfigGroupResourceProviderTest {
     });
 
     replay(managementController, clusters, cluster,
-      configGroup, response, configGroupResponse, configHelper, hostDAO, hostEntity1, hostEntity2, h1, h2);
+        configGroup, response, configGroupResponse, configHelper, hostDAO, hostEntity1, hostEntity2, h1, h2);
 
     ResourceProvider provider = getConfigGroupResourceProvider
-      (managementController);
+        (managementController);
 
     Map<String, Object> properties = new LinkedHashMap<String, Object>();
 
@@ -541,15 +532,15 @@ public class ConfigGroupResourceProviderTest {
     configSet.add(configs);
 
     properties.put(ConfigGroupResourceProvider
-      .CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
+        .CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
     properties.put(ConfigGroupResourceProvider.CONFIGGROUP_NAME_PROPERTY_ID,
-      "test-1");
+        "test-1");
     properties.put(ConfigGroupResourceProvider.CONFIGGROUP_TAG_PROPERTY_ID,
-      "tag-1");
+        "tag-1");
     properties.put(ConfigGroupResourceProvider.CONFIGGROUP_HOSTS_PROPERTY_ID,
-      hostSet );
+        hostSet);
     properties.put(ConfigGroupResourceProvider.CONFIGGROUP_CONFIGS_PROPERTY_ID,
-      configSet);
+        configSet);
 
     Map<String, String> mapRequestProps = new HashMap<String, String>();
     mapRequestProps.put("context", "Called from a test");
@@ -557,17 +548,17 @@ public class ConfigGroupResourceProviderTest {
     Request request = PropertyHelper.getUpdateRequest(properties, mapRequestProps);
 
     Predicate predicate = new PredicateBuilder().property
-      (ConfigGroupResourceProvider.CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID).equals
-      ("Cluster100").and().
-      property(ConfigGroupResourceProvider.CONFIGGROUP_ID_PROPERTY_ID).equals
-      (25L).toPredicate();
+        (ConfigGroupResourceProvider.CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID).equals
+        ("Cluster100").and().
+        property(ConfigGroupResourceProvider.CONFIGGROUP_ID_PROPERTY_ID).equals
+        (25L).toPredicate();
 
     SecurityContextHolder.getContext().setAuthentication(authentication);
 
     provider.updateResources(request, predicate);
 
     verify(managementController, clusters, cluster,
-      configGroup, response, configGroupResponse, configHelper, hostDAO, hostEntity1, hostEntity2, h1, h2);
+        configGroup, response, configGroupResponse, configHelper, hostDAO, hostEntity1, hostEntity2, h1, h2);
   }
 
   @Test
@@ -607,8 +598,12 @@ public class ConfigGroupResourceProviderTest {
     Cluster cluster = createNiceMock(Cluster.class);
     Host h1 = createNiceMock(Host.class);
     final Long host1Id = 1L;
-    List<Long> hostIds = new ArrayList<Long>() {{ add(host1Id); }};
-    List<String> hostNames = new ArrayList<String>() {{ add("h1"); }};
+    List<Long> hostIds = new ArrayList<Long>() {{
+      add(host1Id);
+    }};
+    List<String> hostNames = new ArrayList<String>() {{
+      add("h1");
+    }};
     HostEntity hostEntity1 = createMock(HostEntity.class);
 
     expect(hostDAO.getHostNamesByHostIds(hostIds)).andReturn(hostNames).atLeastOnce();
@@ -678,8 +673,9 @@ public class ConfigGroupResourceProviderTest {
     replay(managementController, clusters, cluster, hostDAO, hostEntity1,
         configGroup1, configGroup2, configGroup3, configGroup4, response1, response2, response3, response4);
 
-    ResourceProvider resourceProvider = getConfigGroupResourceProvider
-      (managementController);
+    SecurityContextHolder.getContext().setAuthentication(authentication);
+
+    ResourceProvider resourceProvider = getConfigGroupResourceProvider(managementController);
 
     Set<String> propertyIds = new HashSet<String>();
 
@@ -688,125 +684,122 @@ public class ConfigGroupResourceProviderTest {
 
     // Read all
     Predicate predicate = new PredicateBuilder().property
-      (ConfigGroupResourceProvider.CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID)
-      .equals("Cluster100").toPredicate();
+        (ConfigGroupResourceProvider.CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID)
+        .equals("Cluster100").toPredicate();
     Request request = PropertyHelper.getReadRequest(propertyIds);
 
-    Set<Resource> resources = resourceProvider.getResources(request,
-      predicate);
+    Set<Resource> resources = resourceProvider.getResources(request, predicate);
 
     assertEquals(4, resources.size());
 
     // Read by id
     predicate = new PredicateBuilder().property(ConfigGroupResourceProvider
-      .CONFIGGROUP_ID_PROPERTY_ID).equals(1L).and().property
-      (ConfigGroupResourceProvider.CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID)
-      .equals("Cluster100").toPredicate();
-
-    SecurityContextHolder.getContext().setAuthentication(authentication);
+        .CONFIGGROUP_ID_PROPERTY_ID).equals(1L).and().property
+        (ConfigGroupResourceProvider.CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID)
+        .equals("Cluster100").toPredicate();
 
     resources = resourceProvider.getResources(request, predicate);
 
     assertEquals(1, resources.size());
     assertEquals(1L, resources.iterator().next().getPropertyValue
-      (ConfigGroupResourceProvider.CONFIGGROUP_ID_PROPERTY_ID));
+        (ConfigGroupResourceProvider.CONFIGGROUP_ID_PROPERTY_ID));
 
     // Read by Name
     predicate = new PredicateBuilder().property(ConfigGroupResourceProvider
-      .CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID).equals("Cluster100").and()
-      .property(ConfigGroupResourceProvider.CONFIGGROUP_NAME_PROPERTY_ID)
-      .equals("g2").toPredicate();
+        .CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID).equals("Cluster100").and()
+        .property(ConfigGroupResourceProvider.CONFIGGROUP_NAME_PROPERTY_ID)
+        .equals("g2").toPredicate();
 
     resources = resourceProvider.getResources(request, predicate);
 
     assertEquals(1, resources.size());
     assertEquals("g2", resources.iterator().next().getPropertyValue
-      (ConfigGroupResourceProvider.CONFIGGROUP_NAME_PROPERTY_ID));
+        (ConfigGroupResourceProvider.CONFIGGROUP_NAME_PROPERTY_ID));
 
     // Read by tag
     predicate = new PredicateBuilder().property(ConfigGroupResourceProvider
-      .CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID).equals("Cluster100").and()
-      .property(ConfigGroupResourceProvider.CONFIGGROUP_TAG_PROPERTY_ID)
-      .equals("t3").toPredicate();
+        .CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID).equals("Cluster100").and()
+        .property(ConfigGroupResourceProvider.CONFIGGROUP_TAG_PROPERTY_ID)
+        .equals("t3").toPredicate();
 
     resources = resourceProvider.getResources(request, predicate);
 
     assertEquals(1, resources.size());
     assertEquals("t3", resources.iterator().next().getPropertyValue
-      (ConfigGroupResourceProvider.CONFIGGROUP_TAG_PROPERTY_ID));
+        (ConfigGroupResourceProvider.CONFIGGROUP_TAG_PROPERTY_ID));
 
     // Read by hostname (hosts=h1)
     predicate = new PredicateBuilder().property(ConfigGroupResourceProvider
-      .CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID).equals("Cluster100").and()
-      .property(ConfigGroupResourceProvider.CONFIGGROUP_HOSTS_PROPERTY_ID)
-      .equals("h1").toPredicate();
+        .CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID).equals("Cluster100").and()
+        .property(ConfigGroupResourceProvider.CONFIGGROUP_HOSTS_PROPERTY_ID)
+        .equals("h1").toPredicate();
 
     resources = resourceProvider.getResources(request, predicate);
 
     assertEquals(1, resources.size());
     Set<Map<String, Object>> hostSet = (Set<Map<String, Object>>)
-      resources.iterator().next()
-      .getPropertyValue(ConfigGroupResourceProvider
-        .CONFIGGROUP_HOSTS_PROPERTY_ID);
+        resources.iterator().next()
+            .getPropertyValue(ConfigGroupResourceProvider
+                .CONFIGGROUP_HOSTS_PROPERTY_ID);
     assertEquals("h1", hostSet.iterator().next().get
-      (ConfigGroupResourceProvider.CONFIGGROUP_HOSTNAME_PROPERTY_ID));
+        (ConfigGroupResourceProvider.CONFIGGROUP_HOSTNAME_PROPERTY_ID));
 
     // Read by hostname (hosts/host_name=h1)
     predicate = new PredicateBuilder().property(ConfigGroupResourceProvider
-      .CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID).equals("Cluster100").and()
-      .property(ConfigGroupResourceProvider.CONFIGGROUP_HOSTS_HOSTNAME_PROPERTY_ID)
-      .equals("h1").toPredicate();
+        .CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID).equals("Cluster100").and()
+        .property(ConfigGroupResourceProvider.CONFIGGROUP_HOSTS_HOSTNAME_PROPERTY_ID)
+        .equals("h1").toPredicate();
 
     resources = resourceProvider.getResources(request, predicate);
 
     assertEquals(1, resources.size());
     hostSet = (Set<Map<String, Object>>)
-      resources.iterator().next()
-        .getPropertyValue(ConfigGroupResourceProvider
-          .CONFIGGROUP_HOSTS_PROPERTY_ID);
+        resources.iterator().next()
+            .getPropertyValue(ConfigGroupResourceProvider
+                .CONFIGGROUP_HOSTS_PROPERTY_ID);
     assertEquals("h1", hostSet.iterator().next().get
-      (ConfigGroupResourceProvider.CONFIGGROUP_HOSTNAME_PROPERTY_ID));
+        (ConfigGroupResourceProvider.CONFIGGROUP_HOSTNAME_PROPERTY_ID));
 
 
     // Read by tag and hostname (hosts=h1) - Positive
     predicate = new PredicateBuilder().property(ConfigGroupResourceProvider
-      .CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID).equals("Cluster100").and()
-      .property(ConfigGroupResourceProvider.CONFIGGROUP_TAG_PROPERTY_ID)
-      .equals("t4").and().property(ConfigGroupResourceProvider
-        .CONFIGGROUP_HOSTS_PROPERTY_ID).equals(host1Id).toPredicate();
+        .CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID).equals("Cluster100").and()
+        .property(ConfigGroupResourceProvider.CONFIGGROUP_TAG_PROPERTY_ID)
+        .equals("t4").and().property(ConfigGroupResourceProvider
+            .CONFIGGROUP_HOSTS_PROPERTY_ID).equals(host1Id).toPredicate();
 
     resources = resourceProvider.getResources(request, predicate);
 
     assertEquals(1, resources.size());
     hostSet = (Set<Map<String, Object>>)
-      resources.iterator().next()
-        .getPropertyValue(ConfigGroupResourceProvider
-          .CONFIGGROUP_HOSTS_PROPERTY_ID);
+        resources.iterator().next()
+            .getPropertyValue(ConfigGroupResourceProvider
+                .CONFIGGROUP_HOSTS_PROPERTY_ID);
     assertEquals("h1", hostSet.iterator().next().get
-      (ConfigGroupResourceProvider.CONFIGGROUP_HOSTNAME_PROPERTY_ID));
+        (ConfigGroupResourceProvider.CONFIGGROUP_HOSTNAME_PROPERTY_ID));
 
     // Read by tag and hostname (hosts/host_name=h1) - Positive
     predicate = new PredicateBuilder().property(ConfigGroupResourceProvider
-      .CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID).equals("Cluster100").and()
-      .property(ConfigGroupResourceProvider.CONFIGGROUP_TAG_PROPERTY_ID)
-      .equals("t4").and().property(ConfigGroupResourceProvider
-        .CONFIGGROUP_HOSTS_HOSTNAME_PROPERTY_ID).equals("h1").toPredicate();
+        .CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID).equals("Cluster100").and()
+        .property(ConfigGroupResourceProvider.CONFIGGROUP_TAG_PROPERTY_ID)
+        .equals("t4").and().property(ConfigGroupResourceProvider
+            .CONFIGGROUP_HOSTS_HOSTNAME_PROPERTY_ID).equals("h1").toPredicate();
 
     resources = resourceProvider.getResources(request, predicate);
 
     assertEquals(1, resources.size());
     hostSet = (Set<Map<String, Object>>)
-      resources.iterator().next()
-        .getPropertyValue(ConfigGroupResourceProvider
-          .CONFIGGROUP_HOSTS_PROPERTY_ID);
+        resources.iterator().next()
+            .getPropertyValue(ConfigGroupResourceProvider
+                .CONFIGGROUP_HOSTS_PROPERTY_ID);
     assertEquals("h1", hostSet.iterator().next().get
-      (ConfigGroupResourceProvider.CONFIGGROUP_HOSTNAME_PROPERTY_ID));
+        (ConfigGroupResourceProvider.CONFIGGROUP_HOSTNAME_PROPERTY_ID));
 
     // Read by id
     predicate = new PredicateBuilder().property(ConfigGroupResourceProvider
-      .CONFIGGROUP_ID_PROPERTY_ID).equals(11L).and().property
-      (ConfigGroupResourceProvider.CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID)
-      .equals("Cluster100").toPredicate();
+        .CONFIGGROUP_ID_PROPERTY_ID).equals(11L).and().property
+        (ConfigGroupResourceProvider.CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID)
+        .equals("Cluster100").toPredicate();
 
     NoSuchResourceException resourceException = null;
     try {
@@ -866,16 +859,16 @@ public class ConfigGroupResourceProviderTest {
     replay(managementController, clusters, cluster, configGroup);
 
     ResourceProvider resourceProvider = getConfigGroupResourceProvider
-      (managementController);
+        (managementController);
 
     AbstractResourceProviderTest.TestObserver observer = new AbstractResourceProviderTest.TestObserver();
 
     ((ObservableResourceProvider) resourceProvider).addObserver(observer);
 
     Predicate predicate = new PredicateBuilder().property
-      (ConfigGroupResourceProvider.CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID)
-      .equals("Cluster100").and().property(ConfigGroupResourceProvider
-        .CONFIGGROUP_ID_PROPERTY_ID).equals(1L).toPredicate();
+        (ConfigGroupResourceProvider.CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID)
+        .equals("Cluster100").and().property(ConfigGroupResourceProvider
+            .CONFIGGROUP_ID_PROPERTY_ID).equals(1L).toPredicate();
 
     SecurityContextHolder.getContext().setAuthentication(authentication);
 
@@ -909,7 +902,9 @@ public class ConfigGroupResourceProviderTest {
     desiredConfigProperties.add(desiredConfig1);
 
     Map<String, Object> properties = new HashMap<String, Object>();
-    properties.put("ConfigGroup/hosts", new HashMap<String, String>(){{put("host_name", "ambari1");}});
+    properties.put("ConfigGroup/hosts", new HashMap<String, String>() {{
+      put("host_name", "ambari1");
+    }});
     properties.put("ConfigGroup/cluster_name", "c");
     properties.put("ConfigGroup/desired_configs", desiredConfigProperties);
 


[08/14] ambari git commit: AMBARI-18270. Host delete should support a force option to delete all components (smohanty)

Posted by jo...@apache.org.
AMBARI-18270. Host delete should support a force option to delete all components (smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7ab43807
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7ab43807
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7ab43807

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 7ab438079f0e1a12fab52bc8abbdc73040473195
Parents: 392a752
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Wed Sep 28 22:55:50 2016 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Wed Sep 28 22:55:50 2016 -0700

----------------------------------------------------------------------
 .../internal/HostResourceProvider.java          | 130 ++++++++++++++-----
 .../server/controller/internal/RequestImpl.java |   2 +-
 .../AmbariManagementControllerTest.java         | 106 +++++++++++++++
 .../internal/HostResourceProviderTest.java      |  15 ++-
 4 files changed, 219 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7ab43807/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
index 4673c73..73bc908 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
@@ -41,6 +41,7 @@ import org.apache.ambari.server.controller.HostRequest;
 import org.apache.ambari.server.controller.HostResponse;
 import org.apache.ambari.server.controller.MaintenanceStateHelper;
 import org.apache.ambari.server.controller.RequestStatusResponse;
+import org.apache.ambari.server.controller.ServiceComponentHostRequest;
 import org.apache.ambari.server.controller.spi.NoSuchParentResourceException;
 import org.apache.ambari.server.controller.spi.NoSuchResourceException;
 import org.apache.ambari.server.controller.spi.Predicate;
@@ -64,12 +65,14 @@ import org.apache.ambari.server.state.MaintenanceState;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.State;
 import org.apache.ambari.server.state.stack.OsFamily;
 import org.apache.ambari.server.topology.ClusterTopology;
 import org.apache.ambari.server.topology.InvalidTopologyException;
 import org.apache.ambari.server.topology.InvalidTopologyTemplateException;
 import org.apache.ambari.server.topology.LogicalRequest;
 import org.apache.ambari.server.topology.TopologyManager;
+import org.apache.ambari.server.update.HostUpdateHelper;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -130,7 +133,6 @@ public class HostResourceProvider extends AbstractControllerResourceProvider {
   public static final String HOST_RECOVERY_SUMMARY_PROPERTY_ID =
       PropertyHelper.getPropertyId("Hosts", "recovery_summary");
   public static final String HOST_STATE_PROPERTY_ID =
-
       PropertyHelper.getPropertyId("Hosts", "host_state");
   public static final String HOST_LAST_AGENT_ENV_PROPERTY_ID =
       PropertyHelper.getPropertyId("Hosts", "last_agent_env");
@@ -152,6 +154,8 @@ public class HostResourceProvider extends AbstractControllerResourceProvider {
   public static final String HOST_RACK_INFO_NO_CATEGORY_PROPERTY_ID =
       PropertyHelper.getPropertyId(null, "rack_info");
 
+  protected static final String FORCE_DELETE_COMPONENTS = "force_delete_components";
+
 
   private static Set<String> pkPropertyIds =
       new HashSet<String>(Arrays.asList(new String[]{
@@ -335,6 +339,10 @@ public class HostResourceProvider extends AbstractControllerResourceProvider {
       throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
 
     final Set<HostRequest> requests = new HashSet<>();
+    Map<String, String> requestInfoProperties = request.getRequestInfoProperties();
+    final boolean forceDelete = requestInfoProperties != null &&
+                  "true".equals(requestInfoProperties.get(FORCE_DELETE_COMPONENTS));
+
     for (Map<String, Object> propertyMap : getPropertyMaps(predicate)) {
       requests.add(getRequest(propertyMap));
     }
@@ -342,11 +350,13 @@ public class HostResourceProvider extends AbstractControllerResourceProvider {
     DeleteStatusMetaData deleteStatusMetaData = modifyResources(new Command<DeleteStatusMetaData>() {
       @Override
       public DeleteStatusMetaData invoke() throws AmbariException {
-        return deleteHosts(requests, request.isDryRunRequest());
+        return deleteHosts(requests, request.isDryRunRequest(), forceDelete);
       }
     });
 
-    notifyDelete(Resource.Type.Host, predicate);
+    if(!request.isDryRunRequest()) {
+      notifyDelete(Resource.Type.Host, predicate);
+    }
 
     return getRequestStatus(null, null, deleteStatusMetaData);
   }
@@ -834,7 +844,7 @@ public class HostResourceProvider extends AbstractControllerResourceProvider {
   }
 
   @Transactional
-  protected DeleteStatusMetaData deleteHosts(Set<HostRequest> requests, boolean dryRun)
+  protected DeleteStatusMetaData deleteHosts(Set<HostRequest> requests, boolean dryRun, boolean forceDelete)
       throws AmbariException {
 
     AmbariManagementController controller = getManagementController();
@@ -849,7 +859,7 @@ public class HostResourceProvider extends AbstractControllerResourceProvider {
       }
 
       try {
-        validateHostInDeleteFriendlyState(hostRequest, clusters);
+        validateHostInDeleteFriendlyState(hostRequest, clusters, forceDelete);
         okToRemove.add(hostRequest);
       } catch (Exception ex) {
         deleteStatusMetaData.addException(hostName, ex);
@@ -867,16 +877,19 @@ public class HostResourceProvider extends AbstractControllerResourceProvider {
 
     //Do not break behavior for existing clients where delete request contains only 1 host.
     //Response for these requests will have empty body with appropriate error code.
-    if (deleteStatusMetaData.getDeletedKeys().size() + deleteStatusMetaData.getExceptionForKeys().size() == 1) {
-      if (deleteStatusMetaData.getDeletedKeys().size() == 1) {
-        return null;
-      }
-      for (Map.Entry<String, Exception> entry : deleteStatusMetaData.getExceptionForKeys().entrySet()) {
-        Exception ex =  entry.getValue();
-        if (ex instanceof AmbariException) {
-          throw (AmbariException)ex;
-        } else {
-          throw new AmbariException(ex.getMessage(), ex);
+    // dryRun is a new feature, so it's OK to unify the behavior
+    if (!dryRun) {
+      if (deleteStatusMetaData.getDeletedKeys().size() + deleteStatusMetaData.getExceptionForKeys().size() == 1) {
+        if (deleteStatusMetaData.getDeletedKeys().size() == 1) {
+          return null;
+        }
+        for (Map.Entry<String, Exception> entry : deleteStatusMetaData.getExceptionForKeys().entrySet()) {
+          Exception ex = entry.getValue();
+          if (ex instanceof AmbariException) {
+            throw (AmbariException) ex;
+          } else {
+            throw new AmbariException(ex.getMessage(), ex);
+          }
         }
       }
     }
@@ -888,6 +901,38 @@ public class HostResourceProvider extends AbstractControllerResourceProvider {
     for (HostRequest hostRequest : requests) {
       // Assume the user also wants to delete it entirely, including all clusters.
       String hostname = hostRequest.getHostname();
+
+      // delete all host components
+      Set<ServiceComponentHostRequest> schrs = new HashSet<>();
+      for(Cluster cluster : clusters.getClustersForHost(hostname)) {
+        List<ServiceComponentHost> list = cluster.getServiceComponentHosts(hostname);
+        for (ServiceComponentHost sch : list) {
+          ServiceComponentHostRequest schr = new ServiceComponentHostRequest(cluster.getClusterName(),
+                                                                             sch.getServiceName(),
+                                                                             sch.getServiceComponentName(),
+                                                                             sch.getHostName(),
+                                                                             null);
+          schrs.add(schr);
+        }
+      }
+      DeleteStatusMetaData componentDeleteStatus = null;
+      if(schrs.size() > 0) {
+        try {
+          componentDeleteStatus = getManagementController().deleteHostComponents(schrs);
+        } catch (Exception ex) {
+          deleteStatusMetaData.addException(hostname, ex);
+        }
+      }
+
+      if (componentDeleteStatus != null) {
+        for (String key : componentDeleteStatus.getDeletedKeys()) {
+          deleteStatusMetaData.addDeletedKey(key);
+        }
+        for (String key : componentDeleteStatus.getExceptionForKeys().keySet()) {
+          deleteStatusMetaData.addException(key, componentDeleteStatus.getExceptionForKeys().get(key));
+        }
+      }
+
       try {
         clusters.deleteHost(hostname);
         deleteStatusMetaData.addDeletedKey(hostname);
@@ -905,37 +950,57 @@ public class HostResourceProvider extends AbstractControllerResourceProvider {
     }
   }
 
-  private void validateHostInDeleteFriendlyState(HostRequest hostRequest, Clusters clusters ) throws AmbariException {
+  private void validateHostInDeleteFriendlyState(HostRequest hostRequest, Clusters clusters, boolean forceDelete) throws AmbariException {
     Set<String> clusterNamesForHost = new HashSet<>();
     String hostName = hostRequest.getHostname();
-      if (null != hostRequest.getClusterName()) {
-        clusterNamesForHost.add(hostRequest.getClusterName());
-      } else {
-        Set<Cluster> clustersForHost = clusters.getClustersForHost(hostRequest.getHostname());
-        if (null != clustersForHost) {
-          for (Cluster c : clustersForHost) {
-            clusterNamesForHost.add(c.getClusterName());
-          }
+    if (null != hostRequest.getClusterName()) {
+      clusterNamesForHost.add(hostRequest.getClusterName());
+    } else {
+      Set<Cluster> clustersForHost = clusters.getClustersForHost(hostRequest.getHostname());
+      if (null != clustersForHost) {
+        for (Cluster c : clustersForHost) {
+          clusterNamesForHost.add(c.getClusterName());
         }
       }
+    }
 
-      for (String clusterName : clusterNamesForHost) {
-        Cluster cluster = clusters.getCluster(clusterName);
+    for (String clusterName : clusterNamesForHost) {
+      Cluster cluster = clusters.getCluster(clusterName);
 
-        List<ServiceComponentHost> list = cluster.getServiceComponentHosts(hostName);
+      List<ServiceComponentHost> list = cluster.getServiceComponentHosts(hostName);
 
-        if (!list.isEmpty()) {
-          List<String> componentsToRemove = new ArrayList<>();
-          for (ServiceComponentHost sch : list) {
-            componentsToRemove.add(sch.getServiceComponentName());
+      if (!list.isEmpty()) {
+        List<String> componentsToRemove = new ArrayList<>();
+        List<String> componentsStarted = new ArrayList<>();
+        for (ServiceComponentHost sch : list) {
+          componentsToRemove.add(sch.getServiceComponentName());
+          if (sch.getState() == State.STARTED) {
+            componentsStarted.add(sch.getServiceComponentName());
           }
+        }
 
+        if (forceDelete) {
+          // error if components are running
+          if (!componentsStarted.isEmpty()) {
+            StringBuilder reason = new StringBuilder("Cannot remove host ")
+                .append(hostName)
+                .append(" from ")
+                .append(hostRequest.getClusterName())
+                .append(
+                    ".  The following roles exist, and these components must be stopped: ");
+
+            reason.append(StringUtils.join(componentsStarted, ", "));
+
+            throw new AmbariException(reason.toString());
+          }
+        } else {
           if (!componentsToRemove.isEmpty()) {
             StringBuilder reason = new StringBuilder("Cannot remove host ")
                 .append(hostName)
                 .append(" from ")
                 .append(hostRequest.getClusterName())
-                .append(".  The following roles exist, and these components must be stopped if running, and then deleted: ");
+                .append(
+                    ".  The following roles exist, and these components must be stopped if running, and then deleted: ");
 
             reason.append(StringUtils.join(componentsToRemove, ", "));
 
@@ -943,6 +1008,7 @@ public class HostResourceProvider extends AbstractControllerResourceProvider {
           }
         }
       }
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/7ab43807/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestImpl.java
index 36ad4c3..aaeefd7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestImpl.java
@@ -85,7 +85,7 @@ public class RequestImpl implements Request {
    * @param requestInfoProperties  request properties; may be null
    * @param mapTemporalInfo        temporal info
    */
-  public RequestImpl(Set<String> propertyIds, Set<Map<String, Object>> properties,
+  public  RequestImpl(Set<String> propertyIds, Set<Map<String, Object>> properties,
                      Map<String, String> requestInfoProperties, Map<String,TemporalInfo> mapTemporalInfo) {
     this(propertyIds, properties, requestInfoProperties, mapTemporalInfo, null, null);
   }
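
Tying the two files together: the provider reads the new FORCE_DELETE_COMPONENTS flag out of the request-info map that RequestImpl carries, so a caller opts in by mapping "force_delete_components" to "true" in that map. A sketch using only the constructor shown above (the wrapper class is hypothetical):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.apache.ambari.server.controller.internal.RequestImpl;
import org.apache.ambari.server.controller.spi.Request;

public class ForceDeleteRequestExample {
  static Request buildForceDeleteRequest() {
    Map<String, String> requestInfoProperties = new HashMap<>();
    // The provider treats the delete as forced only when this key
    // maps to the string "true".
    requestInfoProperties.put("force_delete_components", "true");
    return new RequestImpl(
        Collections.<String>emptySet(),              // propertyIds
        Collections.<Map<String, Object>>emptySet(), // properties
        requestInfoProperties,
        null);                                       // no temporal info
  }
}
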

http://git-wip-us.apache.org/repos/asf/ambari/blob/7ab43807/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index db5adff..197b925 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -8949,6 +8949,112 @@ public class AmbariManagementControllerTest {
   }
 
   @Test
+  public void testDeleteHostComponentWithForce() throws Exception {
+    String cluster1 = getUniqueName();
+
+    createCluster(cluster1);
+
+    Cluster cluster = clusters.getCluster(cluster1);
+    cluster.setDesiredStackVersion(new StackId("HDP-0.1"));
+
+    String serviceName = "HDFS";
+    createService(cluster1, serviceName, null);
+    String componentName1 = "NAMENODE";
+    String componentName2 = "DATANODE";
+    String componentName3 = "HDFS_CLIENT";
+
+    createServiceComponent(cluster1, serviceName, componentName1, State.INIT);
+    createServiceComponent(cluster1, serviceName, componentName2, State.INIT);
+    createServiceComponent(cluster1, serviceName, componentName3, State.INIT);
+
+    String host1 = getUniqueName();  // Host will belong to the cluster and contain components
+
+    addHostToCluster(host1, cluster1);
+
+    // Add components to host1
+    createServiceComponentHost(cluster1, serviceName, componentName1, host1, null);
+    createServiceComponentHost(cluster1, serviceName, componentName2, host1, null);
+    createServiceComponentHost(cluster1, serviceName, componentName3, host1, null);
+
+    // Install
+    installService(cluster1, serviceName, false, false);
+
+    // Treat host components on host1 as up and healthy
+    Map<String, ServiceComponentHost> hostComponents = cluster.getService(serviceName).getServiceComponent(componentName1).getServiceComponentHosts();
+    for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
+      ServiceComponentHost cHost = entry.getValue();
+      cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), cluster.getDesiredStackVersion().getStackId()));
+      cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis()));
+    }
+    hostComponents = cluster.getService(serviceName).getServiceComponent(componentName2).getServiceComponentHosts();
+    for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
+      ServiceComponentHost cHost = entry.getValue();
+      cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), cluster.getDesiredStackVersion().getStackId()));
+      cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis()));
+    }
+
+    // Case 1: Attempt delete when components still exist
+    Set<HostRequest> requests = new HashSet<HostRequest>();
+    requests.clear();
+    requests.add(new HostRequest(host1, cluster1, null));
+    try {
+      HostResourceProviderTest.deleteHosts(controller, requests, false, false);
+      fail("Expect failure deleting hosts when components exist and have not been deleted.");
+    } catch (Exception e) {
+      LOG.info("Exception is - " + e.getMessage());
+      Assert.assertTrue(e.getMessage().contains("these components must be stopped if running, and then deleted"));
+    }
+
+    Service s = cluster.getService(serviceName);
+    s.getServiceComponent("DATANODE").getServiceComponentHost(host1).setState(State.STARTED);
+    try {
+      HostResourceProviderTest.deleteHosts(controller, requests, false, true);
+      fail("Expect failure deleting hosts when components exist and have not been stopped.");
+    } catch (Exception e) {
+      LOG.info("Exception is - " + e.getMessage());
+      Assert.assertTrue(e.getMessage().contains("these components must be stopped:"));
+    }
+
+    DeleteStatusMetaData data = null;
+    try {
+      data = HostResourceProviderTest.deleteHosts(controller, requests, true, true);
+      Assert.assertTrue(data.getDeletedKeys().size() == 0);
+    } catch (Exception e) {
+      LOG.info("Exception is - " + e.getMessage());
+      fail("Do not expect failure deleting hosts when components exist and are stopped.");
+    }
+
+    LOG.info("Test dry run of delete with all host components");
+    s.getServiceComponent("DATANODE").getServiceComponentHost(host1).setState(State.INSTALLED);
+    try {
+      data = HostResourceProviderTest.deleteHosts(controller, requests, true, true);
+      Assert.assertTrue(data.getDeletedKeys().size() == 1);
+    } catch (Exception e) {
+      LOG.info("Exception is - " + e.getMessage());
+      fail("Do not expect failure deleting hosts when components exist and are stopped.");
+    }
+
+    LOG.info("Test successful delete with all host components");
+    s.getServiceComponent("DATANODE").getServiceComponentHost(host1).setState(State.INSTALLED);
+    try {
+      data = HostResourceProviderTest.deleteHosts(controller, requests, false, true);
+      Assert.assertNotNull(data);
+      Assert.assertTrue(4 == data.getDeletedKeys().size());
+      Assert.assertTrue(0 == data.getExceptionForKeys().size());
+    } catch (Exception e) {
+      LOG.info("Exception is - " + e.getMessage());
+      fail("Do not expect failure deleting hosts when components exist and are stopped.");
+    }
+    // Verify host does not exist
+    try {
+      clusters.getHost(host1);
+      Assert.fail("Expected a HostNotFoundException.");
+    } catch (HostNotFoundException e) {
+      // expected
+    }
+  }
+
+  @Test
   public void testDeleteHost() throws Exception {
     String cluster1 = getUniqueName();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7ab43807/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java
index 260ff92..08dd591 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java
@@ -1340,7 +1340,20 @@ public class HostResourceProviderTest extends EasyMockSupport {
 
     HostResourceProvider provider = getHostProvider(controller);
     HostResourceProvider.setTopologyManager(topologyManager);
-    provider.deleteHosts(requests, false);
+    provider.deleteHosts(requests, false, false);
+  }
+
+  public static DeleteStatusMetaData deleteHosts(AmbariManagementController controller,
+                                                 Set<HostRequest> requests, boolean dryRun, boolean forceDelete)
+      throws AmbariException {
+    TopologyManager topologyManager = EasyMock.createNiceMock(TopologyManager.class);
+    expect(topologyManager.getRequests(Collections.EMPTY_LIST)).andReturn(Collections.EMPTY_LIST).anyTimes();
+
+    replay(topologyManager);
+
+    HostResourceProvider provider = getHostProvider(controller);
+    HostResourceProvider.setTopologyManager(topologyManager);
+    return provider.deleteHosts(requests, dryRun, forceDelete);
   }
 
   public static void updateHosts(AmbariManagementController controller, Set<HostRequest> requests)


[06/14] ambari git commit: AMBARI-18477. Alert unit tests failing on trunk (aonishuk)

Posted by jo...@apache.org.
AMBARI-18477. Alert unit tests failing on trunk (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/98f0cfc3
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/98f0cfc3
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/98f0cfc3

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 98f0cfc3d9397c6c834291611a0655fc691b0ac2
Parents: 2642595
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Wed Sep 28 21:39:27 2016 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Wed Sep 28 21:39:27 2016 +0300

----------------------------------------------------------------------
 .../server/api/services/AmbariMetaInfoTest.java       | 14 +++++++-------
 .../server/metadata/AgentAlertDefinitionsTest.java    |  2 +-
 2 files changed, 8 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/98f0cfc3/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index 4b7826e..9f79a32 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@ -1925,7 +1925,7 @@ public class AmbariMetaInfoTest {
 
     AlertDefinitionDAO dao = injector.getInstance(AlertDefinitionDAO.class);
     List<AlertDefinitionEntity> definitions = dao.findAll(clusterId);
-    assertEquals(10, definitions.size());
+    assertEquals(11, definitions.size());
 
     // figure out how many of these alerts were merged into from the
     // non-stack alerts.json
@@ -1937,7 +1937,7 @@ public class AmbariMetaInfoTest {
       }
     }
 
-    assertEquals(1, hostAlertCount);
+    assertEquals(2, hostAlertCount);
     assertEquals(9, definitions.size() - hostAlertCount);
 
     for (AlertDefinitionEntity definition : definitions) {
@@ -1948,7 +1948,7 @@ public class AmbariMetaInfoTest {
     metaInfo.reconcileAlertDefinitions(clusters);
 
     definitions = dao.findAll();
-    assertEquals(10, definitions.size());
+    assertEquals(11, definitions.size());
 
     for (AlertDefinitionEntity definition : definitions) {
       assertEquals(28, definition.getScheduleInterval().intValue());
@@ -1957,7 +1957,7 @@ public class AmbariMetaInfoTest {
     // find all enabled for the cluster should find 6 (the ones from HDFS;
     // it will not find the agent alert since it's not bound to the cluster)
     definitions = dao.findAllEnabled(cluster.getClusterId());
-    assertEquals(9, definitions.size());
+    assertEquals(10, definitions.size());
 
     // create new definition
     AlertDefinitionEntity entity = new AlertDefinitionEntity();
@@ -1976,19 +1976,19 @@ public class AmbariMetaInfoTest {
 
     // verify the new definition is found (6 HDFS + 1 new one)
     definitions = dao.findAllEnabled(cluster.getClusterId());
-    assertEquals(10, definitions.size());
+    assertEquals(11, definitions.size());
 
     // reconcile, which should disable our bad definition
     metaInfo.reconcileAlertDefinitions(clusters);
 
     // find all enabled for the cluster should find 6
     definitions = dao.findAllEnabled(cluster.getClusterId());
-    assertEquals(9, definitions.size());
+    assertEquals(10, definitions.size());
 
     // find all should find 6 HDFS + 1 disabled + 1 agent alert + 2 server
     // alerts
     definitions = dao.findAll();
-    assertEquals(11, definitions.size());
+    assertEquals(12, definitions.size());
 
     entity = dao.findById(entity.getDefinitionId());
     assertFalse(entity.getEnabled());

http://git-wip-us.apache.org/repos/asf/ambari/blob/98f0cfc3/ambari-server/src/test/java/org/apache/ambari/server/metadata/AgentAlertDefinitionsTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/metadata/AgentAlertDefinitionsTest.java b/ambari-server/src/test/java/org/apache/ambari/server/metadata/AgentAlertDefinitionsTest.java
index f5ce11e..970ceb9 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/metadata/AgentAlertDefinitionsTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/metadata/AgentAlertDefinitionsTest.java
@@ -49,7 +49,7 @@ public class AgentAlertDefinitionsTest {
   public void testLoadingAgentHostAlerts() {
     AmbariServiceAlertDefinitions ambariServiceAlertDefinitions = m_injector.getInstance(AmbariServiceAlertDefinitions.class);
     List<AlertDefinition> definitions = ambariServiceAlertDefinitions.getAgentDefinitions();
-    Assert.assertEquals(1, definitions.size());
+    Assert.assertEquals(2, definitions.size());
 
     for( AlertDefinition definition : definitions){
       Assert.assertEquals(Components.AMBARI_AGENT.name(),
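
The assertion changes above all move each expected total up by one. A minimal sketch of the arithmetic, assuming (as the commit title implies) that trunk gained exactly one new AMBARI_AGENT host alert definition — variable names here are illustrative, not from the patch:

    // One new agent host alert shifts every expected count by one; note the
    // unchanged inline comments still cite the old totals ("should find 6"),
    // since this commit only updated the assertions themselves.
    int definitionsBefore = 10;  // dao.findAll(clusterId) before the trunk change
    int addedAgentAlerts  = 1;   // the new AMBARI_AGENT host alert
    int definitionsNow    = definitionsBefore + addedAgentAlerts;  // 11
    int hostAlertsNow     = 1 + addedAgentAlerts;                  // 2
    int nonHostAlerts     = definitionsNow - hostAlertsNow;        // 9, unchanged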


[10/14] ambari git commit: AMBARI-18484 Add HDP-2.6 stack deriving from 2.5 (dsen)

Posted by jo...@apache.org.
AMBARI-18484 Add HDP-2.6 stack deriving from 2.5 (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/481a5429
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/481a5429
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/481a5429

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 481a542979ac523ee1a6c46a36a6213583cb0951
Parents: e1b1157
Author: Dmytro Sen <ds...@apache.org>
Authored: Thu Sep 29 12:38:45 2016 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Thu Sep 29 12:38:45 2016 +0300

----------------------------------------------------------------------
 .../main/resources/stacks/HDP/2.6/metainfo.xml  |  23 ++++
 .../resources/stacks/HDP/2.6/repos/repoinfo.xml | 132 +++++++++++++++++++
 .../HDP/2.6/services/ACCUMULO/metainfo.xml      |  26 ++++
 .../stacks/HDP/2.6/services/ATLAS/metainfo.xml  |  26 ++++
 .../stacks/HDP/2.6/services/FALCON/metainfo.xml |  26 ++++
 .../stacks/HDP/2.6/services/FLUME/metainfo.xml  |  26 ++++
 .../stacks/HDP/2.6/services/HBASE/metainfo.xml  |  26 ++++
 .../stacks/HDP/2.6/services/HDFS/metainfo.xml   |  26 ++++
 .../stacks/HDP/2.6/services/HIVE/metainfo.xml   |  26 ++++
 .../stacks/HDP/2.6/services/KAFKA/metainfo.xml  |  26 ++++
 .../HDP/2.6/services/KERBEROS/metainfo.xml      |  25 ++++
 .../stacks/HDP/2.6/services/KNOX/metainfo.xml   |  26 ++++
 .../stacks/HDP/2.6/services/MAHOUT/metainfo.xml |  26 ++++
 .../stacks/HDP/2.6/services/OOZIE/metainfo.xml  |  25 ++++
 .../stacks/HDP/2.6/services/PIG/metainfo.xml    |  26 ++++
 .../stacks/HDP/2.6/services/RANGER/metainfo.xml |  29 ++++
 .../HDP/2.6/services/RANGER_KMS/metainfo.xml    |  29 ++++
 .../stacks/HDP/2.6/services/SLIDER/metainfo.xml |  26 ++++
 .../stacks/HDP/2.6/services/SPARK/metainfo.xml  |  29 ++++
 .../stacks/HDP/2.6/services/SPARK2/metainfo.xml |  29 ++++
 .../stacks/HDP/2.6/services/SQOOP/metainfo.xml  |  26 ++++
 .../stacks/HDP/2.6/services/STORM/metainfo.xml  |  27 ++++
 .../stacks/HDP/2.6/services/TEZ/metainfo.xml    |  26 ++++
 .../stacks/HDP/2.6/services/YARN/metainfo.xml   |  27 ++++
 .../HDP/2.6/services/ZEPPELIN/metainfo.xml      |  27 ++++
 .../HDP/2.6/services/ZOOKEEPER/metainfo.xml     |  26 ++++
 .../stacks/HDP/2.6/services/stack_advisor.py    |  21 +++
 27 files changed, 813 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/metainfo.xml
new file mode 100644
index 0000000..729b6ee
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/metainfo.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <versions>
+    <active>true</active>
+  </versions>
+  <extends>2.5</extends>
+</metainfo>
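
The <extends>2.5</extends> element is what makes the new stack a thin overlay: anything not redefined under stacks/HDP/2.6 is inherited from HDP 2.5, and the per-service metainfo.xml files that follow only pin a service name and version (still carrying the .2.5 build suffix, since 2.6 initially repackages the 2.5 bits, as the repoinfo.xml below also suggests). As a hedged illustration of the same mechanism, a derived stack's service file could override more than the version; the comment below marks a hypothetical extension, not part of this commit:

    <metainfo>
      <schemaVersion>2.0</schemaVersion>
      <services>
        <service>
          <name>HDFS</name>
          <version>2.7.1.2.5</version>
          <!-- hypothetical: a child stack may also add or redefine components
               or configurations here; anything it does not mention is
               resolved from the parent stack named in <extends> -->
        </service>
      </services>
    </metainfo>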

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/repos/repoinfo.xml
new file mode 100644
index 0000000..1385fb4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/repos/repoinfo.xml
@@ -0,0 +1,132 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <latest>http://public-repo-1.hortonworks.com/HDP/hdp_urlinfo.json</latest>
+  <os family="redhat6">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0</baseurl>
+      <repoid>HDP-2.6</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.21</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
+  </os>
+  <os family="redhat7">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.5.0.0</baseurl>
+      <repoid>HDP-2.6</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos7</baseurl>
+      <repoid>HDP-UTILS-1.1.0.21</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
+  </os>
+  <os family="suse11">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11sp3/2.x/updates/2.5.0.0</baseurl>
+      <repoid>HDP-2.6</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/suse11sp3</baseurl>
+      <repoid>HDP-UTILS-1.1.0.21</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
+  </os>
+  <os family="suse12">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/sles12/2.x/updates/2.5.0.0</baseurl>
+      <repoid>HDP-2.6</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/suse11sp3</baseurl>
+      <repoid>HDP-UTILS-1.1.0.21</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
+  </os>
+  <os family="ubuntu12">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/ubuntu12/2.x/updates/2.5.0.0</baseurl>
+      <repoid>HDP-2.6</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/ubuntu12</baseurl>
+      <repoid>HDP-UTILS-1.1.0.21</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
+  </os>
+  <os family="debian7">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/debian7/2.x/updates/2.5.0.0</baseurl>
+      <repoid>HDP-2.6</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/debian6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.21</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
+  </os>
+  <os family="ubuntu14">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/ubuntu14/2.x/updates/2.5.0.0</baseurl>
+      <repoid>HDP-2.6</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/ubuntu12</baseurl>
+      <repoid>HDP-UTILS-1.1.0.21</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
+  </os>
+  <os family="ubuntu16">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/ubuntu14/2.x/updates/2.5.0.0</baseurl>
+      <repoid>HDP-2.6</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/ubuntu12</baseurl>
+      <repoid>HDP-UTILS-1.1.0.21</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
+  </os>
+</reposinfo>
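
Worth noting in the repo definitions above: every HDP-2.6 <baseurl> still points at the 2.5.0.0 update channel, the usual placeholder for a freshly branched stack before its own repositories exist, and the <latest> element gives Ambari a JSON feed it can consult for updated per-version URLs. The shape of each entry, with the assumed field semantics noted inline:

    <repo>
      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.5.0.0</baseurl>
      <repoid>HDP-2.6</repoid>   <!-- id written into the repo files generated on hosts -->
      <reponame>HDP</reponame>   <!-- display/logical repository name -->
      <unique>true</unique>      <!-- assumed: true marks a version-specific repo,
                                      false a shared one such as HDP-UTILS -->
    </repo>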

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/ACCUMULO/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/ACCUMULO/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ACCUMULO/metainfo.xml
new file mode 100644
index 0000000..ec81f3e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ACCUMULO/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ACCUMULO</name>
+      <version>1.7.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/metainfo.xml
new file mode 100644
index 0000000..92db1f9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ATLAS</name>
+      <version>0.7.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/FALCON/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/FALCON/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/FALCON/metainfo.xml
new file mode 100644
index 0000000..0b54385
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/FALCON/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>FALCON</name>
+      <version>0.10.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/FLUME/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/FLUME/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/FLUME/metainfo.xml
new file mode 100644
index 0000000..33ceb43
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/FLUME/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>FLUME</name>
+      <version>1.5.2.2.5</version>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HBASE/metainfo.xml
new file mode 100644
index 0000000..0feaa5e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HBASE/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <version>1.1.2.2.5</version>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/metainfo.xml
new file mode 100644
index 0000000..a3e4a64
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <version>2.7.1.2.5</version>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/metainfo.xml
new file mode 100644
index 0000000..3b43cc0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <version>1.2.1.2.5</version>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/KAFKA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/KAFKA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/KAFKA/metainfo.xml
new file mode 100644
index 0000000..12f6c45
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/KAFKA/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KAFKA</name>
+      <version>0.10.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/KERBEROS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/KERBEROS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/KERBEROS/metainfo.xml
new file mode 100644
index 0000000..1a931a3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/KERBEROS/metainfo.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KERBEROS</name>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/KNOX/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/KNOX/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/KNOX/metainfo.xml
new file mode 100644
index 0000000..8f303a5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/KNOX/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KNOX</name>
+      <version>0.9.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/MAHOUT/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/MAHOUT/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/MAHOUT/metainfo.xml
new file mode 100644
index 0000000..9ed6957
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/MAHOUT/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>MAHOUT</name>
+      <version>0.9.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/OOZIE/metainfo.xml
new file mode 100644
index 0000000..9e845c3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/OOZIE/metainfo.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>OOZIE</name>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/PIG/metainfo.xml
new file mode 100644
index 0000000..0dbf74c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/PIG/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>PIG</name>
+      <version>0.16.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/RANGER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/RANGER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/RANGER/metainfo.xml
new file mode 100644
index 0000000..6287e00
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/RANGER/metainfo.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>RANGER</name>
+      <version>0.6.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/RANGER_KMS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/RANGER_KMS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/RANGER_KMS/metainfo.xml
new file mode 100644
index 0000000..ab065d9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/RANGER_KMS/metainfo.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>RANGER_KMS</name>
+      <version>0.6.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/SLIDER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/SLIDER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SLIDER/metainfo.xml
new file mode 100644
index 0000000..6502bfe
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SLIDER/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SLIDER</name>
+      <version>0.80.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/metainfo.xml
new file mode 100644
index 0000000..d1129cd
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/metainfo.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SPARK</name>
+      <version>1.6.x.2.5</version>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK2/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK2/metainfo.xml
new file mode 100644
index 0000000..2f83cf4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK2/metainfo.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SPARK2</name>
+      <version>2.0.x.2.5</version>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SQOOP/metainfo.xml
new file mode 100644
index 0000000..eb67d63
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SQOOP/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SQOOP</name>
+      <version>1.4.6.2.5</version>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/STORM/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/STORM/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/STORM/metainfo.xml
new file mode 100644
index 0000000..c1e9490
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/STORM/metainfo.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>STORM</name>
+      <version>1.0.1.2.5</version>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/TEZ/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/TEZ/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/TEZ/metainfo.xml
new file mode 100644
index 0000000..30f0c25
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/TEZ/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>TEZ</name>
+      <version>0.7.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/metainfo.xml
new file mode 100644
index 0000000..4856d4a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/metainfo.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>YARN</name>
+      <version>2.7.1.2.5</version>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/ZEPPELIN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/ZEPPELIN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ZEPPELIN/metainfo.xml
new file mode 100644
index 0000000..0c3aa77
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ZEPPELIN/metainfo.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZEPPELIN</name>
+      <version>0.6.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ZOOKEEPER/metainfo.xml
new file mode 100644
index 0000000..de8ebd9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ZOOKEEPER/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <version>3.4.6.2.5</version>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/481a5429/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
new file mode 100644
index 0000000..1f722dc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+class HDP26StackAdvisor(HDP25StackAdvisor):
+  pass
\ No newline at end of file
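
Note that the advisor above references HDP25StackAdvisor without importing it: Ambari's stack-advisor loader evaluates the parent stacks' stack_advisor.py files into the same namespace before this one, so the base class is already defined. A minimal sketch of how a 2.6 override would typically build on the parent — the method shown is illustrative, not part of this commit:

    class HDP26StackAdvisor(HDP25StackAdvisor):
      def getServiceConfigurationRecommenderDict(self):
        # start from the 2.5 recommendations and layer 2.6-specific ones on top
        parentRecommendConfDict = super(HDP26StackAdvisor, self).getServiceConfigurationRecommenderDict()
        childRecommendConfDict = {}  # nothing 2.6-specific yet
        parentRecommendConfDict.update(childRecommendConfDict)
        return parentRecommendConfDict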


[09/14] ambari git commit: AMBARI-18482. MapReduce is showing incorrect component version in HDP 2.5 (aonishuk)

Posted by jo...@apache.org.
AMBARI-18482. MapReduce is showing incorrect component version in HDP 2.5 (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e1b11577
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e1b11577
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e1b11577

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: e1b1157748417c0fb23ef3b0cb1eddda601b9568
Parents: 7ab4380
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Thu Sep 29 10:24:16 2016 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Thu Sep 29 10:24:16 2016 +0300

----------------------------------------------------------------------
 .../main/resources/stacks/HDP/2.5/services/YARN/metainfo.xml    | 5 +++++
 1 file changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e1b11577/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/metainfo.xml
index 4856d4a..a63534d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/metainfo.xml
@@ -23,5 +23,10 @@
       <name>YARN</name>
       <version>2.7.1.2.5</version>
     </service>
+    <service>
+      <name>MAPREDUCE2</name>
+      <version>2.7.1.2.5</version>
+      <configuration-dir>configuration-mapred</configuration-dir>
+    </service>
   </services>
 </metainfo>
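
The new <configuration-dir> element is the interesting part: MAPREDUCE2 is declared inside the shared YARN service folder, so without it the stack would look for MAPREDUCE2 configs in the same default configuration/ directory YARN uses. A sketch of the conventional layout for that shared folder, with directory roles assumed from the element above:

    services/YARN/
      metainfo.xml            <!-- now declares both YARN and MAPREDUCE2 -->
      configuration/          <!-- YARN configs (yarn-site.xml, ...) -->
      configuration-mapred/   <!-- MAPREDUCE2 configs (mapred-site.xml, ...) -->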


[05/14] ambari git commit: AMBARI-18051 - Services should be able to provide their own pre-req checks by supplying a jar file

Posted by jo...@apache.org.
AMBARI-18051 - Services should be able to provide their own pre-req checks by supplying a jar file


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/26425953
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/26425953
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/26425953

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 26425953175f1167a7040b5c37985eee8724bb28
Parents: 84ecbb4
Author: Tim Thorpe <tt...@apache.org>
Authored: Wed Sep 28 11:33:21 2016 -0700
Committer: Tim Thorpe <tt...@apache.org>
Committed: Wed Sep 28 11:33:21 2016 -0700

----------------------------------------------------------------------
 .../org/apache/ambari/server/checks/UpgradeCheckOrderTest.java   | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/26425953/ambari-server/src/test/java/org/apache/ambari/server/checks/UpgradeCheckOrderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/UpgradeCheckOrderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/UpgradeCheckOrderTest.java
index cb03613..c7b4a79 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/UpgradeCheckOrderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/UpgradeCheckOrderTest.java
@@ -69,8 +69,10 @@ public class UpgradeCheckOrderTest {
     AssignableTypeFilter filter = new AssignableTypeFilter(AbstractCheckDescriptor.class);
     scanner.addIncludeFilter(filter);
 
+    // grab all check subclasses using the exact folder they are in to avoid loading the SampleServiceCheck from the test jar
+    Set<BeanDefinition> beanDefinitions = scanner.findCandidateComponents("org.apache.ambari.server.checks");
+
     // verify they are equal
-    Set<BeanDefinition> beanDefinitions = scanner.findCandidateComponents("org.apache.ambari.server");
     Assert.assertEquals(beanDefinitions.size(), checks.size());
 
     AbstractCheckDescriptor lastCheck = null;
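
For context, the test uses Spring's classpath scanning, and the one-line move narrows the scanned package so the service-supplied SampleServiceCheck (shipped in a test jar for this feature) no longer lands in the result set. A self-contained sketch of the scanning pattern, assuming Spring and the Ambari check classes are on the classpath:

    import java.util.Set;

    import org.apache.ambari.server.checks.AbstractCheckDescriptor;
    import org.springframework.beans.factory.config.BeanDefinition;
    import org.springframework.context.annotation.ClassPathScanningCandidateComponentProvider;
    import org.springframework.core.type.filter.AssignableTypeFilter;

    public class CheckScanSketch {
      public static void main(String[] args) {
        // false: start with no default (annotation-based) include filters
        ClassPathScanningCandidateComponentProvider scanner =
            new ClassPathScanningCandidateComponentProvider(false);
        scanner.addIncludeFilter(new AssignableTypeFilter(AbstractCheckDescriptor.class));

        // scanning only the checks package keeps externally supplied checks
        // (e.g. a service's sample-check test jar) out of the candidate set
        Set<BeanDefinition> beanDefinitions =
            scanner.findCandidateComponents("org.apache.ambari.server.checks");
        System.out.println(beanDefinitions.size() + " check(s) found");
      }
    }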


[07/14] ambari git commit: AMBARI-18462. Cannot enable Atlas HA in HDP 2.5 because server cardinality should be 1+ (alejandro)

Posted by jo...@apache.org.
AMBARI-18462. Cannot enable Atlas HA in HDP 2.5 because server cardinality should be 1+ (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/06282e02
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/06282e02
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/06282e02

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 06282e02bbf54df8cd5163a83a11637765be2ff1
Parents: 98f0cfc
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Wed Sep 28 13:10:52 2016 -0700
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Wed Sep 28 13:10:52 2016 -0700

----------------------------------------------------------------------
 .../src/main/resources/stacks/HDP/2.5/services/ATLAS/metainfo.xml  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
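
The diff that follows is a one-character fix with real semantics: a cardinality of "1" pins a component to exactly one instance, while "1+" allows one or more, which is what lets a second Atlas Metadata Server be placed for HA. A sketch of the relevant fragment, with the assumed meaning of the two values noted inline:

    <category>MASTER</category>
    <!-- "1"  : exactly one ATLAS_SERVER may exist in the cluster
         "1+" : at least one, so additional Metadata Servers can be added for HA -->
    <cardinality>1+</cardinality>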


http://git-wip-us.apache.org/repos/asf/ambari/blob/06282e02/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/metainfo.xml
index 72d612a..98ec263 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/metainfo.xml
@@ -27,7 +27,7 @@
           <name>ATLAS_SERVER</name>
           <displayName>Atlas Metadata Server</displayName>
           <category>MASTER</category>
-          <cardinality>1</cardinality>
+          <cardinality>1+</cardinality>
           <versionAdvertised>true</versionAdvertised>
           <dependencies>
             <dependency>