Posted to commits@ambari.apache.org by ao...@apache.org on 2019/05/08 07:16:50 UTC

[ambari] 01/04: [AMBARI-25266] fix test failures on branch-2.5 (ihorlukianov)

This is an automated email from the ASF dual-hosted git repository.

aonishuk pushed a commit to branch branch-2.5
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit 182c4f3b730265917d4f6b1ae3842e9874b81533
Author: Ihor Lukianov <ih...@cloudera.com>
AuthorDate: Fri May 3 11:51:26 2019 +0300

    [AMBARI-25266] fix test failures on branch-2.5 (ihorlukianov)
---
 .../server/metadata/RoleCommandOrderTest.java      |  45 +++++----
 .../ambari/server/metadata/RoleGraphTest.java      |   9 +-
 .../server/stageplanner/TestStagePlanner.java      |   5 +
 .../stacks/HDP/0.2/role_command_order.json         | 104 +++++++++++++++++++++
 4 files changed, 143 insertions(+), 20 deletions(-)

diff --git a/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleCommandOrderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleCommandOrderTest.java
index a8eadb6..7613c17 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleCommandOrderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleCommandOrderTest.java
@@ -18,6 +18,15 @@
 
 package org.apache.ambari.server.metadata;
 
+import static junit.framework.Assert.assertEquals;
+import static junit.framework.Assert.assertFalse;
+import static junit.framework.Assert.assertNotNull;
+import static junit.framework.Assert.assertTrue;
+import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
+
 import java.io.IOException;
 import java.io.InputStream;
 import java.sql.SQLException;
@@ -55,14 +64,6 @@ import com.google.inject.Guice;
 import com.google.inject.Injector;
 
 import junit.framework.Assert;
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertFalse;
-import static junit.framework.Assert.assertNotNull;
-import static junit.framework.Assert.assertTrue;
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
 
 public class RoleCommandOrderTest {
 
@@ -95,7 +96,8 @@ public class RoleCommandOrderTest {
     ClusterImpl cluster = createMock(ClusterImpl.class);
     Service service = createMock(Service.class);
     expect(cluster.getClusterId()).andReturn(1L);
-    expect(cluster.getCurrentStackVersion()).andReturn(new StackId("HDP", "2.0.6"));
+    expect(cluster.getClusterName()).andReturn("c1");
+    expect(cluster.getDesiredStackVersion()).andReturn(new StackId("HDP", "2.0.6"));
     expect(cluster.getService("GLUSTERFS")).andReturn(service);
     expect(cluster.getService("HDFS")).andReturn(null);
     expect(cluster.getService("YARN")).andReturn(null);
@@ -138,12 +140,13 @@ public class RoleCommandOrderTest {
     ClusterImpl cluster = createMock(ClusterImpl.class);
     expect(cluster.getService("GLUSTERFS")).andReturn(null);
     expect(cluster.getClusterId()).andReturn(1L);
+    expect(cluster.getClusterName()).andReturn("c1");
     Service hdfsService = createMock(Service.class);
 
     expect(cluster.getService("HDFS")).andReturn(hdfsService).atLeastOnce();
     expect(cluster.getService("YARN")).andReturn(null).atLeastOnce();
     expect(hdfsService.getServiceComponent("JOURNALNODE")).andReturn(null);
-    expect(cluster.getCurrentStackVersion()).andReturn(new StackId("HDP", "2.0.6"));
+    expect(cluster.getDesiredStackVersion()).andReturn(new StackId("HDP", "2.0.6"));
 
     replay(cluster);
     replay(hdfsService);
@@ -181,13 +184,14 @@ public class RoleCommandOrderTest {
     ClusterImpl cluster = createMock(ClusterImpl.class);
     expect(cluster.getService("GLUSTERFS")).andReturn(null);
     expect(cluster.getClusterId()).andReturn(1L);
+    expect(cluster.getClusterName()).andReturn("c1");
     Service hdfsService = createMock(Service.class);
     ServiceComponent journalnodeSC = createMock(ServiceComponent.class);
 
     expect(cluster.getService("HDFS")).andReturn(hdfsService).atLeastOnce();
     expect(cluster.getService("YARN")).andReturn(null);
     expect(hdfsService.getServiceComponent("JOURNALNODE")).andReturn(journalnodeSC);
-    expect(cluster.getCurrentStackVersion()).andReturn(new StackId("HDP", "2.0.6"));
+    expect(cluster.getDesiredStackVersion()).andReturn(new StackId("HDP", "2.0.6"));
 
     replay(cluster);
     replay(hdfsService);
@@ -222,8 +226,9 @@ public class RoleCommandOrderTest {
     ServiceComponentHost sch2 = createMock(ServiceComponentHostImpl.class);
     expect(cluster.getService("GLUSTERFS")).andReturn(null);
     expect(cluster.getClusterId()).andReturn(1L);
+    expect(cluster.getClusterName()).andReturn("c1");
 
-    Map<String, ServiceComponentHost> hostComponents = new HashMap<String, ServiceComponentHost>();
+    Map<String, ServiceComponentHost> hostComponents = new HashMap<>();
     hostComponents.put("1",sch1);
     hostComponents.put("2",sch2);
 
@@ -234,7 +239,7 @@ public class RoleCommandOrderTest {
     expect(cluster.getService("HDFS")).andReturn(null);
     expect(yarnService.getServiceComponent("RESOURCEMANAGER")).andReturn(resourcemanagerSC).anyTimes();
     expect(resourcemanagerSC.getServiceComponentHosts()).andReturn(hostComponents).anyTimes();
-    expect(cluster.getCurrentStackVersion()).andReturn(new StackId("HDP", "2.0.6"));
+    expect(cluster.getDesiredStackVersion()).andReturn(new StackId("HDP", "2.0.6"));
 
     replay(cluster, yarnService, sch1, sch2, resourcemanagerSC);
 
@@ -274,6 +279,7 @@ public class RoleCommandOrderTest {
     ServiceComponentHost sch2 = createMock(ServiceComponentHostImpl.class);
     expect(cluster.getService("GLUSTERFS")).andReturn(null);
     expect(cluster.getClusterId()).andReturn(1L);
+    expect(cluster.getClusterName()).andReturn("c1");
 
     Map<String, ServiceComponentHost> hostComponents = new HashMap<>();
     hostComponents.put("1", sch1);
@@ -286,7 +292,7 @@ public class RoleCommandOrderTest {
     expect(cluster.getService("HDFS")).andReturn(null);
     expect(yarnService.getServiceComponent("RESOURCEMANAGER")).andReturn(resourcemanagerSC).anyTimes();
     expect(resourcemanagerSC.getServiceComponentHosts()).andReturn(hostComponents).anyTimes();
-    expect(cluster.getCurrentStackVersion()).andReturn(new StackId("HDP", "2.0.6"));
+    expect(cluster.getDesiredStackVersion()).andReturn(new StackId("HDP", "2.0.6"));
 
     replay(cluster, yarnService, sch1, sch2, resourcemanagerSC);
 
@@ -373,13 +379,14 @@ public class RoleCommandOrderTest {
     ClusterImpl cluster = createMock(ClusterImpl.class);
     expect(cluster.getService("GLUSTERFS")).andReturn(null);
     expect(cluster.getClusterId()).andReturn(1L);
+    expect(cluster.getClusterName()).andReturn("c1");
     Service hdfsService = createMock(Service.class);
 
     expect(cluster.getService("HDFS")).andReturn(hdfsService).atLeastOnce();
     expect(cluster.getService("YARN")).andReturn(null);
     expect(hdfsService.getServiceComponent("JOURNALNODE")).andReturn(null);
     //There is no rco file in this stack, should use default
-    expect(cluster.getCurrentStackVersion()).andReturn(new StackId("HDP", "2.0.5"));
+    expect(cluster.getDesiredStackVersion()).andReturn(new StackId("HDP", "2.0.5"));
 
     replay(cluster);
     replay(hdfsService);
@@ -405,6 +412,7 @@ public class RoleCommandOrderTest {
     Service hbaseService = createMock(Service.class);
     expect(cluster.getService("HBASE")).andReturn(hbaseService).atLeastOnce();
     expect(cluster.getClusterId()).andReturn(1L);
+    expect(cluster.getClusterName()).andReturn("c1");
     expect(hbaseService.getCluster()).andReturn(cluster).anyTimes();
 
     ServiceComponent hbaseMaster = createMock(ServiceComponent.class);
@@ -414,7 +422,7 @@ public class RoleCommandOrderTest {
         "HBASE_MASTER", hbaseMaster);
     expect(hbaseService.getServiceComponents()).andReturn(hbaseComponents).anyTimes();
 
-    Map<String, Service> installedServices = new HashMap<String, Service>();
+    Map<String, Service> installedServices = new HashMap<>();
     installedServices.put("HDFS", hdfsService);
     installedServices.put("HBASE", hbaseService);
     expect(cluster.getServices()).andReturn(installedServices).atLeastOnce();
@@ -424,7 +432,7 @@ public class RoleCommandOrderTest {
     expect(cluster.getService("GLUSTERFS")).andReturn(null);
     expect(cluster.getService("YARN")).andReturn(null);
     expect(hdfsService.getServiceComponent("JOURNALNODE")).andReturn(null);
-    expect(cluster.getCurrentStackVersion()).andReturn(new StackId("HDP", "2.0.5"));
+    expect(cluster.getDesiredStackVersion()).andReturn(new StackId("HDP", "2.0.5"));
 
     //replay
     replay(cluster, hdfsService, hbaseService, hbaseMaster, namenode);
@@ -460,6 +468,7 @@ public class RoleCommandOrderTest {
     ClusterImpl cluster = createMock(ClusterImpl.class);
     expect(cluster.getService("GLUSTERFS")).andReturn(null).atLeastOnce();
     expect(cluster.getClusterId()).andReturn(1L).atLeastOnce();
+    expect(cluster.getClusterName()).andReturn("c1").atLeastOnce();
     Service hdfsService = createMock(Service.class);
 
     expect(cluster.getService("HDFS")).andReturn(hdfsService).atLeastOnce();
@@ -467,7 +476,7 @@ public class RoleCommandOrderTest {
     expect(hdfsService.getServiceComponent("JOURNALNODE")).andReturn(null);
 
     // There is no rco file in this stack, should use default
-    expect(cluster.getCurrentStackVersion()).andReturn(new StackId("HDP", "2.2.0")).atLeastOnce();
+    expect(cluster.getDesiredStackVersion()).andReturn(new StackId("HDP", "2.2.0")).atLeastOnce();
 
     replay(cluster);
     replay(hdfsService);
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleGraphTest.java b/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleGraphTest.java
index 39b7ed4..c67d072 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleGraphTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleGraphTest.java
@@ -19,6 +19,9 @@
 package org.apache.ambari.server.metadata;
 
 
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
 import java.sql.SQLException;
 import java.util.HashMap;
 import java.util.List;
@@ -48,8 +51,6 @@ import com.google.inject.Guice;
 import com.google.inject.Injector;
 
 import junit.framework.Assert;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
 
 public class RoleGraphTest {
 
@@ -76,7 +77,9 @@ public class RoleGraphTest {
   public void testValidateOrder() throws AmbariException {
     ClusterImpl cluster = mock(ClusterImpl.class);
     when(cluster.getCurrentStackVersion()).thenReturn(new StackId("HDP-2.0.6"));
+    when(cluster.getDesiredStackVersion()).thenReturn(new StackId("HDP-2.0.6"));
     when(cluster.getClusterId()).thenReturn(1L);
+    when(cluster.getClusterName()).thenReturn("c1");
 
     RoleCommandOrder rco = roleCommandOrderProvider.getRoleCommandOrder(cluster);
 
@@ -163,7 +166,9 @@ public class RoleGraphTest {
   public void testGetOrderedHostRoleCommands() throws AmbariException {
     ClusterImpl cluster = mock(ClusterImpl.class);
     when(cluster.getCurrentStackVersion()).thenReturn(new StackId("HDP-2.0.6"));
+    when(cluster.getDesiredStackVersion()).thenReturn(new StackId("HDP-2.0.6"));
     when(cluster.getClusterId()).thenReturn(1L);
+    when(cluster.getClusterName()).thenReturn("c1");
 
     RoleCommandOrder rco = roleCommandOrderProvider.getRoleCommandOrder(cluster);
     RoleGraph roleGraph = roleGraphFactory.createNew(rco);
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java b/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java
index 3e592b2..32161b9 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java
@@ -77,6 +77,7 @@ public class TestStagePlanner {
   public void testSingleStagePlan() throws AmbariException {
     ClusterImpl cluster = mock(ClusterImpl.class);
     when(cluster.getCurrentStackVersion()).thenReturn(new StackId("HDP-2.0.6"));
+    when(cluster.getDesiredStackVersion()).thenReturn(new StackId("HDP-2.0.6"));
 
     RoleCommandOrder rco = roleCommandOrderProvider.getRoleCommandOrder(cluster);
 
@@ -97,6 +98,7 @@ public class TestStagePlanner {
   public void testMultiStagePlan() {
     ClusterImpl cluster = mock(ClusterImpl.class);
     when(cluster.getCurrentStackVersion()).thenReturn(new StackId("HDP-2.0.6"));
+    when(cluster.getDesiredStackVersion()).thenReturn(new StackId("HDP-2.0.6"));
     RoleCommandOrder rco = roleCommandOrderProvider.getRoleCommandOrder(cluster);
     RoleGraph rg = roleGraphFactory.createNew(rco);
     long now = System.currentTimeMillis();
@@ -122,6 +124,7 @@ public class TestStagePlanner {
   public void testRestartStagePlan() {
     ClusterImpl cluster = mock(ClusterImpl.class);
     when(cluster.getCurrentStackVersion()).thenReturn(new StackId("HDP-2.0.6"));
+    when(cluster.getDesiredStackVersion()).thenReturn(new StackId("HDP-2.0.6"));
     RoleCommandOrder rco = roleCommandOrderProvider.getRoleCommandOrder(cluster);
     RoleGraph rg = roleGraphFactory.createNew(rco);
     long now = System.currentTimeMillis();
@@ -151,6 +154,7 @@ public class TestStagePlanner {
   public void testManyStages() {
     ClusterImpl cluster = mock(ClusterImpl.class);
     when(cluster.getCurrentStackVersion()).thenReturn(new StackId("HDP-2.0.6"));
+    when(cluster.getDesiredStackVersion()).thenReturn(new StackId("HDP-2.0.6"));
     RoleCommandOrder rco = roleCommandOrderProvider.getRoleCommandOrder(cluster);
     RoleGraph rg = roleGraphFactory.createNew(rco);
     long now = System.currentTimeMillis();
@@ -202,6 +206,7 @@ public class TestStagePlanner {
   public void testDependencyOrderedStageCreate() {
     ClusterImpl cluster = mock(ClusterImpl.class);
     when(cluster.getCurrentStackVersion()).thenReturn(new StackId("HDP-2.0.6"));
+    when(cluster.getDesiredStackVersion()).thenReturn(new StackId("HDP-2.0.6"));
     RoleCommandOrder rco = roleCommandOrderProvider.getRoleCommandOrder(cluster);
     RoleGraph rg = roleGraphFactory.createNew(rco);
     rg.setCommandExecutionType(CommandExecutionType.DEPENDENCY_ORDERED);
diff --git a/ambari-server/src/test/resources/stacks/HDP/0.2/role_command_order.json b/ambari-server/src/test/resources/stacks/HDP/0.2/role_command_order.json
new file mode 100644
index 0000000..870a537
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/0.2/role_command_order.json
@@ -0,0 +1,104 @@
+{
+  "_comment" : "Record format:",
+  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
+  "general_deps" : {
+    "_comment" : "dependencies for all cases",
+    "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
+    "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
+    "OOZIE_SERVER-START": ["JOBTRACKER-START", "TASKTRACKER-START"],
+    "WEBHCAT_SERVER-START": ["TASKTRACKER-START", "HIVE_SERVER-START"],
+    "WEBHCAT_SERVER-RESTART": ["TASKTRACKER-RESTART", "HIVE_SERVER-RESTART"],
+    "HIVE_METASTORE-START": ["MYSQL_SERVER-START"],
+    "HIVE_METASTORE-RESTART": ["MYSQL_SERVER-RESTART"],
+    "HIVE_SERVER-START": ["TASKTRACKER-START", "MYSQL_SERVER-START"],
+    "HIVE_SERVER-RESTART": ["TASKTRACKER-RESTART", "MYSQL_SERVER-RESTART"],
+    "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
+    "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
+    "ACCUMULO_MASTER-START": ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START"],
+    "ACCUMULO_TSERVER-START": ["ACCUMULO_MASTER-START"],
+    "ACCUMULO_MONITOR-START": ["ACCUMULO_MASTER-START"],
+    "ACCUMULO_GC-START": ["ACCUMULO_MASTER-START"],
+    "ACCUMULO_TRACER-START": ["ACCUMULO_MASTER-START", "ACCUMULO_TSERVER-START"],
+    "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
+    "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
+    "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
+    "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
+    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START"],
+    "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"],
+    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
+    "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
+    "ACCUMULO_SERVICE_CHECK-SERVICE_CHECK": ["ACCUMULO_MASTER-START", "ACCUMULO_TSERVER-START", "ACCUMULO_TRACER-START"],
+    "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+    "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+    "ZOOKEEPER_SERVER-STOP" : ["HBASE_MASTER-STOP", "HBASE_REGIONSERVER-STOP", "ACCUMULO_MASTER-STOP", "ACCUMULO_TSERVER-STOP", "ACCUMULO_GC-STOP"],
+    "ACCUMULO_MONITOR-STOP" : ["ACCUMULO_MASTER-STOP"],
+    "ACCUMULO_MASTER-STOP" : ["ACCUMULO_TSERVER-STOP"],
+    "ACCUMULO_TSERVER-STOP" : ["ACCUMULO_TRACER-STOP", "ACCUMULO_GC-STOP"],
+    "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
+    "TASKTRACKER-UPGRADE": ["JOBTRACKER-UPGRADE"],
+    "MAPREDUCE_CLIENT-UPGRADE": ["TASKTRACKER-UPGRADE", "JOBTRACKER-UPGRADE"],
+    "ZOOKEEPER_SERVER-UPGRADE": ["MAPREDUCE_CLIENT-UPGRADE"],
+    "ZOOKEEPER_CLIENT-UPGRADE": ["ZOOKEEPER_SERVER-UPGRADE"],
+    "HBASE_MASTER-UPGRADE": ["ZOOKEEPER_CLIENT-UPGRADE"],
+    "HBASE_REGIONSERVER-UPGRADE": ["HBASE_MASTER-UPGRADE"],
+    "HBASE_CLIENT-UPGRADE": ["HBASE_REGIONSERVER-UPGRADE"],
+    "HIVE_SERVER-UPGRADE" : ["HBASE_CLIENT-UPGRADE"],
+    "HIVE_METASTORE-UPGRADE" : ["HIVE_SERVER-UPGRADE"],
+    "MYSQL_SERVER-UPGRADE": ["HIVE_METASTORE-UPGRADE"],
+    "HIVE_CLIENT-UPGRADE": ["MYSQL_SERVER-UPGRADE"],
+    "HCAT-UPGRADE": ["HIVE_CLIENT-UPGRADE"],
+    "OOZIE_SERVER-UPGRADE" : ["HCAT-UPGRADE"],
+    "OOZIE_CLIENT-UPGRADE" : ["OOZIE_SERVER-UPGRADE"],
+    "WEBHCAT_SERVER-UPGRADE" : ["OOZIE_CLIENT-UPGRADE"],
+    "PIG-UPGRADE" : ["WEBHCAT_SERVER-UPGRADE"],
+    "SQOOP-UPGRADE" : ["PIG-UPGRADE"],
+    "GANGLIA_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
+    "GANGLIA_MONITOR-UPGRADE" : ["GANGLIA_SERVER-UPGRADE"]
+  },
+  "_comment" : "GLUSTERFS-specific dependencies",
+  "optional_glusterfs": {
+    "HBASE_MASTER-START": ["PEERSTATUS-START"],
+    "JOBTRACKER-START": ["PEERSTATUS-START"],
+    "TASKTRACKER-START": ["PEERSTATUS-START"],
+    "GLUSTERFS_SERVICE_CHECK-SERVICE_CHECK": ["PEERSTATUS-START"],
+    "JOBTRACKER-UPGRADE": ["GLUSTERFS_CLIENT-UPGRADE"]
+  },
+  "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
+  "optional_no_glusterfs": {
+    "SECONDARY_NAMENODE-START": ["NAMENODE-START"],
+    "RESOURCEMANAGER-START": ["NAMENODE-START", "DATANODE-START"],
+    "NODEMANAGER-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START"],
+    "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"],
+    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START"],
+    "JOBTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
+    "TASKTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
+    "HIVE_SERVER-START": ["DATANODE-START"],
+    "WEBHCAT_SERVER-START": ["DATANODE-START"],
+    "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
+        "SECONDARY_NAMENODE-START"],
+    "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
+        "RESOURCEMANAGER-START", "HISTORYSERVER-START", "YARN_SERVICE_CHECK-SERVICE_CHECK"],
+    "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"],
+    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "NAMENODE-STOP": ["JOBTRACKER-STOP", "TASKTRACKER-STOP", "RESOURCEMANAGER-STOP",
+        "NODEMANAGER-STOP", "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
+    "DATANODE-STOP": ["JOBTRACKER-STOP", "TASKTRACKER-STOP", "RESOURCEMANAGER-STOP",
+        "NODEMANAGER-STOP", "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
+    "SECONDARY_NAMENODE-UPGRADE": ["NAMENODE-UPGRADE"],
+    "DATANODE-UPGRADE": ["SECONDARY_NAMENODE-UPGRADE"],
+    "HDFS_CLIENT-UPGRADE": ["DATANODE-UPGRADE"],
+    "JOBTRACKER-UPGRADE": ["HDFS_CLIENT-UPGRADE"]
+  },
+  "_comment" : "Dependencies that are used in HA NameNode cluster",
+  "namenode_optional_ha": {
+    "NAMENODE-START": ["JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
+    "ZKFC-START": ["NAMENODE-START"],
+    "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["ZKFC-START"]
+  },
+  "_comment" : "Dependencies that are used in ResourceManager HA cluster",
+  "resourcemanager_optional_ha" : {
+    "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
+  }
+}
+
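
A minimal sketch (not part of the commit) of the stubbing pattern the hunks above apply: the
EasyMock-based tests switch the mocked cluster from getCurrentStackVersion() to
getDesiredStackVersion() and add a getClusterName() stub alongside getClusterId(). The helper
class and method names below are hypothetical, and the package paths for ClusterImpl and
StackId are assumed from the Ambari source tree; the stubbed calls and return values are taken
from the diff itself.

    import static org.easymock.EasyMock.createMock;
    import static org.easymock.EasyMock.expect;
    import static org.easymock.EasyMock.replay;

    import org.apache.ambari.server.state.StackId;              // assumed package
    import org.apache.ambari.server.state.cluster.ClusterImpl;  // assumed package

    public class ClusterMockSketch {
        // Hypothetical helper showing the expectations the updated tests set up
        // before handing the cluster to the role command order provider.
        static ClusterImpl mockCluster() {
            ClusterImpl cluster = createMock(ClusterImpl.class);
            expect(cluster.getClusterId()).andReturn(1L);
            expect(cluster.getClusterName()).andReturn("c1");      // newly required stub
            expect(cluster.getDesiredStackVersion())               // replaces getCurrentStackVersion()
                .andReturn(new StackId("HDP", "2.0.6"));
            replay(cluster);
            return cluster;
        }
    }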