Posted to commits@ambari.apache.org by js...@apache.org on 2014/11/10 22:32:04 UTC

[05/11] ambari git commit: AMBARI-7175. Add explicit stack service inheritance

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fc7adec/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
new file mode 100644
index 0000000..a6cbc6a
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
@@ -0,0 +1,594 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.stack;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.metadata.ActionMetadata;
+import org.apache.ambari.server.orm.dao.MetainfoDAO;
+import org.apache.ambari.server.state.ClientConfigFileDefinition;
+import org.apache.ambari.server.state.CommandScriptDefinition;
+import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.PropertyInfo;
+import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.ServiceOsSpecific;
+import org.apache.ambari.server.state.StackInfo;
+import org.apache.ambari.server.state.stack.OsFamily;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.File;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * StackManager unit tests.
+ */
+public class StackManagerTest {
+
+  private static StackManager stackManager;
+  private static MetainfoDAO dao;
+  private static ActionMetadata actionMetadata;
+  private static OsFamily osFamily;
+
+  @BeforeClass
+  public static void initStack() throws Exception{
+    stackManager = createTestStackManager();
+  }
+
+  public static StackManager createTestStackManager() throws Exception {
+    return createTestStackManager("./src/test/resources/stacks/");
+  }
+
+  public static StackManager createTestStackManager(String stackRoot) throws Exception {
+    try {
+      //todo: dao , actionMetaData expectations
+      dao = createNiceMock(MetainfoDAO.class);
+      actionMetadata = createNiceMock(ActionMetadata.class);
+      Configuration config = createNiceMock(Configuration.class);
+      expect(config.getSharedResourcesDirPath()).andReturn("./src/test/resources").anyTimes();
+      replay(config);
+      osFamily = new OsFamily(config);
+
+      replay(dao, actionMetadata);
+      return new StackManager(new File(stackRoot), new StackContext(dao, actionMetadata, osFamily));
+    } catch (Exception e) {
+      e.printStackTrace();
+      throw e;
+    }
+  }
+
+  @Test
+  public void testGetStacks_count() throws Exception {
+    Collection<StackInfo> stacks = stackManager.getStacks();
+    assertEquals(16, stacks.size());
+  }
+
+  @Test
+  public void testGetStack_name__count() {
+    Collection<StackInfo> stacks = stackManager.getStacks("HDP");
+    assertEquals(12, stacks.size());
+
+    stacks = stackManager.getStacks("OTHER");
+    assertEquals(2, stacks.size());
+  }
+
+  @Test
+  public void testGetStack_basic() {
+    StackInfo stack = stackManager.getStack("HDP", "0.1");
+    assertNotNull(stack);
+    assertEquals("HDP", stack.getName());
+    assertEquals("0.1", stack.getVersion());
+
+
+    Collection<ServiceInfo> services = stack.getServices();
+    assertEquals(3, services.size());
+
+    Map<String, ServiceInfo> serviceMap = new HashMap<String, ServiceInfo>();
+    for (ServiceInfo service : services) {
+      serviceMap.put(service.getName(), service);
+    }
+    ServiceInfo hdfsService = serviceMap.get("HDFS");
+    assertNotNull(hdfsService);
+    List<ComponentInfo> components = hdfsService.getComponents();
+    assertEquals(6, components.size());
+    List<PropertyInfo> properties = hdfsService.getProperties();
+    assertEquals(62, properties.size());
+
+    // test a couple of the properties for filename
+    boolean hdfsPropFound = false;
+    boolean hbasePropFound = false;
+    for (PropertyInfo p : properties) {
+      if (p.getName().equals("hbase.regionserver.msginterval")) {
+        assertEquals("hbase-site.xml", p.getFilename());
+        hbasePropFound = true;
+      } else if (p.getName().equals("dfs.name.dir")) {
+        assertEquals("hdfs-site.xml", p.getFilename());
+        hdfsPropFound = true;
+      }
+    }
+    assertTrue(hbasePropFound);
+    assertTrue(hdfsPropFound);
+
+    ServiceInfo mrService = serviceMap.get("MAPREDUCE");
+    assertNotNull(mrService);
+    components = mrService.getComponents();
+    assertEquals(3, components.size());
+
+    ServiceInfo pigService = serviceMap.get("PIG");
+    assertNotNull(pigService);
+    assertEquals("PIG", pigService.getName());
+    assertEquals("1.0", pigService.getVersion());
+    assertNull(pigService.getParent());
+    assertEquals("This is comment for PIG service", pigService.getComment());
+    components = pigService.getComponents();
+    assertEquals(1, components.size());
+    CommandScriptDefinition commandScript = pigService.getCommandScript();
+    assertEquals("scripts/service_check.py", commandScript.getScript());
+    assertEquals(CommandScriptDefinition.Type.PYTHON, commandScript.getScriptType());
+    assertEquals(300, commandScript.getTimeout());
+    List<String> configDependencies = pigService.getConfigDependencies();
+    assertEquals(1, configDependencies.size());
+    assertEquals("global", configDependencies.get(0));
+    assertEquals("global", pigService.getConfigDependenciesWithComponents().get(0));
+    ComponentInfo client = pigService.getClientComponent();
+    assertNotNull(client);
+    assertEquals("PIG", client.getName());
+    assertEquals("0+", client.getCardinality());
+    assertEquals("CLIENT", client.getCategory());
+    assertEquals("configuration", pigService.getConfigDir());
+    assertEquals("2.0", pigService.getSchemaVersion());
+    Map<String, ServiceOsSpecific> osInfoMap = pigService.getOsSpecifics();
+    assertEquals(1, osInfoMap.size());
+    ServiceOsSpecific osSpecific = osInfoMap.get("centos6");
+    assertNotNull(osSpecific);
+    assertEquals("centos6", osSpecific.getOsFamily());
+    assertNull(osSpecific.getRepo());
+    List<ServiceOsSpecific.Package> packages = osSpecific.getPackages();
+    assertEquals(1, packages.size());
+    ServiceOsSpecific.Package pkg = packages.get(0);
+    assertEquals("pig", pkg.getName());
+  }
+
+  @Test
+  public void testStackVersionInheritance_includeAllServices() {
+    StackInfo stack = stackManager.getStack("HDP", "2.1.1");
+    assertNotNull(stack);
+    assertEquals("HDP", stack.getName());
+    assertEquals("2.1.1", stack.getVersion());
+    Collection<ServiceInfo> services = stack.getServices();
+
+    //should include all stacks in hierarchy
+    assertEquals(14, services.size());
+    HashSet<String> expectedServices = new HashSet<String>();
+    expectedServices.add("GANGLIA");
+    expectedServices.add("HBASE");
+    expectedServices.add("HCATALOG");
+    expectedServices.add("HDFS");
+    expectedServices.add("HIVE");
+    expectedServices.add("MAPREDUCE2");
+    expectedServices.add("NAGIOS");
+    expectedServices.add("OOZIE");
+    expectedServices.add("PIG");
+    expectedServices.add("SQOOP");
+    expectedServices.add("YARN");
+    expectedServices.add("ZOOKEEPER");
+    expectedServices.add("STORM");
+    expectedServices.add("FLUME");
+
+    ServiceInfo pigService = null;
+    for (ServiceInfo service : services) {
+      if (service.getName().equals("PIG")) {
+        pigService = service;
+      }
+      assertTrue(expectedServices.remove(service.getName()));
+    }
+    assertTrue(expectedServices.isEmpty());
+
+    // extended values
+    assertNotNull(pigService);
+    assertEquals("0.12.1.2.1.1", pigService.getVersion());
+    assertEquals("Scripting platform for analyzing large datasets (Extended)", pigService.getComment());
+    //base value
+    ServiceInfo basePigService = stackManager.getStack("HDP", "2.0.5").getService("PIG");
+    assertEquals("0.11.1.2.0.5.0", basePigService.getVersion());
+    assertEquals(1, basePigService.getComponents().size());
+    // new component added in extended version
+    assertEquals(2, pigService.getComponents().size());
+    // no properties in base service
+    assertEquals(0, basePigService.getProperties().size());
+    assertEquals(1, pigService.getProperties().size());
+    assertEquals("content", pigService.getProperties().get(0).getName());
+  }
+
+  @Test
+  public void testGetStack_explicitServiceExtension() {
+    StackInfo stack = stackManager.getStack("OTHER", "1.0");
+    assertNotNull(stack);
+    assertEquals("OTHER", stack.getName());
+    assertEquals("1.0", stack.getVersion());
+    Collection<ServiceInfo> services = stack.getServices();
+
+    assertEquals(3, services.size());
+
+    // hdfs service
+    assertEquals(6, stack.getService("HDFS").getComponents().size());
+
+    // Extended Sqoop service via explicit service extension
+    ServiceInfo sqoopService = stack.getService("SQOOP2");
+    assertNotNull(sqoopService);
+
+    assertEquals("Extended SQOOP", sqoopService.getComment());
+    assertEquals("Extended Version", sqoopService.getVersion());
+    assertNull(sqoopService.getServicePackageFolder());
+
+    Collection<ComponentInfo> components = sqoopService.getComponents();
+    assertEquals(1, components.size());
+    ComponentInfo component = components.iterator().next();
+    assertEquals("SQOOP", component.getName());
+
+    // Get the base sqoop service
+    StackInfo baseStack = stackManager.getStack("HDP", "2.1.1");
+    ServiceInfo baseSqoopService = baseStack.getService("SQOOP");
+
+    // values from base service
+    assertEquals(baseSqoopService.isDeleted(), sqoopService.isDeleted());
+    assertEquals(baseSqoopService.getAlertsFile(),sqoopService.getAlertsFile());
+    assertEquals(baseSqoopService.getClientComponent(), sqoopService.getClientComponent());
+    assertEquals(baseSqoopService.getCommandScript(), sqoopService.getCommandScript());
+    assertEquals(baseSqoopService.getConfigDependencies(), sqoopService.getConfigDependencies());
+    assertEquals(baseSqoopService.getConfigDir(), sqoopService.getConfigDir());
+    assertEquals(baseSqoopService.getConfigDependenciesWithComponents(), sqoopService.getConfigDependenciesWithComponents());
+    assertEquals(baseSqoopService.getConfigTypeAttributes(), sqoopService.getConfigTypeAttributes());
+    assertEquals(baseSqoopService.getCustomCommands(), sqoopService.getCustomCommands());
+    assertEquals(baseSqoopService.getExcludedConfigTypes(), sqoopService.getExcludedConfigTypes());
+    assertEquals(baseSqoopService.getProperties(), sqoopService.getProperties());
+    assertEquals(baseSqoopService.getMetrics(), sqoopService.getMetrics());
+    assertNull(baseSqoopService.getMetricsFile());
+    assertNull(sqoopService.getMetricsFile());
+    assertEquals(baseSqoopService.getOsSpecifics(), sqoopService.getOsSpecifics());
+    assertEquals(baseSqoopService.getRequiredServices(), sqoopService.getRequiredServices());
+    assertEquals(baseSqoopService.getSchemaVersion(), sqoopService.getSchemaVersion());
+
+    // extended Storm service via explicit service extension
+    ServiceInfo stormService = stack.getService("STORM");
+    assertNotNull(stormService);
+    assertEquals("STORM", stormService.getName());
+
+    // base storm service
+    ServiceInfo baseStormService = baseStack.getService("STORM");
+
+    // overridden value
+    assertEquals("Apache Hadoop Stream processing framework (Extended)", stormService.getComment());
+    assertEquals("New version", stormService.getVersion());
+    assertEquals("OTHER/1.0/services/STORM/package", stormService.getServicePackageFolder());
+    // compare components
+    List<ComponentInfo> stormServiceComponents = stormService.getComponents();
+    List<ComponentInfo> baseStormServiceComponents = baseStormService.getComponents();
+    assertEquals(new HashSet<ComponentInfo>(stormServiceComponents), new HashSet<ComponentInfo>(baseStormServiceComponents));
+    // values from base service
+    assertEquals(baseStormService.isDeleted(), stormService.isDeleted());
+    //todo: specify alerts file in stack
+    assertEquals(baseStormService.getAlertsFile(),stormService.getAlertsFile());
+
+    assertEquals(baseStormService.getClientComponent(), stormService.getClientComponent());
+    assertEquals(baseStormService.getCommandScript(), stormService.getCommandScript());
+    assertEquals(baseStormService.getConfigDependencies(), stormService.getConfigDependencies());
+    assertEquals(baseStormService.getConfigDir(), stormService.getConfigDir());
+    assertEquals(baseStormService.getConfigDependenciesWithComponents(), stormService.getConfigDependenciesWithComponents());
+    assertEquals(baseStormService.getConfigTypeAttributes(), stormService.getConfigTypeAttributes());
+    assertEquals(baseStormService.getCustomCommands(), stormService.getCustomCommands());
+    assertEquals(baseStormService.getExcludedConfigTypes(), stormService.getExcludedConfigTypes());
+    assertEquals(baseStormService.getProperties(), stormService.getProperties());
+    assertEquals(baseStormService.getMetrics(), stormService.getMetrics());
+    assertNotNull(baseStormService.getMetricsFile());
+    assertNotNull(stormService.getMetricsFile());
+    assertFalse(baseStormService.getMetricsFile().equals(stormService.getMetricsFile()));
+    assertEquals(baseStormService.getOsSpecifics(), stormService.getOsSpecifics());
+    assertEquals(baseStormService.getRequiredServices(), stormService.getRequiredServices());
+    assertEquals(baseStormService.getSchemaVersion(), stormService.getSchemaVersion());
+  }
+
+  @Test
+  public void testGetStack_versionInheritance__explicitServiceExtension() {
+    StackInfo baseStack = stackManager.getStack("OTHER", "1.0");
+    StackInfo stack = stackManager.getStack("OTHER", "2.0");
+
+    assertEquals(4, stack.getServices().size());
+
+    ServiceInfo service = stack.getService("SQOOP2");
+    ServiceInfo baseSqoopService = baseStack.getService("SQOOP2");
+
+    assertEquals("SQOOP2", service.getName());
+    assertEquals("Inherited from parent", service.getComment());
+    assertEquals("Extended from parent version", service.getVersion());
+    assertNull(service.getServicePackageFolder());
+    // compare components
+    List<ComponentInfo> serviceComponents = service.getComponents();
+    List<ComponentInfo> baseSqoopServiceComponents = baseSqoopService.getComponents();
+    assertEquals(serviceComponents, baseSqoopServiceComponents);
+    // values from base service
+    assertEquals(baseSqoopService.isDeleted(), service.isDeleted());
+    assertEquals(baseSqoopService.getAlertsFile(),service.getAlertsFile());
+    assertEquals(baseSqoopService.getClientComponent(), service.getClientComponent());
+    assertEquals(baseSqoopService.getCommandScript(), service.getCommandScript());
+    assertEquals(baseSqoopService.getConfigDependencies(), service.getConfigDependencies());
+    assertEquals(baseSqoopService.getConfigDir(), service.getConfigDir());
+    assertEquals(baseSqoopService.getConfigDependenciesWithComponents(), service.getConfigDependenciesWithComponents());
+    assertEquals(baseSqoopService.getConfigTypeAttributes(), service.getConfigTypeAttributes());
+    assertEquals(baseSqoopService.getCustomCommands(), service.getCustomCommands());
+    assertEquals(baseSqoopService.getExcludedConfigTypes(), service.getExcludedConfigTypes());
+    assertEquals(baseSqoopService.getProperties(), service.getProperties());
+    assertEquals(baseSqoopService.getMetrics(), service.getMetrics());
+    assertNull(baseSqoopService.getMetricsFile());
+    assertNull(service.getMetricsFile());
+    assertEquals(baseSqoopService.getOsSpecifics(), service.getOsSpecifics());
+    assertEquals(baseSqoopService.getRequiredServices(), service.getRequiredServices());
+    assertEquals(baseSqoopService.getSchemaVersion(), service.getSchemaVersion());
+  }
+
+  @Test
+  public void testConfigDependenciesInheritance() throws Exception{
+    StackInfo stack = stackManager.getStack("HDP", "2.0.6");
+    ServiceInfo hdfsService = stack.getService("HDFS");
+    assertEquals(5, hdfsService.getConfigDependencies().size());
+    assertEquals(4, hdfsService.getConfigTypeAttributes().size());
+    assertTrue(hdfsService.getConfigDependencies().contains("core-site"));
+    assertTrue(hdfsService.getConfigDependencies().contains("global"));
+    assertTrue(hdfsService.getConfigDependencies().contains("hdfs-site"));
+    assertTrue(hdfsService.getConfigDependencies().contains("hdfs-log4j"));
+    assertTrue(hdfsService.getConfigDependencies().contains("hadoop-policy"));
+    assertTrue(Boolean.valueOf(hdfsService.getConfigTypeAttributes().get("core-site").get("supports").get("final")));
+    assertFalse(Boolean.valueOf(hdfsService.getConfigTypeAttributes().get("global").get("supports").get("final")));
+  }
+
+  @Test
+  public void testClientConfigFilesInheritance() throws Exception{
+    StackInfo stack = stackManager.getStack("HDP", "2.0.6");
+    ServiceInfo zkService = stack.getService("ZOOKEEPER");
+    List<ComponentInfo> components = zkService.getComponents();
+    assertTrue(components.size() == 2);
+    ComponentInfo componentInfo = components.get(1);
+    List<ClientConfigFileDefinition> clientConfigs = componentInfo.getClientConfigFiles();
+    assertEquals(2,clientConfigs.size());
+    assertEquals("zookeeper-env",clientConfigs.get(0).getDictionaryName());
+    assertEquals("zookeeper-env.sh",clientConfigs.get(0).getFileName());
+    assertEquals("env",clientConfigs.get(0).getType());
+    assertEquals("zookeeper-log4j",clientConfigs.get(1).getDictionaryName());
+    assertEquals("log4j.properties",clientConfigs.get(1).getFileName());
+    assertEquals("env", clientConfigs.get(1).getType());
+  }
+
+  @Test
+  public void testMonitoringServicePropertyInheritance() throws Exception{
+    StackInfo stack = stackManager.getStack("HDP", "2.0.7");
+    Collection<ServiceInfo> allServices = stack.getServices();
+    assertEquals(13, allServices.size());
+    for (ServiceInfo serviceInfo : allServices) {
+      if (serviceInfo.getName().equals("NAGIOS")) {
+        assertTrue(serviceInfo.isMonitoringService());
+      } else {
+        assertNull(serviceInfo.isMonitoringService());
+      }
+    }
+  }
+
+  @Test
+  public void testServiceDeletion() {
+    StackInfo stack = stackManager.getStack("HDP", "2.0.6");
+    Collection<ServiceInfo> allServices = stack.getServices();
+
+    assertEquals(12, allServices.size());
+    HashSet<String> expectedServices = new HashSet<String>();
+    expectedServices.add("GANGLIA");
+    expectedServices.add("HBASE");
+    expectedServices.add("HCATALOG");
+    expectedServices.add("HDFS");
+    expectedServices.add("HIVE");
+    expectedServices.add("MAPREDUCE2");
+    expectedServices.add("NAGIOS");
+    expectedServices.add("OOZIE");
+    expectedServices.add("PIG");
+    expectedServices.add("ZOOKEEPER");
+    expectedServices.add("FLUME");
+    expectedServices.add("YARN");
+
+    for (ServiceInfo service : allServices) {
+      assertTrue(expectedServices.remove(service.getName()));
+    }
+    assertTrue(expectedServices.isEmpty());
+  }
+
+  @Test
+  public void testComponentDeletion() {
+    StackInfo stack = stackManager.getStack("HDP", "2.0.6");
+    ServiceInfo yarnService = stack.getService("YARN");
+    assertNull(yarnService.getComponentByName("YARN_CLIENT"));
+
+    stack = stackManager.getStack("HDP", "2.0.7");
+    yarnService = stack.getService("YARN");
+    assertNotNull(yarnService.getComponentByName("YARN_CLIENT"));
+  }
+
+  @Test
+  public void testPopulateConfigTypes() throws Exception {
+    StackInfo stack = stackManager.getStack("HDP", "2.0.7");
+    ServiceInfo hdfsService = stack.getService("HDFS");
+
+    Map<String, Map<String, Map<String, String>>> configTypes = hdfsService.getConfigTypeAttributes();
+    assertEquals(4, configTypes.size());
+
+    Map<String, Map<String, String>> configType = configTypes.get("global");
+    assertEquals(1, configType.size());
+    Map<String, String> supportsMap = configType.get("supports");
+    assertEquals(3, supportsMap.size());
+    assertEquals("true", supportsMap.get("final"));
+    assertEquals("false", supportsMap.get("adding_forbidden"));
+    assertEquals("false", supportsMap.get("do_not_extend"));
+
+    configType = configTypes.get("hdfs-site");
+    assertEquals(1, configType.size());
+    supportsMap = configType.get("supports");
+    assertEquals(3, supportsMap.size());
+    assertEquals("false", supportsMap.get("final"));
+    assertEquals("false", supportsMap.get("adding_forbidden"));
+    assertEquals("false", supportsMap.get("do_not_extend"));
+
+    configType = configTypes.get("core-site");
+    assertEquals(1, configType.size());
+    supportsMap = configType.get("supports");
+    assertEquals(3, supportsMap.size());
+    assertEquals("false", supportsMap.get("final"));
+    assertEquals("false", supportsMap.get("adding_forbidden"));
+    assertEquals("false", supportsMap.get("do_not_extend"));
+
+    configType = configTypes.get("hadoop-policy");
+    assertEquals(1, configType.size());
+    supportsMap = configType.get("supports");
+    assertEquals(3, supportsMap.size());
+    assertEquals("false", supportsMap.get("final"));
+    assertEquals("false", supportsMap.get("adding_forbidden"));
+    assertEquals("false", supportsMap.get("do_not_extend"));
+
+    ServiceInfo yarnService = stack.getService("YARN");
+    configTypes = yarnService.getConfigTypeAttributes();
+    assertEquals(4, configTypes.size());
+    assertTrue(configTypes.containsKey("yarn-site"));
+    assertTrue(configTypes.containsKey("core-site"));
+    assertTrue(configTypes.containsKey("global"));
+    assertTrue(configTypes.containsKey("capacity-scheduler"));
+
+    configType = configTypes.get("yarn-site");
+    supportsMap = configType.get("supports");
+    assertEquals(3, supportsMap.size());
+    assertEquals("false", supportsMap.get("final"));
+    assertEquals("true", supportsMap.get("adding_forbidden"));
+    assertEquals("true", supportsMap.get("do_not_extend"));
+
+    ServiceInfo mrService = stack.getService("MAPREDUCE2");
+    configTypes = mrService.getConfigTypeAttributes();
+    assertEquals(3, configTypes.size());
+    assertTrue(configTypes.containsKey("mapred-site"));
+    assertTrue(configTypes.containsKey("core-site"));
+    assertTrue(configTypes.containsKey("mapred-queue-acls"));
+  }
+
+  @Test
+  public void testCycleDetection() throws Exception {
+    ActionMetadata actionMetadata = createNiceMock(ActionMetadata.class);
+    OsFamily osFamily = createNiceMock(OsFamily.class);
+    replay(actionMetadata);
+    try {
+      new StackManager(new File("./src/test/resources/stacks_with_cycle/"),
+          new StackContext(null, actionMetadata, osFamily));
+      fail("Expected exception due to cyclic stack");
+    } catch (AmbariException e) {
+      // expected
+      assertEquals("Cycle detected while parsing stack definition", e.getMessage());
+    }
+
+    try {
+      new StackManager(new File("./src/test/resources/stacks_with_cycle2/"),
+          new StackContext(null, actionMetadata, osFamily));
+      fail("Expected exception due to cyclic stack");
+    } catch (AmbariException e) {
+      // expected
+      assertEquals("Cycle detected while parsing stack definition", e.getMessage());
+    }
+  }
+
+  @Test
+  public void testExcludedConfigTypes() {
+    StackInfo stack = stackManager.getStack("HDP", "2.0.8");
+    ServiceInfo service = stack.getService("HBASE");
+    assertFalse(service.hasConfigType("global"));
+    Map<String, Map<String, Map<String, String>>> configTypes = service.getConfigTypeAttributes();
+    assertEquals(2, configTypes.size());
+    assertTrue(configTypes.containsKey("hbase-site"));
+    assertTrue(configTypes.containsKey("hbase-policy"));
+
+    // test version that inherits the service via version inheritance
+    stack = stackManager.getStack("HDP", "2.1.1");
+    service = stack.getService("HBASE");
+    assertFalse(service.hasConfigType("global"));
+    configTypes = service.getConfigTypeAttributes();
+    assertEquals(2, configTypes.size());
+    assertTrue(configTypes.containsKey("hbase-site"));
+    assertTrue(configTypes.containsKey("hbase-policy"));
+    assertFalse(configTypes.containsKey("global"));
+
+    // test version that inherits the service explicit service extension
+    // the new version also excludes hbase-policy
+    stack = stackManager.getStack("OTHER", "2.0");
+    service = stack.getService("HBASE");
+    assertFalse(service.hasConfigType("global"));
+    configTypes = service.getConfigTypeAttributes();
+    assertEquals(1, configTypes.size());
+    assertTrue(configTypes.containsKey("hbase-site"));
+  }
+
+  @Test
+  public void testHDFSServiceContainsMetricsFile() throws Exception {
+    StackInfo stack = stackManager.getStack("HDP", "2.0.6");
+    ServiceInfo hdfsService = stack.getService("HDFS");
+
+    assertEquals("HDFS", hdfsService.getName());
+    assertNotNull(hdfsService.getMetricsFile());
+  }
+
+  /**
+   * This test ensures the service status check is added into the action metadata when
+   * the stack has no parent and is the only stack in the stack family
+   */
+  @Test
+  public void testGetServiceInfoFromSingleStack() throws Exception {
+    dao = createNiceMock(MetainfoDAO.class);
+    actionMetadata = createNiceMock(ActionMetadata.class);
+    osFamily = createNiceMock(OsFamily.class);
+
+    // ensure that service check is added for HDFS
+    actionMetadata.addServiceCheckAction("HDFS");
+    replay(dao, actionMetadata, osFamily);
+    StackManager stackManager = new StackManager(
+        new File("./src/test/resources/single_stack".replace("/", File.separator)),
+        new StackContext(dao, actionMetadata, osFamily));
+
+    Collection<StackInfo> stacks = stackManager.getStacks();
+    assertEquals(1, stacks.size());
+    assertNotNull(stacks.iterator().next().getService("HDFS"));
+
+    verify(dao, actionMetadata, osFamily);
+  }
+
+  //todo: component override assertions
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fc7adec/ambari-server/src/test/java/org/apache/ambari/server/state/PropertyInfoTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/PropertyInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/PropertyInfoTest.java
index c4eff8c..e8a2a8d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/PropertyInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/PropertyInfoTest.java
@@ -17,46 +17,71 @@
  */
 package org.apache.ambari.server.state;
 
-import org.apache.ambari.server.api.util.StackExtensionHelper;
-import org.apache.ambari.server.state.stack.ConfigurationXml;
+import org.apache.ambari.server.state.stack.StackMetainfoXml;
 import org.junit.Test;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
 
-import java.io.File;
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.Unmarshaller;
+import java.lang.reflect.Field;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
 import static org.junit.Assert.*;
 
 public class PropertyInfoTest {
 
   @Test
-  public void testGetAttributesMap() throws Exception {
-    Map<String, String> attributes;
-    File configFile = new File("./src/test/resources/stacks/HDP/2.0.8/services/HDFS/configuration/hdfs-site.xml");
-    ConfigurationXml configuration = StackExtensionHelper.unmarshal(ConfigurationXml.class, configFile);
-    List<PropertyInfo> properties = configuration.getProperties();
-    PropertyInfo dfsNameDir = properties.get(0);
-    assertNotNull(dfsNameDir);
-    assertEquals("dfs.name.dir", dfsNameDir.getName());
-    attributes = dfsNameDir.getAttributesMap();
-    assertEquals(1, attributes.size());
-    assertTrue(attributes.containsKey("final"));
-    assertEquals("true", attributes.get("final"));
-
-    PropertyInfo dfsSupportAppend = properties.get(1);
-    assertNotNull(dfsSupportAppend);
-    assertEquals("dfs.support.append", dfsSupportAppend.getName());
-    attributes = dfsSupportAppend.getAttributesMap();
+  public void testProperty() {
+    PropertyInfo property = new PropertyInfo();
+    property.setName("name");
+    property.setValue("value");
+    property.setDescription("desc");
+    property.setFilename("filename");
+
+    assertEquals("name", property.getName());
+    assertEquals("value", property.getValue());
+    assertEquals("desc", property.getDescription());
+    assertEquals("filename", property.getFilename());
+  }
+
+  @Test
+  public void testAttributes() throws Exception {
+    PropertyInfo property = new PropertyInfo();
+
+    List<Element> elements = new ArrayList<Element>();
+    Element e1 = createNiceMock(Element.class);
+    Element e2 = createNiceMock(Element.class);
+    Node n1 = createNiceMock(Node.class);
+    Node n2 = createNiceMock(Node.class);
+
+    elements.add(e1);
+    elements.add(e2);
+
+    // set mock expectations
+    expect(e1.getTagName()).andReturn("foo").anyTimes();
+    expect(e1.getFirstChild()).andReturn(n1).anyTimes();
+    expect(n1.getNodeValue()).andReturn("value1").anyTimes();
+
+    expect(e2.getTagName()).andReturn("bar").anyTimes();
+    expect(e2.getFirstChild()).andReturn(n2).anyTimes();
+    expect(n2.getNodeValue()).andReturn("value2").anyTimes();
+
+    replay(e1, e2, n1, n2);
+
+    // set attributes
+    Field f = property.getClass().getDeclaredField("propertyAttributes");
+    f.setAccessible(true);
+    f.set(property, elements);
+
+    Map<String, String> attributes = property.getAttributesMap();
     assertEquals(2, attributes.size());
-    assertTrue(attributes.containsKey("final"));
-    assertEquals("true", attributes.get("final"));
-    assertTrue(attributes.containsKey("deletable"));
-    assertEquals("false", attributes.get("deletable"));
-
-    PropertyInfo dfsWebhdfsEnabled = properties.get(2);
-    assertNotNull(dfsWebhdfsEnabled);
-    assertEquals("dfs.webhdfs.enabled", dfsWebhdfsEnabled.getName());
-    attributes = dfsWebhdfsEnabled.getAttributesMap();
-    assertEquals(0, attributes.size());
+    assertEquals("value1", attributes.get("foo"));
+    assertEquals("value2", attributes.get("bar"));
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fc7adec/ambari-server/src/test/resources/bad-stacks/HDP/0.1/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/bad-stacks/HDP/0.1/repos/repoinfo.xml b/ambari-server/src/test/resources/bad-stacks/HDP/0.1/repos/repoinfo.xml
index 9ea91b8..6948ae7 100644
--- a/ambari-server/src/test/resources/bad-stacks/HDP/0.1/repos/repoinfo.xml
+++ b/ambari-server/src/test/resources/bad-stacks/HDP/0.1/repos/repoinfo.xml
@@ -16,42 +16,11 @@
    limitations under the License.
 -->
 <reposinfo>
-  <os type="centos6, redhat6">
+  <os family="redhat6">
     <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6</baseurl>
-      <repoid>HDP-1.1.1.16</repoid>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0</baseurl>
+      <repoid>HDP-2.1.1</repoid>
       <reponame>HDP</reponame>
     </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6</baseurl>
-      <repoid>HDP-UTILS-1.1.0.15</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <mirrorslist></mirrorslist>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>epel</repoid>
-      <reponame>epel</reponame>
-      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-  <os type="centos5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5</baseurl>
-      <repoid>HDP-1.1.1.16</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5</baseurl>
-      <repoid>HDP-UTILS-1.1.0.15</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <mirrorslist></mirrorslist>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>epel</repoid>
-      <reponame>epel</reponame>
-      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-5&arch=$basearch]]></mirrorslist>
-    </repo>
   </os>
 </reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fc7adec/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/metainfo.xml
index 3bfd4f7..35a4d05 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/metainfo.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/metainfo.xml
@@ -24,9 +24,7 @@
       <displayName>Hive</displayName>
       <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
       <version>0.11.0.2.0.5.0</version>
-
       <components>
-
         <component>
           <name>HIVE_METASTORE</name>
           <displayName>Hive Metastore</displayName>

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fc7adec/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/YARN/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/YARN/metainfo.xml
index 08f9949..beee6a0 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/YARN/metainfo.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/YARN/metainfo.xml
@@ -161,6 +161,10 @@
         <config-type>mapred-site</config-type>
         <config-type>mapred-queue-acls</config-type>
       </configuration-dependencies>
+
+        <excluded-config-types>
+            <config-type>global</config-type>
+        </excluded-config-types>
     </service>
 
   </services>

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fc7adec/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HBASE/configuration/hbase-site.xml
new file mode 100644
index 0000000..4270410
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HBASE/configuration/hbase-site.xml
@@ -0,0 +1,356 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.rootdir</name>
+    <value></value>
+    <description>The directory shared by region servers and into
+    which HBase persists.  The URL should be 'fully-qualified'
+    to include the filesystem scheme.  For example, to specify the
+    HDFS directory '/hbase' where the HDFS instance's namenode is
+    running at namenode.example.org on port 9000, set this value to:
+    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
+    into /tmp.  Change this configuration else all data will be lost
+    on machine restart.
+    </description>
+  </property>
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>true</value>
+    <description>The mode the cluster will be in. Possible values are
+      false for standalone mode and true for distributed mode.  If
+      false, startup will run all HBase and ZooKeeper daemons together
+      in the one JVM.
+    </description>
+  </property>
+  <property>
+    <name>hbase.tmp.dir</name>
+    <value></value>
+    <description>Temporary directory on the local filesystem.
+    Change this setting to point to a location more permanent
+    than '/tmp' (The '/tmp' directory is often cleared on
+    machine restart).
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.bindAddress</name>
+    <value></value>
+    <description>The bind address for the HBase Master web UI
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value></value>
+    <description>The port for the HBase Master web UI.</description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value></value>
+    <description>The port for the HBase RegionServer web UI.</description>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.upperLimit</name>
+    <value></value>
+    <description>Maximum size of all memstores in a region server before new
+      updates are blocked and flushes are forced. Defaults to 40% of heap
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value></value>
+    <description>Count of RPC Listener instances spun up on RegionServers.
+    Same property is used by the Master for count of master handlers.
+    Default is 10.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.majorcompaction</name>
+    <value></value>
+    <description>The time (in milliseconds) between 'major' compactions of all
+    HStoreFiles in a region.  Default: 1 day.
+    Set to 0 to disable automated major compactions.
+    </description>
+  </property>
+  
+  <property>
+    <name>hbase.regionserver.global.memstore.lowerLimit</name>
+    <value></value>
+    <description>When memstores are being forced to flush to make room in
+      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
+      This value equal to hbase.regionserver.global.memstore.upperLimit causes
+      the minimum possible flushing to occur when updates are blocked due to
+      memstore limiting.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.block.multiplier</name>
+    <value></value>
+    <description>Block updates if memstore has hbase.hregion.memstore.block.multiplier
+    time hbase.hregion.flush.size bytes.  Useful preventing
+    runaway memstore during spikes in update traffic.  Without an
+    upper-bound, memstore fills such that when it flushes the
+    resultant flush files take a long time to compact or split, or
+    worse, we OOME
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.flush.size</name>
+    <value></value>
+    <description>
+    Memstore will be flushed to disk if size of the memstore
+    exceeds this number of bytes.  Value is checked by a thread that runs
+    every hbase.server.thread.wakefrequency.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.mslab.enabled</name>
+    <value></value>
+    <description>
+      Enables the MemStore-Local Allocation Buffer,
+      a feature which works to prevent heap fragmentation under
+      heavy write loads. This can reduce the frequency of stop-the-world
+      GC pauses on large heaps.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value></value>
+    <description>
+    Maximum HStoreFile size. If any one of a column families' HStoreFiles has
+    grown to exceed this value, the hosting HRegion is split in two.
+    Default: 1G.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.scanner.caching</name>
+    <value></value>
+    <description>Number of rows that will be fetched when calling next
+    on a scanner if it is not served from (local, client) memory. Higher
+    caching values will enable faster scanners but will eat up more memory
+    and some calls of next may take longer and longer times when the cache is empty.
+    Do not set this value such that the time between invocations is greater
+    than the scanner timeout; i.e. hbase.regionserver.lease.period
+    </description>
+  </property>
+  <property>
+    <name>zookeeper.session.timeout</name>
+    <value>30000</value>
+    <description>ZooKeeper session timeout.
+      HBase passes this to the zk quorum as suggested maximum time for a
+      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
+      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
+      "The client sends a requested timeout, the server responds with the
+      timeout that it can give the client. " In milliseconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.keyvalue.maxsize</name>
+    <value></value>
+    <description>Specifies the combined maximum allowed size of a KeyValue
+    instance. This is to set an upper boundary for a single entry saved in a
+    storage file. Since they cannot be split it helps avoiding that a region
+    cannot be split any further because the data is too large. It seems wise
+    to set this to a fraction of the maximum region size. Setting it to zero
+    or less disables the check.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hstore.compactionThreshold</name>
+    <value></value>
+    <description>
+    If more than this number of HStoreFiles in any one HStore
+    (one HStoreFile is written per flush of memstore) then a compaction
+    is run to rewrite all HStoreFiles files as one.  Larger numbers
+    put off compaction but when it runs, it takes longer to complete.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hstore.flush.retries.number</name>
+    <value>120</value>
+    <description>
+    The number of times the region flush operation will be retried.
+    </description>
+  </property>
+  
+  <property>
+    <name>hbase.hstore.blockingStoreFiles</name>
+    <value></value>
+    <description>
+    If more than this number of StoreFiles in any one Store
+    (one StoreFile is written per flush of MemStore) then updates are
+    blocked for this HRegion until a compaction is completed, or
+    until hbase.hstore.blockingWaitTime has been exceeded.
+    </description>
+  </property>
+  <property>
+    <name>hfile.block.cache.size</name>
+    <value></value>
+    <description>
+        Percentage of maximum heap (-Xmx setting) to allocate to block cache
+        used by HFile/StoreFile. Default of 0.25 means allocate 25%.
+        Set to 0 to disable but it's not recommended.
+    </description>
+  </property>
+
+  <!-- The following properties configure authentication information for
+       HBase processes when using Kerberos security.  There are no default
+       values, included here for documentation purposes -->
+  <property>
+    <name>hbase.master.keytab.file</name>
+    <value></value>
+    <description>Full path to the kerberos keytab file to use for logging in
+    the configured HMaster server principal.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.kerberos.principal</name>
+    <value></value>
+    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
+    that should be used to run the HMaster process.  The principal name should
+    be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
+    portion, it will be replaced with the actual hostname of the running
+    instance.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.keytab.file</name>
+    <value></value>
+    <description>Full path to the kerberos keytab file to use for logging in
+    the configured HRegionServer server principal.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.kerberos.principal</name>
+    <value></value>
+    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
+    that should be used to run the HRegionServer process.  The principal name
+    should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
+    hostname portion, it will be replaced with the actual hostname of the
+    running instance.  An entry for this principal must exist in the file
+    specified in hbase.regionserver.keytab.file
+    </description>
+  </property>
+
+  <!-- Additional configuration specific to HBase security -->
+  <property>
+    <name>hbase.superuser</name>
+    <value>hbase</value>
+    <description>List of users or groups (comma-separated), who are allowed
+    full privileges, regardless of stored ACLs, across the cluster.
+    Only used when HBase security is enabled.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.security.authentication</name>
+    <value>simple</value>
+  </property>
+
+  <property>
+    <name>hbase.security.authorization</name>
+    <value>false</value>
+    <description>Enables HBase authorization. Set the value of this property to false to disable HBase authorization.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.coprocessor.region.classes</name>
+    <value></value>
+    <description>A comma-separated list of Coprocessors that are loaded by
+    default on all tables. For any override coprocessor method, these classes
+    will be called in order. After implementing your own Coprocessor, just put
+    it in HBase's classpath and add the fully qualified class name here.
+    A coprocessor can also be loaded on demand by setting HTableDescriptor.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.coprocessor.master.classes</name>
+    <value></value>
+    <description>A comma-separated list of
+      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
+      loaded by default on the active HMaster process. For any implemented
+      coprocessor methods, the listed classes will be called in order. After
+      implementing your own MasterObserver, just put it in HBase's classpath
+      and add the fully qualified class name here.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>2181</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+
+  <!--
+  The following three properties are used together to create the list of
+  host:peer_port:leader_port quorum servers for ZooKeeper.
+  -->
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value></value>
+    <description>Comma separated list of servers in the ZooKeeper Quorum.
+    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+    By default this is set to localhost for local and pseudo-distributed modes
+    of operation. For a fully-distributed setup, this should be set to a full
+    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+    this is the list of servers which we will start/stop ZooKeeper on.
+    </description>
+  </property>
+  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
+
+  <property>
+    <name>hbase.zookeeper.useMulti</name>
+    <value>true</value>
+    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
+    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
+    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
+    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
+    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
+    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
+    </description>
+  </property>
+  <property>
+    <name>zookeeper.znode.parent</name>
+    <value>/hbase-unsecure</value>
+    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
+      files that are configured with a relative path will go under this node.
+      By default, all of HBase's ZooKeeper file path are configured with a
+      relative path, so they will all go under this directory unless changed.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.defaults.for.version.skip</name>
+    <value>true</value>
+    <description>Disables version verification.</description>
+  </property>
+
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    <description>Path to domain socket.</description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fc7adec/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HBASE/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HBASE/metainfo.xml
index 336701a..0864de8 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HBASE/metainfo.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.8/services/HBASE/metainfo.xml
@@ -74,6 +74,9 @@
           </commandScript>
         </customCommand>
       </customCommands>
+      <excluded-config-types>
+        <config-type>global</config-type>
+      </excluded-config-types>
       <configuration-dependencies>
       </configuration-dependencies>
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fc7adec/ambari-server/src/test/resources/stacks/HDP/2.1.1/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.1.1/services/PIG/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.1.1/services/PIG/metainfo.xml
index 50d3146..f310b70 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.1.1/services/PIG/metainfo.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.1.1/services/PIG/metainfo.xml
@@ -20,8 +20,14 @@
   <services>
     <service>
       <name>PIG</name>
-      <comment>Scripting platform for analyzing large datasets</comment>
+      <comment>Scripting platform for analyzing large datasets (Extended)</comment>
       <version>0.12.1.2.1.1</version>
+        <components>
+          <component>
+            <name>NewComponent</name>
+            <category>SLAVE</category>
+          </component>
+        </components>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fc7adec/ambari-server/src/test/resources/stacks/OTHER/1.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/OTHER/1.0/metainfo.xml b/ambari-server/src/test/resources/stacks/OTHER/1.0/metainfo.xml
new file mode 100644
index 0000000..31716d2
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/OTHER/1.0/metainfo.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <versions>
+      <active>true</active>
+    </versions>
+</metainfo>
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fc7adec/ambari-server/src/test/resources/stacks/OTHER/1.0/repos/hdp.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/OTHER/1.0/repos/hdp.json b/ambari-server/src/test/resources/stacks/OTHER/1.0/repos/hdp.json
new file mode 100644
index 0000000..fc51627
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/OTHER/1.0/repos/hdp.json
@@ -0,0 +1,10 @@
+{
+  "HDP-2.1.1": {
+    "latest": {
+      "centos6": "http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.1.1.0-118",
+      "redhat6": "http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.1.1.0-118",
+      "oraclelinux6": "http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.1.1.0-118",
+      "suse11": "http://s3.amazonaws.com/dev.hortonworks.com/HDP/suse11/2.x/BUILDS/2.1.1.0-118/hdp.repo"
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fc7adec/ambari-server/src/test/resources/stacks/OTHER/1.0/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/OTHER/1.0/repos/repoinfo.xml b/ambari-server/src/test/resources/stacks/OTHER/1.0/repos/repoinfo.xml
new file mode 100644
index 0000000..9d8a232
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/OTHER/1.0/repos/repoinfo.xml
@@ -0,0 +1,62 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <latest>./hdp.json</latest>
+  <os family="centos6">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0</baseurl>
+      <repoid>HDP-2.1.1</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+  </os>
+  <os family="centos5">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/2.x/updates/2.0.6.0</baseurl>
+      <repoid>HDP-2.1.1</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+  </os>
+  <os family="redhat6">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0</baseurl>
+      <repoid>HDP-2.1.1</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+  </os>
+  <os family="redhat5">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/2.x/updates/2.0.6.0</baseurl>
+      <repoid>HDP-2.1.1</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+  </os>
+  <os family="suse11">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/2.x/updates/2.0.6.0</baseurl>
+      <repoid>HDP-2.1.1</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+  </os>
+  <os family="sles11">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/2.x/updates/2.0.6.0</baseurl>
+      <repoid>HDP-2.1.1</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+  </os>
+</reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fc7adec/ambari-server/src/test/resources/stacks/OTHER/1.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/OTHER/1.0/role_command_order.json b/ambari-server/src/test/resources/stacks/OTHER/1.0/role_command_order.json
new file mode 100644
index 0000000..c45ba07
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/OTHER/1.0/role_command_order.json
@@ -0,0 +1,104 @@
+{
+  "_comment" : "Record format:",
+  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
+  "general_deps" : {
+    "_comment" : "dependencies for all cases",
+    "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
+        "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
+    "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
+    "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
+    "OOZIE_SERVER-START": ["JOBTRACKER-START", "TASKTRACKER-START"],
+    "WEBHCAT_SERVER-START": ["TASKTRACKER-START", "HIVE_SERVER-START"],
+    "HIVE_METASTORE-START": ["MYSQL_SERVER-START"],
+    "HIVE_SERVER-START": ["TASKTRACKER-START", "MYSQL_SERVER-START"],
+    "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
+    "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
+    "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
+        "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
+        "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
+        "JOBTRACKER-START", "TASKTRACKER-START", "ZOOKEEPER_SERVER-START",
+        "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
+        "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"],
+    "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
+    "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
+    "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
+    "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
+    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START"],
+    "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"],
+    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
+    "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
+    "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+    "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+    "ZOOKEEPER_SERVER-STOP" : ["HBASE_MASTER-STOP", "HBASE_REGIONSERVER-STOP"],
+    "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
+    "TASKTRACKER-UPGRADE": ["JOBTRACKER-UPGRADE"],
+    "MAPREDUCE_CLIENT-UPGRADE": ["TASKTRACKER-UPGRADE", "JOBTRACKER-UPGRADE"],
+    "ZOOKEEPER_SERVER-UPGRADE": ["MAPREDUCE_CLIENT-UPGRADE"],
+    "ZOOKEEPER_CLIENT-UPGRADE": ["ZOOKEEPER_SERVER-UPGRADE"],
+    "HBASE_MASTER-UPGRADE": ["ZOOKEEPER_CLIENT-UPGRADE"],
+    "HBASE_REGIONSERVER-UPGRADE": ["HBASE_MASTER-UPGRADE"],
+    "HBASE_CLIENT-UPGRADE": ["HBASE_REGIONSERVER-UPGRADE"],
+    "HIVE_SERVER-UPGRADE" : ["HBASE_CLIENT-UPGRADE"],
+    "HIVE_METASTORE-UPGRADE" : ["HIVE_SERVER-UPGRADE"],
+    "MYSQL_SERVER-UPGRADE": ["HIVE_METASTORE-UPGRADE"],
+    "HIVE_CLIENT-UPGRADE": ["MYSQL_SERVER-UPGRADE"],
+    "HCAT-UPGRADE": ["HIVE_CLIENT-UPGRADE"],
+    "OOZIE_SERVER-UPGRADE" : ["HCAT-UPGRADE"],
+    "OOZIE_CLIENT-UPGRADE" : ["OOZIE_SERVER-UPGRADE"],
+    "WEBHCAT_SERVER-UPGRADE" : ["OOZIE_CLIENT-UPGRADE"],
+    "PIG-UPGRADE" : ["WEBHCAT_SERVER-UPGRADE"],
+    "SQOOP-UPGRADE" : ["PIG-UPGRADE"],
+    "NAGIOS_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
+    "GANGLIA_SERVER-UPGRADE" : ["NAGIOS_SERVER-UPGRADE"],
+    "GANGLIA_MONITOR-UPGRADE" : ["GANGLIA_SERVER-UPGRADE"]
+  },
+  "_comment" : "GLUSTERFS-specific dependencies",
+  "optional_glusterfs": {
+    "HBASE_MASTER-START": ["PEERSTATUS-START"],
+    "JOBTRACKER-START": ["PEERSTATUS-START"],
+    "TASKTRACKER-START": ["PEERSTATUS-START"],
+    "GLUSTERFS_SERVICE_CHECK-SERVICE_CHECK": ["PEERSTATUS-START"],
+    "JOBTRACKER-UPGRADE": ["GLUSTERFS_CLIENT-UPGRADE"]
+  },
+  "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
+  "optional_no_glusterfs": {
+    "SECONDARY_NAMENODE-START": ["NAMENODE-START"],
+    "RESOURCEMANAGER-START": ["NAMENODE-START", "DATANODE-START"],
+    "NODEMANAGER-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START"],
+    "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"],
+    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START"],
+    "JOBTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
+    "TASKTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
+    "HIVE_SERVER-START": ["DATANODE-START"],
+    "WEBHCAT_SERVER-START": ["DATANODE-START"],
+    "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
+        "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
+    "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
+        "SECONDARY_NAMENODE-START"],
+    "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
+        "RESOURCEMANAGER-START", "HISTORYSERVER-START", "YARN_SERVICE_CHECK-SERVICE_CHECK"],
+    "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"],
+    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "NAMENODE-STOP": ["JOBTRACKER-STOP", "TASKTRACKER-STOP", "RESOURCEMANAGER-STOP",
+        "NODEMANAGER-STOP", "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
+    "DATANODE-STOP": ["JOBTRACKER-STOP", "TASKTRACKER-STOP", "RESOURCEMANAGER-STOP",
+        "NODEMANAGER-STOP", "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
+    "SECONDARY_NAMENODE-UPGRADE": ["NAMENODE-UPGRADE"],
+    "DATANODE-UPGRADE": ["SECONDARY_NAMENODE-UPGRADE"],
+    "HDFS_CLIENT-UPGRADE": ["DATANODE-UPGRADE"],
+    "JOBTRACKER-UPGRADE": ["HDFS_CLIENT-UPGRADE"]
+  },
+  "_comment" : "Dependencies that are used in HA NameNode cluster",
+  "namenode_optional_ha": {
+    "NAMENODE-START": ["JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
+    "ZKFC-START": ["NAMENODE-START"],
+    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"],
+    "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["ZKFC-START"]
+  },
+  "_comment" : "Dependencies that are used in ResourceManager HA cluster",
+  "resourcemanager_optional_ha" : {
+    "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
+  }
+}
+
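The "_comment" entries at the top of this fixture document the record format used throughout the file: each key is a blocked role-command pair, and its value lists the blocker role-command pairs that must complete before the blocked command may run. As an illustration only (the entry is copied from the general_deps block above; the enclosing braces are added just for this excerpt), the following reads "HBASE_REGIONSERVER cannot be started until HBASE_MASTER has started":

    {
      "general_deps" : {
        "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"]
      }
    }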

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fc7adec/ambari-server/src/test/resources/stacks/OTHER/1.0/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/OTHER/1.0/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/test/resources/stacks/OTHER/1.0/services/HDFS/configuration/hdfs-site.xml
new file mode 100644
index 0000000..c40fbd0
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/OTHER/1.0/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+
+  <property>
+    <name>dfs.name.dir</name>
+    <value></value>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.support.append</name>
+    <value>true</value>
+    <description>to enable dfs append</description>
+    <final>true</final>
+    <deletable>false</deletable>
+  </property>
+
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>true</value>
+    <description>to enable webhdfs</description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fc7adec/ambari-server/src/test/resources/stacks/OTHER/1.0/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/OTHER/1.0/services/HDFS/metainfo.xml b/ambari-server/src/test/resources/stacks/OTHER/1.0/services/HDFS/metainfo.xml
new file mode 100644
index 0000000..d3bef74
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/OTHER/1.0/services/HDFS/metainfo.xml
@@ -0,0 +1,146 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <comment>Apache Hadoop Distributed File System</comment>
+      <version>2.1.0.2.0.6.0</version>
+
+      <components>
+        <component>
+          <name>NAMENODE</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/namenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/namenode_dec_overr.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+            <customCommand>
+              <name>YET_ANOTHER_CHILD_COMMAND</name>
+              <commandScript>
+                <script>scripts/yet_another_child_command.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>DATANODE</name>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/datanode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>SECONDARY_NAMENODE</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/snamenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>HDFS_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/hdfs_client_overridden.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>JOURNALNODE</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/journalnode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>ZKFC</name>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/zkfc_slave.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>child-package-def</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check_2.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <customCommands>
+        <customCommand>
+          <name>RESTART</name>
+          <commandScript>
+            <script>scripts/restart_child.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </customCommand>
+        <customCommand>
+          <name>YET_ANOTHER_CHILD_SRV_COMMAND</name>
+          <commandScript>
+            <script>scripts/yet_another_child_srv_command.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </customCommand>
+      </customCommands>
+
+      <configuration-dependencies>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>