Posted to commits@ambari.apache.org by om...@apache.org on 2011/12/06 17:19:15 UTC

svn commit: r1210997 [2/2] - in /incubator/ambari/trunk: ./ client/src/main/java/org/apache/ambari/common/rest/entities/ controller/ controller/src/main/java/org/apache/ambari/components/ controller/src/main/java/org/apache/ambari/components/impl/ cont...

Modified: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/rest/resources/ClustersResource.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/rest/resources/ClustersResource.java?rev=1210997&r1=1210996&r2=1210997&view=diff
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/rest/resources/ClustersResource.java (original)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/rest/resources/ClustersResource.java Tue Dec  6 16:19:14 2011
@@ -17,7 +17,6 @@
 */
 package org.apache.ambari.controller.rest.resources;
 
-import java.io.ByteArrayInputStream;
 import java.util.List;
 
 import org.apache.ambari.common.rest.entities.ClusterDefinition;
@@ -27,16 +26,14 @@ import org.apache.ambari.common.rest.ent
 import org.apache.ambari.common.rest.entities.Stack;
 import org.apache.ambari.controller.Clusters;
 import org.apache.ambari.controller.ExceptionResponse;
-import org.apache.ambari.controller.Nodes;
 import org.apache.ambari.controller.rest.config.Examples;
 
-import com.sun.jersey.spi.resource.Singleton;
+import com.google.inject.Inject;
 
 import javax.ws.rs.DELETE;
 import javax.ws.rs.GET;
 import javax.ws.rs.Consumes;
 import javax.ws.rs.DefaultValue;
-import javax.ws.rs.POST;
 import javax.ws.rs.PUT;
 import javax.ws.rs.Path;
 import javax.ws.rs.PathParam;
@@ -53,6 +50,14 @@ import javax.ws.rs.core.Response;
 @Path("clusters")
 public class ClustersResource {
     
+    private static Clusters clusters;
+    
+    @Inject
+    static void init(Clusters clus) {
+      System.out.println("In ClustersResource init");
+      clusters = clus;
+    }
+    
     /** 
      * Get the list of clusters.
      *
@@ -80,7 +85,7 @@ public class ClustersResource {
                                  @DefaultValue("") @QueryParam("search") String search) throws Exception {
         List<ClusterInformation> searchResults = null;
         try {
-            searchResults = Clusters.getInstance().getClusterInformationList(state);
+            searchResults = clusters.getClusterInformationList(state);
             if (searchResults.isEmpty()) {
                 throw new WebApplicationException(Response.Status.NO_CONTENT);
             }   
@@ -111,7 +116,7 @@ public class ClustersResource {
     @Produces({MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML})
     public ClusterInformation getClusterDefinition(@PathParam("clusterName") String clusterName) throws WebApplicationException {
         try {
-            return Clusters.getInstance().getClusterInformation(clusterName);
+            return clusters.getClusterInformation(clusterName);
         }catch (WebApplicationException we) {
             throw we;
         }catch (Exception e) {
@@ -166,7 +171,7 @@ public class ClustersResource {
            @DefaultValue("false") @QueryParam("dry_run") boolean dry_run,
            ClusterDefinition cluster) throws Exception {    
         try {
-            return Clusters.getInstance().updateCluster(clusterName, cluster, dry_run);
+            return clusters.updateCluster(clusterName, cluster, dry_run);
         }catch (WebApplicationException we) {
             throw we;
         }catch (Exception e) {
@@ -196,7 +201,7 @@ public class ClustersResource {
            @PathParam("clusterName") String clusterName,
            @DefaultValue("") @QueryParam("new_name") String new_name) throws Exception {    
         try {
-            Clusters.getInstance().renameCluster(clusterName, new_name);
+            clusters.renameCluster(clusterName, new_name);
             return Response.ok().build();
         }catch (WebApplicationException we) {
             throw we;
@@ -226,7 +231,7 @@ public class ClustersResource {
     @Consumes({MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML})
     public Response deleteCluster(@PathParam("clusterName") String clusterName) throws Exception {
         try {
-            Clusters.getInstance().deleteCluster(clusterName);
+            clusters.deleteCluster(clusterName);
             return Response.ok().build();
         }catch (WebApplicationException we) {
             throw we;
@@ -261,7 +266,7 @@ public class ClustersResource {
     @Produces({MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML})
     public ClusterState getClusterState(@PathParam("clusterName") String clusterName) throws Exception {
         try {
-            return Clusters.getInstance().getClusterState(clusterName);
+            return clusters.getClusterState(clusterName);
         }catch (WebApplicationException we) {
             throw we;
         }catch (Exception e) {
@@ -301,7 +306,7 @@ public class ClustersResource {
                                 @DefaultValue("") @QueryParam("role") String role,
                                 @DefaultValue("") @QueryParam("alive") String alive) throws Exception {    
         try {
-            List<Node> list = Nodes.getInstance().getClusterNodes(clusterName, role, alive);
+            List<Node> list = clusters.getClusterNodes(clusterName, role, alive);
             
             if (list.isEmpty()) {
                 String msg = "No nodes found!";
@@ -337,7 +342,7 @@ public class ClustersResource {
     public Stack getClusterStack (@PathParam("clusterName") String clusterName,
                                 @DefaultValue("true") @QueryParam("expanded") boolean expanded) throws Exception {    
         try {
-            return Clusters.getInstance().getClusterStack(clusterName, expanded);
+            return clusters.getClusterStack(clusterName, expanded);
         }catch (WebApplicationException we) {
             throw we;
         }catch (Exception e) {

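All three REST resources in this commit (ClustersResource here, NodesResource and StacksResource below) move from the Clusters.getInstance()/Nodes.getInstance()/Stacks.getInstance() singletons to static fields filled in by Guice. Because Jersey, not Guice, instantiates the JAX-RS resource classes, the @Inject static void init(...) methods only take effect if the controller's Guice module requests static injection. That module is not part of this diff; a minimal sketch of the wiring it would need, with the module name purely illustrative:

    // Hypothetical module -- the real wiring is outside this diff.
    import com.google.inject.AbstractModule;

    public class ControllerModule extends AbstractModule {
      @Override
      protected void configure() {
        // Populate the static fields of the Jersey-managed resources
        // before any request reaches them.
        requestStaticInjection(ClustersResource.class,
                               NodesResource.class,
                               StacksResource.class);
      }
    }
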
Modified: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/rest/resources/NodesResource.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/rest/resources/NodesResource.java?rev=1210997&r1=1210996&r2=1210997&view=diff
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/rest/resources/NodesResource.java (original)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/rest/resources/NodesResource.java Tue Dec  6 16:19:14 2011
@@ -34,12 +34,21 @@ import org.apache.ambari.controller.Exce
 import org.apache.ambari.controller.Nodes;
 import org.apache.ambari.controller.rest.config.Examples;
 
+import com.google.inject.Inject;
+
 
 /** Nodes Resource represents a collection of cluster nodes.
  */
 @Path("nodes")
 public class NodesResource {
             
+    private static Nodes nodes;
+    
+    @Inject
+    static void init(Nodes n) {
+      nodes = n;
+    }
+
     /** Get list of nodes
      *
      *  The "allocated and "alive" are the boolean variables that specify the type of nodes to return based on their state i.e. if they are already allocated to any cluster and live or dead. 
@@ -62,7 +71,7 @@ public class NodesResource {
                                     @DefaultValue("") @QueryParam("alive") String alive) throws Exception {
         List<Node> list;
         try {
-            list = Nodes.getInstance().getNodesByState(allocated, alive);
+            list = nodes.getNodesByState(allocated, alive);
             if (list.isEmpty()) {
                 throw new WebApplicationException(Response.Status.NO_CONTENT);
             }   
@@ -94,7 +103,7 @@ public class NodesResource {
     @Produces({MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML})
     public Node getNode (@PathParam("hostname") String hostname) throws Exception {
         try {
-            return Nodes.getInstance().getNode(hostname);
+            return nodes.getNode(hostname);
         }catch (WebApplicationException we) {
             throw we;
         }catch (Exception e) {

Modified: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/rest/resources/StacksResource.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/rest/resources/StacksResource.java?rev=1210997&r1=1210996&r2=1210997&view=diff
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/rest/resources/StacksResource.java (original)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/rest/resources/StacksResource.java Tue Dec  6 16:19:14 2011
@@ -17,7 +17,6 @@
 */
 package org.apache.ambari.controller.rest.resources;
 
-import java.io.ByteArrayInputStream;
 import java.util.List;
 
 import javax.ws.rs.Consumes;
@@ -35,10 +34,13 @@ import javax.ws.rs.core.Response;
 
 import org.apache.ambari.common.rest.entities.Stack;
 import org.apache.ambari.common.rest.entities.StackInformation;
+import org.apache.ambari.controller.Clusters;
 import org.apache.ambari.controller.Stacks;
 import org.apache.ambari.controller.ExceptionResponse;
 import org.apache.ambari.controller.rest.config.Examples;
 
+import com.google.inject.Inject;
+
 /** 
  * StacksResource represents a Hadoop stack to be installed on a 
  * cluster. Stacks define a collection of Hadoop components that are
@@ -47,6 +49,15 @@ import org.apache.ambari.controller.rest
 @Path("stacks")
 public class StacksResource {
  
+    private static Stacks stacks;
+    private static Clusters clusters;
+    
+    @Inject
+    static void init(Stacks s, Clusters c) {
+      stacks = s;
+      clusters = c;
+    }
+    
     /** 
      * Get the list of stacks
      * 
@@ -64,7 +75,7 @@ public class StacksResource {
     public List<StackInformation> listStacks() throws Exception {
         List<StackInformation> list;
         try {
-            list = Stacks.getInstance().getStackList();
+            list = stacks.getStackList();
             if (list.isEmpty()) {
                 throw new WebApplicationException(Response.Status.NO_CONTENT);
             } 
@@ -94,7 +105,7 @@ public class StacksResource {
     public Stack getStack(@PathParam("stackName") String stackName, 
                                   @DefaultValue("-1") @QueryParam("revision") String revision) throws Exception {     
         try {
-            return Stacks.getInstance().getStack(stackName, Integer.parseInt(revision));
+            return stacks.getStack(stackName, Integer.parseInt(revision));
         }catch (WebApplicationException we) {
             throw we;
         }catch (Exception e) {
@@ -119,7 +130,7 @@ public class StacksResource {
     @Produces({MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML})
     public List<StackInformation> getstackRevisions(@PathParam("stackName") String stackName) throws Exception {     
         try {
-            List<StackInformation> list = Stacks.getInstance().getStackRevisions(stackName);
+            List<StackInformation> list = stacks.getStackRevisions(stackName);
             if (list.isEmpty()) {
                 throw new WebApplicationException(Response.Status.NO_CONTENT);
             }
@@ -146,7 +157,12 @@ public class StacksResource {
     @Consumes({MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML})
     public Response deletestack(@PathParam("stackName") String stackName) throws Exception {     
         try {
-            Stacks.getInstance().deleteStack(stackName);
+            if (clusters.isStackUsed(stackName)) {
+              throw new WebApplicationException(new ExceptionResponse(stackName+ 
+                  " is still used by one or more clusters.",
+                  Response.Status.BAD_REQUEST).get());
+            }
+            stacks.deleteStack(stackName);
             return Response.ok().build();
         }catch (WebApplicationException we) {
             throw we;
@@ -184,9 +200,9 @@ public class StacksResource {
                                      Stack stack) throws Exception {
         try {
             if (locationURL == null || locationURL.equals("")) {
-                return Stacks.getInstance().addStack(stackName, stack);
+                return stacks.addStack(stackName, stack);
             } else {
-                return Stacks.getInstance().importDefaultStack (stackName, locationURL);
+                return stacks.importDefaultStack (stackName, locationURL);
             }
         }catch (WebApplicationException we) {
             throw we;

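The new guard in deletestack rejects deletion of a stack that an existing cluster still references, surfacing it as 400 BAD_REQUEST instead of leaving dangling cluster definitions. The committed Clusters.isStackUsed is not shown in this diff; one plausible shape, assuming ClusterDefinition exposes a getStackName() accessor matching the stackName attribute in the cluster XML below:

    // Illustrative sketch only -- not the committed implementation.
    // Assumes ClusterDefinition.getStackName(), per the cluster XML schema.
    boolean isStackUsed(PersistentDataStore dataStore,
                        String stackName) throws IOException {
      for (String cluster : dataStore.retrieveClusterList()) {
        int rev = dataStore.retrieveLatestClusterRevisionNumber(cluster);
        ClusterDefinition def =
            dataStore.retrieveClusterDefinition(cluster, rev);
        if (stackName.equals(def.getStackName())) {
          return true;   // some cluster is still built on this stack
        }
      }
      return false;
    }
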
Modified: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/PersistentDataStore.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/PersistentDataStore.java?rev=1210997&r1=1210996&r2=1210997&view=diff
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/PersistentDataStore.java (original)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/PersistentDataStore.java Tue Dec  6 16:19:14 2011
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.ambari.datastore;
 
 import java.io.IOException;
@@ -7,6 +24,9 @@ import org.apache.ambari.common.rest.ent
 import org.apache.ambari.common.rest.entities.Stack;
 import org.apache.ambari.common.rest.entities.ClusterDefinition;
 
+/**
+ * Abstraction that stores the Ambari state.
+ */
 public interface PersistentDataStore {
     
     

Added: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/impl/StaticDataStore.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/impl/StaticDataStore.java?rev=1210997&view=auto
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/impl/StaticDataStore.java (added)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/impl/StaticDataStore.java Tue Dec  6 16:19:14 2011
@@ -0,0 +1,200 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.datastore.impl;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import javax.xml.bind.Unmarshaller;
+
+import org.apache.ambari.common.rest.entities.ClusterDefinition;
+import org.apache.ambari.common.rest.entities.ClusterState;
+import org.apache.ambari.common.rest.entities.Stack;
+import org.apache.ambari.datastore.PersistentDataStore;
+
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+
+/**
+ * A data store that uses in-memory maps and some preset values for testing.
+ */
+@Singleton
+public class StaticDataStore implements PersistentDataStore {
+
+  private Map<String, List<ClusterDefinition>> clusters = 
+      new TreeMap<String, List<ClusterDefinition>>();
+
+  private Map<String, List<Stack>> stacks =
+      new TreeMap<String, List<Stack>>();
+  
+  private Map<String, ClusterState> clusterStates =
+      new TreeMap<String, ClusterState>();
+
+  private static final JAXBContext jaxbContext;
+  static {
+    try {
+      jaxbContext = JAXBContext.
+          newInstance("org.apache.ambari.common.rest.entities");
+    } catch (JAXBException e) {
+      throw new RuntimeException("Can't create jaxb context", e);
+    }
+  }
+
+  @Inject
+  StaticDataStore() throws IOException {
+    addStackFile("org/apache/ambari/stacks/hadoop-security-0.xml", 
+                 "hadoop-security");
+    addStackFile("org/apache/ambari/stacks/cluster123-0.xml", "cluster123");
+    addStackFile("org/apache/ambari/stacks/cluster124-0.xml", "cluster124");
+    addStackFile("org/apache/ambari/stacks/puppet1-0.xml", "puppet1");
+    addClusterFile("org/apache/ambari/clusters/cluster123.xml", "cluster123");
+  }
+
+  private void addStackFile(String filename, 
+                            String stackName) throws IOException {
+    InputStream in = ClassLoader.getSystemResourceAsStream(filename);
+    if (in == null) {
+      throw new IllegalArgumentException("Can't find resource for " + filename);
+    }
+    try {
+      Unmarshaller um = jaxbContext.createUnmarshaller();
+      Stack stack = (Stack) um.unmarshal(in);
+      storeStack(stackName, stack);
+    } catch (JAXBException je) {
+      throw new IOException("Can't parse " + filename, je);
+    }
+  }
+
+  private void addClusterFile(String filename,
+                              String clusterName) throws IOException {
+    InputStream in = ClassLoader.getSystemResourceAsStream(filename);
+    if (in == null) {
+      throw new IllegalArgumentException("Can't find resource for " + filename);
+    }
+    try {
+      Unmarshaller um = jaxbContext.createUnmarshaller();
+      ClusterDefinition cluster = (ClusterDefinition) um.unmarshal(in);
+      cluster.setName(clusterName);
+      storeClusterDefinition(cluster);
+    } catch (JAXBException je) {
+      throw new IOException("Can't parse " + filename, je);
+    }    
+  }
+
+  @Override
+  public void close() throws IOException {
+    // PASS
+  }
+
+  @Override
+  public boolean clusterExists(String clusterName) throws IOException {
+    return clusters.containsKey(clusterName);
+  }
+
+  @Override
+  public int retrieveLatestClusterRevisionNumber(String clusterName)
+      throws IOException {
+    return clusters.get(clusterName).size()-1;
+  }
+
+  @Override
+  public void storeClusterState(String clusterName, 
+                                ClusterState clsState) throws IOException {
+    clusterStates.put(clusterName, clsState);
+  }
+
+  @Override
+  public ClusterState retrieveClusterState(String clusterName)
+      throws IOException {
+    return clusterStates.get(clusterName);
+  }
+
+  @Override
+  public int storeClusterDefinition(ClusterDefinition clusterDef
+                                    ) throws IOException {
+    String name = clusterDef.getName();
+    List<ClusterDefinition> list = clusters.get(name);
+    if (list == null) {
+      list = new ArrayList<ClusterDefinition>();
+      clusters.put(name, list);
+    }
+    list.add(clusterDef);
+    return list.size() - 1;
+  }
+
+  @Override
+  public ClusterDefinition retrieveClusterDefinition(String clusterName,
+      int revision) throws IOException {
+    return clusters.get(clusterName).get(revision);
+  }
+
+  @Override
+  public List<String> retrieveClusterList() throws IOException {
+    return new ArrayList<String>(clusters.keySet());
+  }
+
+  @Override
+  public void deleteCluster(String clusterName) throws IOException {
+    clusters.remove(clusterName);
+  }
+
+  @Override
+  public int storeStack(String stackName, Stack stack) throws IOException {
+    List<Stack> list = stacks.get(stackName);
+    if (list == null) {
+      list = new ArrayList<Stack>();
+      stacks.put(stackName, list);
+    }
+    list.add(stack);
+    return list.size() - 1;
+  }
+
+  @Override
+  public Stack retrieveStack(String stackName, 
+                             int revision) throws IOException {
+    return stacks.get(stackName).get(revision);
+  }
+
+  @Override
+  public List<String> retrieveStackList() throws IOException {
+    return new ArrayList<String>(stacks.keySet());
+  }
+
+  @Override
+  public int retrieveLatestStackRevisionNumber(String stackName
+                                               ) throws IOException {
+    return stacks.get(stackName).size() - 1;
+  }
+
+  @Override
+  public void deleteStack(String stackName) throws IOException {
+    stacks.remove(stackName);
+  }
+
+  @Override
+  public boolean stackExists(String stackName) throws IOException {
+    return stacks.containsKey(stackName);
+  }
+
+}

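The revision scheme in StaticDataStore is simply the list index: each store call appends a new revision and returns its index, and the latest revision number is size - 1. A short usage sketch of that contract (the class and stack names here are illustrative; the store itself is normally obtained through Guice):

    import java.io.IOException;
    import org.apache.ambari.common.rest.entities.Stack;
    import org.apache.ambari.datastore.PersistentDataStore;

    class RevisionDemo {
      static void run(PersistentDataStore store) throws IOException {
        int r0 = store.storeStack("demo", new Stack());  // index 0
        int r1 = store.storeStack("demo", new Stack());  // index 1
        assert r1 == store.retrieveLatestStackRevisionNumber("demo");
        Stack first = store.retrieveStack("demo", r0);   // revision 0
        store.deleteStack("demo");  // drops every revision of "demo"
      }
    }
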
Modified: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/impl/ZookeeperDS.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/impl/ZookeeperDS.java?rev=1210997&r1=1210996&r2=1210997&view=diff
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/impl/ZookeeperDS.java (original)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/impl/ZookeeperDS.java Tue Dec  6 16:19:14 2011
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.ambari.datastore.impl;
 
 import java.io.IOException;
@@ -16,6 +33,13 @@ import org.apache.zookeeper.ZooDefs.Ids;
 import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.data.Stat;
 
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+
+/**
+ * Implementation of the data store based on ZooKeeper.
+ */
+@Singleton
 public class ZookeeperDS implements PersistentDataStore, Watcher {
 
     private static final String DEFAULT_ZOOKEEPER_ADDRESS="localhost:2181";
@@ -27,7 +51,7 @@ public class ZookeeperDS implements Pers
     private String credential = null;
     private boolean zkCoonected = false;
     
-    private static ZookeeperDS ZookeeperDSRef=null;
+    @Inject
     private ZookeeperDS() {
         /*
          * TODO: Read ZooKeeper address and credential from config file
@@ -58,17 +82,6 @@ public class ZookeeperDS implements Pers
         }
     }
     
-    public static synchronized ZookeeperDS getInstance() {
-        if(ZookeeperDSRef == null) {
-            ZookeeperDSRef = new ZookeeperDS();
-        }
-        return ZookeeperDSRef;
-    }
-
-    public Object clone() throws CloneNotSupportedException {
-        throw new CloneNotSupportedException();
-    }
-    
     @Override
     public void close() throws IOException {
         // TODO Auto-generated method stub

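With getInstance() and the clone() guard gone, ZookeeperDS relies entirely on Guice's @Singleton scope: Guice can invoke the private @Inject constructor itself and hands out one instance per injector. A hedged bootstrap sketch, reusing the hypothetical ControllerModule from above:

    import com.google.inject.Guice;
    import com.google.inject.Injector;

    public class DataStoreBootstrap {
      public static void main(String[] args) {
        Injector injector = Guice.createInjector(new ControllerModule());
        PersistentDataStore ds = injector.getInstance(ZookeeperDS.class);
        PersistentDataStore same = injector.getInstance(ZookeeperDS.class);
        assert ds == same;  // @Singleton: one instance per injector
      }
    }
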
Added: incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/clusters/cluster123.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/clusters/cluster123.xml?rev=1210997&view=auto
==============================================================================
--- incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/clusters/cluster123.xml (added)
+++ incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/clusters/cluster123.xml Tue Dec  6 16:19:14 2011
@@ -0,0 +1,8 @@
+<cluster description="Owen's cluster"
+         stackName="cluster123" stackRevision="0"
+         goalState="active" nodes="node00,node01,node02,node03">
+  <activeServices>HDFS</activeServices>
+  <activeServices>MapReduce</activeServices>
+  <roleToNodesMap roleName="namenode" nodes="node00"/>
+  <roleToNodesMap roleName="jobtracker" nodes="node01"/>
+</cluster>

Modified: incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/puppet1-0.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/puppet1-0.xml?rev=1210997&r1=1210996&r2=1210997&view=diff
==============================================================================
--- incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/puppet1-0.xml (original)
+++ incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/puppet1-0.xml Tue Dec  6 16:19:14 2011
@@ -43,9 +43,9 @@
                 <property name="*mapred.system.dir" value="/mapred/mapredsystem" />  
                 <property name="*mapred.local.dir" value="/var/lib/hadoop/mapred1,/var/lib/hadoop/mapred2" /> 
                 <property name="mapred.tasktracker.tasks.sleeptime-before-sigkill" value="250" />
-                <property name="*mapred.job.tracker" value="<%= ambari.mapred.jobtracker.host %>:9000" />
-                <property name="*mapred.job.tracker.http.address" value="<%= ambari.mapred.jobtracker.host %>:50030" />
-                <property name="mapreduce.cluster.administrators" value="<%= ambari.mapreduce.user %>" />
+                <property name="*mapred.job.tracker" value="&lt;%= ambari.mapred.jobtracker.host %&gt;:9000" />
+                <property name="*mapred.job.tracker.http.address" value="&lt;%= ambari.mapred.jobtracker.host %&gt;:50030" />
+                <property name="mapreduce.cluster.administrators" value="&lt;%= ambari.mapreduce.user %&gt;" />
                 <property name="mapred.map.tasks.speculative.execution" value="false" />
                 <property name="mapred.reduce.tasks.speculative.execution" value="false" />
                 <property name="mapred.output.compression.type" value="BLOCK" />
@@ -55,38 +55,38 @@
                 <property name="mapred.child.java.opts" value="-server -Xmx640m -Djava.net.preferIPv4Stack=true" />
                 <property name="mapred.child.ulimit" value="8388608" />
                 <property name="mapred.job.tracker.persist.jobstatus.active" value="true" />
-                <property name="mapred.job.tracker.persist.jobstatus.dir" value="file:////var/log/hadoop/<%= ambari.mapreduce.user %>/jobstatus" /> 
+                <property name="mapred.job.tracker.persist.jobstatus.dir" value="file:////var/log/hadoop/&lt;%= ambari.mapreduce.user %&gt;/jobstatus" /> 
                 <property name="mapred.job.tracker.history.completed.location" value="/mapred/history/done" /> 
                 <property name="mapred.heartbeats.in.second" value="200" />
                 <property name="mapreduce.tasktracker.outofband.heartbeat" value="true" />
                 <property name="*mapred.jobtracker.maxtasks.per.job" value="200000" />
-                <property name="mapreduce.jobtracker.kerberos.principal" value="<%= ambari.jobtracker.principal %>/_HOST@<%= ambari.service.realm %>" />
-                <property name="mapreduce.tasktracker.kerberos.principal" value="<%= ambari.tasktracker.principal %>/_HOST@<%= ambari.service.realm %>" />
+                <property name="mapreduce.jobtracker.kerberos.principal" value="&lt;%= ambari.jobtracker.principal %&gt;/_HOST@&lt;%= ambari.service.realm %&gt;" />
+                <property name="mapreduce.tasktracker.kerberos.principal" value="&lt;%= ambari.tasktracker.principal %&gt;/_HOST@&lt;%= ambari.service.realm %&gt;" />
                 <property name="hadoop.job.history.user.location" value="none" />
-                <property name="mapreduce.jobtracker.keytab.file" value="/etc/security/keytabs/<%= ambari.jobtracker.principal %>.service.keytab" />    
-                <property name="mapreduce.tasktracker.keytab.file" value="/etc/security/keytabs/<%= ambari.tasktracker.principal %>.service.keytab" />  
+                <property name="mapreduce.jobtracker.keytab.file" value="/etc/security/keytabs/&lt;%= ambari.jobtracker.principal %&gt;.service.keytab" />    
+                <property name="mapreduce.tasktracker.keytab.file" value="/etc/security/keytabs/&lt;%= ambari.tasktracker.principal %&gt;.service.keytab" />  
                 <property name="mapreduce.jobtracker.staging.root.dir" value="/user" />
                 <property name="mapreduce.job.acl-modify-job" value="" />
                 <property name="mapreduce.job.acl-view-job" value="Dr.Who" />
-                <property name="mapreduce.tasktracker.group" value="<%= ambari.admin.group %>" />
+                <property name="mapreduce.tasktracker.group" value="&lt;%= ambari.admin.group %&gt;" />
                 <property name="mapred.acls.enabled" value="true" />
                 <property name="mapred.jobtracker.taskScheduler" value="org.apache.hadoop.mapred.CapacityTaskScheduler" />
                 <property name="mapred.queue.names" value="default" />
                 <property name="mapreduce.history.server.embedded" value="false" />
-                <property name="mapreduce.history.server.http.address" value="<%= ambari.mapred.jobtracker.host %>:51111" />
-                <property name="mapreduce.jobhistory.kerberos.principal" value="<%= ambari.jobtracker.principal %>/_HOST@<%= ambari.service.realm %>" />
-                <property name="mapreduce.jobhistory.keytab.file" value="/etc/security/keytabs/<%= ambari.jobtracker.principal %>.service.keytab" />
-                <property name="mapred.hosts" value="<%= ambari.hadoop_conf_dir %>/<%= ambari.cluster.name %>-<%= ambari.role.name %>/mapred.include" />
-                <property name="mapred.hosts.exclude" value="<%= ambari.hadoop_conf_dir %>/<%= ambari.cluster.name %>-<%= ambari.role.name %>/mapred.exclude" />
+                <property name="mapreduce.history.server.http.address" value="&lt;%= ambari.mapred.jobtracker.host %&gt;:51111" />
+                <property name="mapreduce.jobhistory.kerberos.principal" value="&lt;%= ambari.jobtracker.principal %&gt;/_HOST@&lt;%= ambari.service.realm %&gt;" />
+                <property name="mapreduce.jobhistory.keytab.file" value="/etc/security/keytabs/&lt;%= ambari.jobtracker.principal %&gt;.service.keytab" />
+                <property name="mapred.hosts" value="&lt;%= ambari.hadoop_conf_dir %&gt;/&lt;%= ambari.cluster.name %&gt;-&lt;%= ambari.role.name %&gt;/mapred.include" />
+                <property name="mapred.hosts.exclude" value="&lt;%= ambari.hadoop_conf_dir %&gt;/&lt;%= ambari.cluster.name %&gt;-&lt;%= ambari.role.name %&gt;/mapred.exclude" />
                 <property name="mapred.jobtracker.retirejob.check" value="10000" />
                 <property name="mapred.jobtracker.retirejob.interval" value="0" />
             </category>
 
             <category name="core-site">
-                <property name="local.realm" value=<%= ambari.service.realm %> /> 
+                <property name="local.realm" value="&lt;%= ambari.service.realm %&gt;" /> 
                 <property name="fs.default.name" value="hdfs://${ambari.hdfs.namenode.host}:8020" />
                 <property name="fs.trash.interval" value="360" />
-                <property name="hadoop.security.auth_to_local" value="RULE:[1:$1@$0](.*@<%= ambari.user.realm %>)s/@.*// RULE:[2:$1@$0](${ambari.jobtracker.principal}@<%= ambari.service.realm %>)s/.*/${ambari.mapreduce.user}/ RULE:[2:$1@$0](${ambari.tasktracer.principal}@<%= ambari.service.realm %>)s/.*/${ambari.mapreduce.user}/ RULE:[2:$1@$0](${ambari.namenode.principal}@<%= ambari.service.realm %>)s/.*/${ambari.hdfs.user}/ RULE:[2:$1@$0](${ambari.datanode.principal}@<%= ambari.service.realm %>)s/.*/${ambari.hdfs.user}/ RULE:[2:$1@$0](${ambari.hbasemaster.principal}@<%= ambari.service.realm %>)s/.*/${ambari.hbase.user}/ RULE:[2:$1@$0](${ambari.regionserver.principal}@<%= ambari.service.realm %>)s/.*/${ambari.hbase.user}/ RULE:[2:$1@$0](${ambari.hcat.principal}@<%= ambari.service.realm %>)s/.*/${ambari.hcat.user}/" />
+                <property name="hadoop.security.auth_to_local" value="RULE:[1:$1@$0](.*@&lt;%= ambari.user.realm %&gt;)s/@.*// RULE:[2:$1@$0](${ambari.jobtracker.principal}@&lt;%= ambari.service.realm %&gt;)s/.*/${ambari.mapreduce.user}/ RULE:[2:$1@$0](${ambari.tasktracer.principal}@&lt;%= ambari.service.realm %&gt;)s/.*/${ambari.mapreduce.user}/ RULE:[2:$1@$0](${ambari.namenode.principal}@&lt;%= ambari.service.realm %&gt;)s/.*/${ambari.hdfs.user}/ RULE:[2:$1@$0](${ambari.datanode.principal}@&lt;%= ambari.service.realm %&gt;)s/.*/${ambari.hdfs.user}/ RULE:[2:$1@$0](${ambari.hbasemaster.principal}@&lt;%= ambari.service.realm %&gt;)s/.*/${ambari.hbase.user}/ RULE:[2:$1@$0](${ambari.regionserver.principal}@&lt;%= ambari.service.realm %&gt;)s/.*/${ambari.hbase.user}/ RULE:[2:$1@$0](${ambari.hcat.principal}@&lt;%= ambari.service.realm %&gt;)s/.*/${ambari.hcat.user}/" />
                 <property name="hadoop.security.authentication" value="simple" />
                 <property name="hadoop.security.authorization" value="false" />
                 <property name="hadoop.security.groups.cache.secs" value="14400" />
@@ -104,12 +104,12 @@
                 <property name="dfs.umaskmode" value="077" />
 
                 <property name="dfs.block.access.token.enable" value="false" />
-                <property name="dfs.namenode.kerberos.principal" value="${ambari.namenode.principal}/_HOST@<%= ambari.service.realm %>" />
-                <property name="dfs.namenode.kerberos.https.principal" value="host/_HOST@<%= ambari.service.realm %>" />
-                <property name="dfs.secondary.namenode.kerberos.principal" value="${ambari.namenode.principal}/_HOST@<%= ambari.service.realm %>" />
-                <property name="dfs.secondary.namenode.kerberos.https.principal" value="host/_HOST@<%= ambari.service.realm %>" />
-                <property name="dfs.datanode.kerberos.principal" value="${ambari.datanode.principal}/_HOST@<%= ambari.service.realm %>" />
-                <property name="dfs.web.authentication.kerberos.principal" value="HTTP/_HOST@<%= ambari.service.realm %>" />
+                <property name="dfs.namenode.kerberos.principal" value="${ambari.namenode.principal}/_HOST@&lt;%= ambari.service.realm %&gt;" />
+                <property name="dfs.namenode.kerberos.https.principal" value="host/_HOST@&lt;%= ambari.service.realm %&gt;" />
+                <property name="dfs.secondary.namenode.kerberos.principal" value="${ambari.namenode.principal}/_HOST@&lt;%= ambari.service.realm %&gt;" />
+                <property name="dfs.secondary.namenode.kerberos.https.principal" value="host/_HOST@&lt;%= ambari.service.realm %&gt;" />
+                <property name="dfs.datanode.kerberos.principal" value="${ambari.datanode.principal}/_HOST@&lt;%= ambari.service.realm %&gt;" />
+                <property name="dfs.web.authentication.kerberos.principal" value="HTTP/_HOST@&lt;%= ambari.service.realm %&gt;" />
 
                 <property name="dfs.web.authentication.kerberos.keytab" value="/etc/security/keytabs/${ambari.namenode.principal}.service.keytab" />
                 <property name="dfs.namenode.keytab.file" value="/etc/security/keytabs/${ambari.namenode.principal}.service.keytab" />
@@ -173,8 +173,8 @@
                 <property name="mapred.heartbeats.in.second" value="200" />
                 <property name="mapreduce.tasktracker.outofband.heartbeat" value="true" />
                 <property name="*mapred.jobtracker.maxtasks.per.job" value="200000" />
-                <property name="mapreduce.jobtracker.kerberos.principal" value="${ambari.jobtracker.principal}/_HOST@<%= ambari.service.realm %>" />
-                <property name="mapreduce.tasktracker.kerberos.principal" value="${ambari.tasktracker.principal}/_HOST@<%= ambari.service.realm %>" />
+                <property name="mapreduce.jobtracker.kerberos.principal" value="${ambari.jobtracker.principal}/_HOST@&lt;%= ambari.service.realm %&gt;" />
+                <property name="mapreduce.tasktracker.kerberos.principal" value="${ambari.tasktracker.principal}/_HOST@&lt;%= ambari.service.realm %&gt;" />
                 <property name="hadoop.job.history.user.location" value="none" />
                 <property name="mapreduce.jobtracker.keytab.file" value="/etc/security/keytabs/${ambari.jobtracker.principal}.service.keytab" />    
                 <property name="mapreduce.tasktracker.keytab.file" value="/etc/security/keytabs/${ambari.tasktracker.principal}.service.keytab" />  
@@ -187,7 +187,7 @@
                 <property name="mapred.queue.names" value="default" />
                 <property name="mapreduce.history.server.embedded" value="false" />
                 <property name="mapreduce.history.server.http.address" value="${ambari.mapred.jobtracker.host}:51111" />
-                <property name="mapreduce.jobhistory.kerberos.principal" value="${ambari.jobtracker.principal}/_HOST@<%= ambari.service.realm %>" />
+                <property name="mapreduce.jobhistory.kerberos.principal" value="${ambari.jobtracker.principal}/_HOST@&lt;%= ambari.service.realm %&gt;" />
                 <property name="mapreduce.jobhistory.keytab.file" value="/etc/security/keytabs/${ambari.jobtracker.principal}.service.keytab" />
                 <property name="mapred.hosts" value="${ambari.HADOOP_CONF_DIR}/mapred.include" />
                 <property name="mapred.hosts.exclude" value="${ambari.HADOOP_CONF_DIR}/mapred.exclude" />
@@ -196,10 +196,10 @@
             </category>
 
             <category name="core-site">
-                <property name="local.realm" value=<%= ambari.service.realm %> /> 
+                <property name="local.realm" value="&lt;%= ambari.service.realm %&gt;" /> 
                 <property name="fs.default.name" value="hdfs://${ambari.hdfs.namenode.host}:8020" />
                 <property name="fs.trash.interval" value="360" />
-                <property name="hadoop.security.auth_to_local" value="RULE:[1:$1@$0](.*@<%= ambari.user.realm %>)s/@.*// RULE:[2:$1@$0](${ambari.jobtracker.principal}@<%= ambari.service.realm %>)s/.*/${ambari.mapreduce.user}/ RULE:[2:$1@$0](${ambari.tasktracer.principal}@<%= ambari.service.realm %>)s/.*/${ambari.mapreduce.user}/ RULE:[2:$1@$0](${ambari.namenode.principal}@<%= ambari.service.realm %>)s/.*/${ambari.hdfs.user}/ RULE:[2:$1@$0](${ambari.datanode.principal}@<%= ambari.service.realm %>)s/.*/${ambari.hdfs.user}/ RULE:[2:$1@$0](${ambari.hbasemaster.principal}@<%= ambari.service.realm %>)s/.*/${ambari.hbase.user}/ RULE:[2:$1@$0](${ambari.regionserver.principal}@<%= ambari.service.realm %>)s/.*/${ambari.hbase.user}/ RULE:[2:$1@$0](${ambari.hcat.principal}@<%= ambari.service.realm %>)s/.*/${ambari.hcat.user}/" />
+                <property name="hadoop.security.auth_to_local" value="RULE:[1:$1@$0](.*@&lt;%= ambari.user.realm %&gt;)s/@.*// RULE:[2:$1@$0](${ambari.jobtracker.principal}@&lt;%= ambari.service.realm %&gt;)s/.*/${ambari.mapreduce.user}/ RULE:[2:$1@$0](${ambari.tasktracer.principal}@&lt;%= ambari.service.realm %&gt;)s/.*/${ambari.mapreduce.user}/ RULE:[2:$1@$0](${ambari.namenode.principal}@&lt;%= ambari.service.realm %&gt;)s/.*/${ambari.hdfs.user}/ RULE:[2:$1@$0](${ambari.datanode.principal}@&lt;%= ambari.service.realm %&gt;)s/.*/${ambari.hdfs.user}/ RULE:[2:$1@$0](${ambari.hbasemaster.principal}@&lt;%= ambari.service.realm %&gt;)s/.*/${ambari.hbase.user}/ RULE:[2:$1@$0](${ambari.regionserver.principal}@&lt;%= ambari.service.realm %&gt;)s/.*/${ambari.hbase.user}/ RULE:[2:$1@$0](${ambari.hcat.principal}@&lt;%= ambari.service.realm %&gt;)s/.*/${ambari.hcat.user}/" />
                 <property name="hadoop.security.authentication" value="simple" />
                 <property name="hadoop.security.authorization" value="false" />
                 <property name="hadoop.security.groups.cache.secs" value="14400" />
@@ -218,12 +218,12 @@
                 <property name="dfs.umaskmode" value="077" />
 
                 <property name="dfs.block.access.token.enable" value="false" />
-                <property name="dfs.namenode.kerberos.principal" value="${ambari.namenode.principal}/_HOST@<%= ambari.service.realm %>" />
-                <property name="dfs.namenode.kerberos.https.principal" value="host/_HOST@<%= ambari.service.realm %>" />
-                <property name="dfs.secondary.namenode.kerberos.principal" value="${ambari.namenode.principal}/_HOST@<%= ambari.service.realm %>" />
-                <property name="dfs.secondary.namenode.kerberos.https.principal" value="host/_HOST@<%= ambari.service.realm %>" />
-                <property name="dfs.datanode.kerberos.principal" value="${ambari.datanode.principal}/_HOST@<%= ambari.service.realm %>" />
-                <property name="dfs.web.authentication.kerberos.principal" value="HTTP/_HOST@<%= ambari.service.realm %>" />
+                <property name="dfs.namenode.kerberos.principal" value="${ambari.namenode.principal}/_HOST@&lt;%= ambari.service.realm %&gt;" />
+                <property name="dfs.namenode.kerberos.https.principal" value="host/_HOST@&lt;%= ambari.service.realm %&gt;" />
+                <property name="dfs.secondary.namenode.kerberos.principal" value="${ambari.namenode.principal}/_HOST@&lt;%= ambari.service.realm %&gt;" />
+                <property name="dfs.secondary.namenode.kerberos.https.principal" value="host/_HOST@&lt;%= ambari.service.realm %&gt;" />
+                <property name="dfs.datanode.kerberos.principal" value="${ambari.datanode.principal}/_HOST@&lt;%= ambari.service.realm %&gt;" />
+                <property name="dfs.web.authentication.kerberos.principal" value="HTTP/_HOST@&lt;%= ambari.service.realm %&gt;" />
 
                 <property name="dfs.web.authentication.kerberos.keytab" value="/etc/security/keytabs/${ambari.namenode.principal}.service.keytab" />
                 <property name="dfs.namenode.keytab.file" value="/etc/security/keytabs/${ambari.namenode.principal}.service.keytab" />

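The puppet1-0.xml changes above are an XML well-formedness fix: raw < and > are illegal inside attribute values (and value=<%= ... %> was not even quoted), so the ERB-style <%= ... %> markers are entity-escaped as &lt;%= ... %&gt;. An XML parser unescapes the entities on read, so consumers of the parsed properties still see literal <%= ... %> expressions.
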
Added: incubator/ambari/trunk/controller/src/test/java/org/apache/ambari/controller/StackFlattenerTest.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/test/java/org/apache/ambari/controller/StackFlattenerTest.java?rev=1210997&view=auto
==============================================================================
--- incubator/ambari/trunk/controller/src/test/java/org/apache/ambari/controller/StackFlattenerTest.java (added)
+++ incubator/ambari/trunk/controller/src/test/java/org/apache/ambari/controller/StackFlattenerTest.java Tue Dec  6 16:19:14 2011
@@ -0,0 +1,219 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.controller;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.ambari.common.rest.entities.Component;
+import org.apache.ambari.common.rest.entities.ComponentDefinition;
+import org.apache.ambari.common.rest.entities.Configuration;
+import org.apache.ambari.common.rest.entities.ConfigurationCategory;
+import org.apache.ambari.common.rest.entities.Property;
+import org.apache.ambari.common.rest.entities.RepositoryKind;
+import org.apache.ambari.common.rest.entities.Role;
+import org.apache.ambari.common.rest.entities.Stack;
+import org.apache.ambari.components.ComponentPlugin;
+import org.apache.ambari.components.ComponentPluginFactory;
+
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+import static org.testng.AssertJUnit.assertEquals;
+
+public class StackFlattenerTest {
+  
+  Stacks stacks;
+  Stack parentStack;
+  Stack childStack;
+  Stack grandchildStack;
+  ComponentPluginFactory plugins;
+  ComponentPlugin hdfs;
+  ComponentPlugin mapreduce;
+  ComponentDefinition hdfsDefn;
+  ComponentDefinition mapreduceDefn;
+  Component parentHdfs;
+  Component parentMapreduce;
+  StackFlattener flattener;
+  
+  @BeforeMethod
+  public void setup() throws Exception {
+    stacks =  mock(Stacks.class);
+    parentStack = new Stack();
+    parentStack.setName("parent");
+    childStack = new Stack();
+    childStack.setName("child");
+    grandchildStack = new Stack();
+    grandchildStack.setName("grandchild");
+    childStack.setParentName(parentStack.getName());
+    childStack.setParentRevision(0);
+    grandchildStack.setParentName(childStack.getName());
+    grandchildStack.setParentRevision(0);
+    when(stacks.getStack(parentStack.getName(), 0)).thenReturn(parentStack);
+    when(stacks.getStack(childStack.getName(), 0)).thenReturn(childStack);    
+    when(stacks.getStack(grandchildStack.getName(), 0)).
+      thenReturn(grandchildStack);
+    plugins = mock(ComponentPluginFactory.class);
+    hdfs = mock(ComponentPlugin.class);
+    when(hdfs.getActiveRoles()).
+      thenReturn(new String[]{"namenode", "datanode"});
+    mapreduce = mock(ComponentPlugin.class);
+    when(mapreduce.getActiveRoles()).
+      thenReturn(new String[]{"jobtracker","tasktracker"});
+    hdfsDefn = new ComponentDefinition("hdfs", "org.apache.ambari", "0");
+    mapreduceDefn = new ComponentDefinition("mapreduce", "org.apache.ambari", 
+                                            "0");
+    when(plugins.getPlugin(hdfsDefn)).thenReturn(hdfs);
+    when(plugins.getPlugin(mapreduceDefn)).thenReturn(mapreduce);
+    parentHdfs = new Component("hdfs", "0.20.205.0", "i386", 
+                               "org.apache.ambari",
+                               new ComponentDefinition("hdfs", 
+                                   "org.apache.ambari", "0"), 
+                               new Configuration(), new ArrayList<Role>());
+    parentMapreduce = new Component("mapreduce", "0.20.205.0", "i386", 
+                                    "org.apache.ambari",
+                                    new ComponentDefinition("mapreduce", 
+                                                       "org.apache.ambari","0"), 
+                                    new Configuration(), new ArrayList<Role>());
+    List<Component> compList = new ArrayList<Component>();
+    parentStack.setComponents(compList);
+    compList.add(parentHdfs);
+    compList.add(parentMapreduce);
+    flattener = new StackFlattener(stacks, plugins);
+  }
+
+  @Test
+  public void testRepositoryFlattening() throws Exception {
+    parentStack.setPackageRepositories(Arrays.asList
+        (new RepositoryKind("kind1", "url1", "url2"),
+         new RepositoryKind("kind2", "url3", "url4")));
+    childStack.setPackageRepositories(Arrays.asList
+        (new RepositoryKind("kind3", "url5")));
+    grandchildStack.setPackageRepositories(Arrays.asList
+        (new RepositoryKind("kind1", "url7", "url8"),
+         new RepositoryKind("kind3", "url9", "url10")));
+    grandchildStack.setRevision("123");
+    Stack flat = flattener.flattenStack("grandchild", 0);
+    List<RepositoryKind> answer = flat.getPackageRepositories();
+    assertEquals(new RepositoryKind("kind1", "url7", "url8", "url1", "url2"), 
+                 answer.get(0));
+    assertEquals(new RepositoryKind("kind2", "url3", "url4"), answer.get(1));
+    assertEquals(new RepositoryKind("kind3", "url9", "url10", "url5"), 
+                 answer.get(2));
+    
+    // ensure the name and parent name are what we expect
+    assertEquals("grandchild", flat.getName());
+    assertEquals("123", flat.getRevision());
+    assertEquals(null, flat.getParentName());
+    assertEquals(0, flat.getParentRevision());
+  }
+ 
+  static void setConfigParam(Configuration conf, String category, String key,
+                             String value) {
+    for(ConfigurationCategory cat: conf.getCategory()) {
+      if (cat.getName().equals(category)) {
+        for(Property prop: cat.getProperty()) {
+          if (prop.getName().equals(key)) {
+            // if we find the right property, update it and stop
+            prop.setValue(value);
+            return;
+          }
+        }
+        // otherwise add a new property to the existing category
+        cat.getProperty().add(new Property(key, value));
+        return;
+      }
+    }
+    // otherwise, it is a new category
+    List<Property> propList = new ArrayList<Property>();
+    propList.add(new Property(key,value));
+    conf.getCategory().add(new ConfigurationCategory(category, propList));
+  }
+  
+  static String getConfigParam(Configuration conf, String category, 
+                               String key) {
+    for(ConfigurationCategory cat: conf.getCategory()) {
+      if (cat.getName().equals(category)) {
+        for(Property prop: cat.getProperty()) {
+          if (prop.getName().equals(key)) {
+            return prop.getValue();
+          }
+        }
+        return null;
+      }
+    }
+    return null;
+  }
+
+  @Test
+  public void testConfigFlattening() throws Exception {
+    Configuration parentConfiguration = new Configuration();
+    parentStack.setConfiguration(parentConfiguration);
+    Configuration childConfiguration = new Configuration();
+    childStack.setConfiguration(childConfiguration);
+    Configuration grandchildConfiguration = new Configuration();
+    grandchildStack.setConfiguration(grandchildConfiguration);
+    Configuration parentHdfsConfig = parentHdfs.getConfiguration();
+    Configuration parentMapredConfig = parentMapreduce.getConfiguration();
+    List<Role> hdfsRoles = new ArrayList<Role>();
+    Configuration childHdfsConfig = new Configuration();
+    childStack.getComponents().add(
+        new Component("hdfs", null, null, null, null, 
+                      childHdfsConfig, hdfsRoles));
+    Configuration nnConf = new Configuration();
+    hdfsRoles.add(new Role("namenode", nnConf));
+    setConfigParam(parentConfiguration, "cat1", "b", "parent");
+    setConfigParam(parentConfiguration, "cat1", "a", "a-value");
+    setConfigParam(parentConfiguration, "cat2", "b", "cat2-value");
+    setConfigParam(childConfiguration, "cat1", "b", "child");
+    setConfigParam(parentHdfsConfig, "cat1", "b", "parent-hdfs");
+    setConfigParam(parentHdfsConfig, "cat1", "d", "d-value");
+    setConfigParam(parentMapredConfig, "cat1", "b", "parent-mapred");
+    setConfigParam(grandchildConfiguration, "cat1", "b", "grandchild");
+    setConfigParam(childHdfsConfig, "cat1", "b", "child-hdfs");
+    setConfigParam(nnConf, "cat1", "b", "nn");
+    setConfigParam(nnConf, "cat1", "c", "nn-c");
+    Stack flat = flattener.flattenStack("grandchild", 0);
+    Configuration conf = flat.getConfiguration();
+    assertEquals("a-value", getConfigParam(conf, "cat1", "a"));
+    assertEquals("cat2-value", getConfigParam(conf, "cat2", "b"));
+    assertEquals("grandchild", getConfigParam(conf, "cat1", "b"));
+    assertEquals(null, getConfigParam(conf, "cat1", "c"));
+    assertEquals(null, getConfigParam(conf, "cat1", "d"));
+    Component comp = flat.getComponents().get(0);
+    assertEquals("hdfs", comp.getName());
+    assertEquals(null, comp.getConfiguration());
+    Role role = comp.getRoles().get(0);
+    assertEquals("namenode", role.getName());
+    conf = role.getConfiguration();
+    assertEquals("a-value", getConfigParam(conf, "cat1", "a"));
+    assertEquals("cat2-value", getConfigParam(conf, "cat2", "b"));
+    assertEquals("grandchild", getConfigParam(conf, "cat1", "b"));
+    assertEquals("nn-c", getConfigParam(conf, "cat1", "c"));
+    assertEquals("d-value", getConfigParam(conf, "cat1", "d"));
+    role = comp.getRoles().get(1);
+    assertEquals("datanode", role.getName());
+    conf = role.getConfiguration();
+    assertEquals("a-value", getConfigParam(conf, "cat1", "a"));
+    assertEquals("cat2-value", getConfigParam(conf, "cat2", "b"));
+    assertEquals("grandchild", getConfigParam(conf, "cat1", "b"));
+    assertEquals(null, getConfigParam(conf, "cat1", "c"));
+    assertEquals("d-value", getConfigParam(conf, "cat1", "d"));
+  }
+}