Posted to commits@ambari.apache.org by vg...@apache.org on 2011/11/27 01:31:26 UTC

svn commit: r1206648 - in /incubator/ambari/trunk: ./ client/src/main/java/org/apache/ambari/common/rest/entities/ controller/src/main/java/org/apache/ambari/controller/ controller/src/main/java/org/apache/ambari/controller/rest/resources/ controller/s...

Author: vgogate
Date: Sun Nov 27 00:31:25 2011
New Revision: 1206648

URL: http://svn.apache.org/viewvc?rev=1206648&view=rev
Log:
AMBARI-138. Implement stack persistence

Added:
    incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/puppet1-0.xml
Modified:
    incubator/ambari/trunk/CHANGES.txt
    incubator/ambari/trunk/client/src/main/java/org/apache/ambari/common/rest/entities/Stack.java
    incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Cluster.java
    incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Clusters.java
    incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Controller.java
    incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Stacks.java
    incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/rest/resources/StacksResource.java
    incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/PersistentDataStore.java
    incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/impl/ZookeeperDS.java

Modified: incubator/ambari/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/CHANGES.txt?rev=1206648&r1=1206647&r2=1206648&view=diff
==============================================================================
--- incubator/ambari/trunk/CHANGES.txt (original)
+++ incubator/ambari/trunk/CHANGES.txt Sun Nov 27 00:31:25 2011
@@ -2,6 +2,8 @@ Ambari Change log
 
 Release 0.1.0 - unreleased
 
+  AMBARI-138. Implement stack persistence (vgogate)
+
   AMBARI-135. Simplifies the heartbeat handling to not deal with 
   install/configure methods on component plugin definitions (ddas)
 

Modified: incubator/ambari/trunk/client/src/main/java/org/apache/ambari/common/rest/entities/Stack.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/client/src/main/java/org/apache/ambari/common/rest/entities/Stack.java?rev=1206648&r1=1206647&r2=1206648&view=diff
==============================================================================
--- incubator/ambari/trunk/client/src/main/java/org/apache/ambari/common/rest/entities/Stack.java (original)
+++ incubator/ambari/trunk/client/src/main/java/org/apache/ambari/common/rest/entities/Stack.java Sun Nov 27 00:31:25 2011
@@ -268,22 +268,4 @@ public class Stack {
     public void setCreationTime(XMLGregorianCalendar creationTime) {
         this.creationTime = creationTime;
     }
-    
-    /**
-     * @param creationTime the creationTime to set
-     */
-    public void setCreationTime(Date creationTime) throws IOException {
-        if (creationTime == null) {
-            this.creationTime = null;
-        } else {
-            GregorianCalendar cal = new GregorianCalendar();
-            cal.setTime(creationTime);
-            try {
-              this.creationTime = 
-                  DatatypeFactory.newInstance().newXMLGregorianCalendar(cal);
-            } catch (DatatypeConfigurationException e) {
-              throw new IOException("can't create calendar", e);
-            }
-        }
-    }
 }
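
The Date-based setter removed above does not disappear entirely: the Stacks.java change later in this commit calls Util.getXMLGregorianCalendar(new Date()) instead. That Util helper is not part of this diff, so the following is only a sketch of what it presumably contains, mirroring the removed setter's body (class name from the call site; signature and exception choice are assumptions):

    import java.io.IOException;
    import java.util.Date;
    import java.util.GregorianCalendar;
    import javax.xml.datatype.DatatypeConfigurationException;
    import javax.xml.datatype.DatatypeFactory;
    import javax.xml.datatype.XMLGregorianCalendar;

    public final class Util {
        // Same conversion the removed Stack.setCreationTime(Date) performed inline.
        public static XMLGregorianCalendar getXMLGregorianCalendar(Date date) throws IOException {
            if (date == null) {
                return null;
            }
            GregorianCalendar cal = new GregorianCalendar();
            cal.setTime(date);
            try {
                return DatatypeFactory.newInstance().newXMLGregorianCalendar(cal);
            } catch (DatatypeConfigurationException e) {
                throw new IOException("can't create calendar", e);
            }
        }
    }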

Modified: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Cluster.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Cluster.java?rev=1206648&r1=1206647&r2=1206648&view=diff
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Cluster.java (original)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Cluster.java Sun Nov 27 00:31:25 2011
@@ -74,8 +74,7 @@ public class Cluster {
     public synchronized void init () throws Exception {
         this.latestRevisionNumber = dataStore.retrieveLatestClusterRevisionNumber(clusterName);
         this.latestDefinition = dataStore.retrieveClusterDefinition(clusterName, this.latestRevisionNumber);
-        loadPlugins(this.latestDefinition);
-        //this.clusterState = dataStore.retrieveClusterState(clusterName);  
+        loadPlugins(this.latestDefinition);  
         this.clusterDefinitionRevisionsList.put(this.latestRevisionNumber, this.latestDefinition);
     }
     

Modified: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Clusters.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Clusters.java?rev=1206648&r1=1206647&r2=1206648&view=diff
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Clusters.java (original)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Clusters.java Sun Nov 27 00:31:25 2011
@@ -134,7 +134,6 @@ public class Clusters {
         rnm.add(rnme);
         
         cluster124.setRoleToNodesMap(rnm);
-        
         try {
             if (!clusterExists(cluster123.getName())) {
                 addCluster(cluster123.getName(), cluster123, false);
@@ -234,6 +233,35 @@ public class Clusters {
         this.purgeClusterEntry(clusterName);
     }
     
+    /*
+     * Delete Cluster 
+     * The delete operation marks the cluster to_be_deleted and then sets the goal state to ATTIC.
+     * Once the cluster reaches the ATTIC state, a background daemon should purge the cluster entry.
+     */
+    public synchronized void deleteCluster(String clusterName) throws Exception { 
+    
+        if (!this.clusterExists(clusterName)) {
+            System.out.println("Cluster ["+clusterName+"] does not exist!");
+            return;
+        }
+        
+        /*
+         * Update the cluster definition with goal state to be ATTIC
+         */
+        Cluster cls = this.getClusterByName(clusterName);   
+        ClusterDefinition cdf = new ClusterDefinition();
+        cdf.setName(clusterName);
+        cdf.setGoalState(ClusterState.CLUSTER_STATE_ATTIC);
+        cls.updateClusterDefinition(cdf);
+        
+        /* 
+         * Update cluster state, mark it "to be deleted"
+         */
+        ClusterState cs = cls.getClusterState();
+        cs.setMarkForDeletionWhenInAttic(true); 
+        cls.updateClusterState(cs);
+    }
+
     /* 
      * Create/Update cluster definition 
      * TODO: As nodes or role to node association changes, validate key services nodes are not removed
@@ -353,9 +381,11 @@ public class Clusters {
         cls.updateClusterState(cs);
         
         /*
+         * Create Puppet config
+         
         if (configChanged || updateNodeToRolesAssociation || updateNodesReservation) {
             String puppetConfig = this.getPuppetConfigString (newcd);
-            cls.updatePuppetConfiguration(puppetConfig);
+            //cls.updatePuppetConfiguration(puppetConfig);
         }*/
         
         /*
@@ -369,51 +399,19 @@ public class Clusters {
         }
         
         /*
-         * If configChanged or nodes changed then generate the 
-         */
-        
-        /*
          * Invoke state machine event
          */
         if(c.getGoalState().equals(ClusterState.CLUSTER_STATE_ACTIVE)) {
           StateMachineInvoker.startCluster(cls.getName());
-        } else if(c.getGoalState().
-            equals(ClusterState.CLUSTER_STATE_INACTIVE)) {
+        } else if(c.getGoalState().equals(ClusterState.CLUSTER_STATE_INACTIVE)) {
           StateMachineInvoker.stopCluster(cls.getName());
-        } else if(c.getGoalState().
-            equals(ClusterState.CLUSTER_STATE_ATTIC)) {
+        } else if(c.getGoalState().equals(ClusterState.CLUSTER_STATE_ATTIC)) {
           StateMachineInvoker.deleteCluster(cls.getName());
         }
-     
-        return cls.getClusterDefinition(-1);
-    }
     
-    /*
-     * Add default values for new cluster definition 
-     */
-    private void setNewClusterDefaults(ClusterDefinition cdef) throws Exception {
-        /* 
-         * Populate the input cluster definition w/ default values
-         */
-        if (cdef.getDescription() == null) { cdef.setDescription("Ambari cluster : "+cdef.getName());
-        }
-        if (cdef.getGoalState() == null) { cdef.setGoalState(ClusterDefinition.GOAL_STATE_INACTIVE);
-        }
-        
-        /*
-         * If its new cluster, do not specify the revision, set it to null. A revision number is obtained
-         * after persisting the definition
-         */
-        cdef.setRevision(null);
-        
-        // TODO: Add the list of active services by querying pluging component.
-        if (cdef.getActiveServices() == null) {
-            List<String> services = new ArrayList<String>();
-            services.add("ALL");
-            cdef.setActiveServices(services);
-        }    
+        return cls.getClusterDefinition(-1);
     }
-    
+
     /* 
      * Add new Cluster to cluster list  
      */   
@@ -463,14 +461,18 @@ public class Clusters {
         }
         
         /*
-         * Persist the new cluster and add entry to cache
-         * 
-         */
-        Cluster cls = this.addClusterEntry(cdef, clsState);
+         * TODO: Create and update the puppet configuration
+         
+        String puppetConfig = this.getPuppetConfigString (cdef);
+        System.out.println("==============================");
+        System.out.println(puppetConfig);
+        System.out.println("==============================");
+        */
         
         /*
-         * TODO: Create and update the puppet configuration
+         * Persist the new cluster and add entry to cache
          */
+        Cluster cls = this.addClusterEntry(cdef, clsState);
         
         /*
          * Update cluster nodes reservation. 
@@ -499,13 +501,39 @@ public class Clusters {
             cs.activate();
         }
         return cdef;
-    } 
+    }
+
+    /*
+     * Add default values for new cluster definition 
+     */
+    private void setNewClusterDefaults(ClusterDefinition cdef) throws Exception {
+        /* 
+         * Populate the input cluster definition w/ default values
+         */
+        if (cdef.getDescription() == null) {
+            cdef.setDescription("Ambari cluster : "+cdef.getName());
+        }
+        if (cdef.getGoalState() == null) {
+            cdef.setGoalState(ClusterDefinition.GOAL_STATE_INACTIVE);
+        }
+        
+        /*
+         * If it's a new cluster, do not specify the revision; set it to null. A revision number is obtained
+         * after persisting the definition
+         */
+        cdef.setRevision(null);
+        
+        // TODO: Add the list of active services by querying the plugin component.
+        if (cdef.getActiveServices() == null) {
+            List<String> services = new ArrayList<String>();
+            services.add("ALL");
+            cdef.setActiveServices(services);
+        }    
+    }
     
     /*
      * Create RoleToNodes list based on node attributes
      * TODO: For now just pick some nodes randomly
      */
-    public List<RoleToNodes> generateRoleToNodesListBasedOnNodeAttributes (ClusterDefinition cdef) {
+    private List<RoleToNodes> generateRoleToNodesListBasedOnNodeAttributes (ClusterDefinition cdef) {
         List<RoleToNodes> role2NodesList = new ArrayList<RoleToNodes>();
         return role2NodesList;
     }
@@ -672,7 +700,7 @@ public class Clusters {
      * This function disassociates all the nodes from the cluster. The clusterID associated w/
      * the cluster will be reset by heartbeat when the node reports all clean.
      */
-    public synchronized void releaseClusterNodes (String clusterName) throws Exception {
+    private synchronized void releaseClusterNodes (String clusterName) throws Exception {
         for (Node clusterNode : Nodes.getInstance().getClusterNodes (clusterName, "", "")) {
             clusterNode.releaseNodeFromCluster();     
         }
@@ -749,35 +777,6 @@ public class Clusters {
     
     
     /*
-     * Delete Cluster 
-     * Delete operation will mark the cluster to_be_deleted and then set the goal state to ATTIC
-     * Once cluster gets to ATTIC state, background daemon should purge the cluster entry.
-     */
-    public synchronized void deleteCluster(String clusterName) throws Exception { 
-
-        if (!this.clusterExists(clusterName)) {
-            System.out.println("Cluster ["+clusterName+"] does not exist!");
-            return;
-        }
-        
-        /*
-         * Update the cluster definition with goal state to be ATTIC
-         */
-        Cluster cls = this.getClusterByName(clusterName);   
-        ClusterDefinition cdf = new ClusterDefinition();
-        cdf.setName(clusterName);
-        cdf.setGoalState(ClusterState.CLUSTER_STATE_ATTIC);
-        cls.updateClusterDefinition(cdf);
-        
-        /* 
-         * Update cluster state, mark it "to be deleted"
-         */
-        ClusterState cs = cls.getClusterState();
-        cs.setMarkForDeletionWhenInAttic(true); 
-        cls.updateClusterState(cs);
-    }      
-    
-    /*
      * Get the latest cluster definition
      */
     public ClusterDefinition getLatestClusterDefinition(String clusterName) throws Exception {
@@ -923,12 +922,13 @@ public class Clusters {
   }
   
   private String getPuppetConfigString (ClusterDefinition c) throws Exception {
+      // TODO: ignore if comps or roles are not present in stack.
       Stacks stacksCtx = Stacks.getInstance();
       Stack stack = stacksCtx.getStack(c.getStackName(), Integer.parseInt(c.getStackRevision()));
-      String config = "";
+      String config = "\n$hadoop_stack_conf = { ";
       for (Component comp : stack.getComponents()) {
           for (Role role : comp.getRoles()) {
-              config = config + "\n"+"$"+comp.getName()+"_"+role.getName()+"_conf => { ";
+              config = config + comp.getName()+"_"+role.getName()+" => { ";
               for (ConfigurationCategory cat : role.getConfiguration().getCategory()) {
                    config = config+"\""+cat.getName()+"\" => { ";
                    for (Property p : cat.getProperty()) {
@@ -936,17 +936,21 @@ public class Clusters {
                    }
                    config = config +" }, \n";
               }
-              config = config + "} \n";
-          }
+              config = config + "}, \n";
+          }   
       }
+      config = config + "} \n";
       
+      config = config + "$role_to_nodes = { ";
       for (RoleToNodes roleToNodesEntry : c.getRoleToNodesMap()) {
-          config = config + "$"+roleToNodesEntry.getRoleName()+"_hosts = [";
+          config = config + roleToNodesEntry.getRoleName()+ " => [";
           for (String host : this.getHostnamesFromRangeExpressions(roleToNodesEntry.getNodes())) {
               config = config + "\'"+host+"\',";
           }
           config = config + "] \n";
       }
+      config = config + "} \n";
+      
       return config;
   }
 }
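
For illustration, getPuppetConfigString now emits a single $hadoop_stack_conf hash covering every component/role plus a $role_to_nodes hash, instead of separate per-role $<component>_<role>_conf and $<role>_hosts variables. With hypothetical component, role, and host names, the generated string looks roughly like this (the property rendering is assumed, as that line falls outside the hunk; the trailing commas come straight from the concatenation loops):

    $hadoop_stack_conf = { hdfs_namenode => { "core-site" => { "fs.default.name" => "hdfs://nn:8020",  }, 
    }, 
    } 
    $role_to_nodes = { namenode => ['host1','host2',] 
    } 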

Modified: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Controller.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Controller.java?rev=1206648&r1=1206647&r2=1206648&view=diff
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Controller.java (original)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Controller.java Sun Nov 27 00:31:25 2011
@@ -116,6 +116,7 @@ public class Controller {
        *  opening up the server to clients
        */
       clustersCtx.recoverClustersStateAfterRestart();
+      stacksCtx.recoverStacksAfterRestart();
       
       /*
        * Start the server after controller state is recovered.

Modified: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Stacks.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Stacks.java?rev=1206648&r1=1206647&r2=1206648&view=diff
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Stacks.java (original)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Stacks.java Sun Nov 27 00:31:25 2011
@@ -42,6 +42,8 @@ import org.apache.ambari.common.rest.ent
 import org.apache.ambari.common.rest.entities.StackInformation;
 import org.apache.ambari.common.rest.entities.Component;
 import org.apache.ambari.common.rest.entities.Property;
+import org.apache.ambari.datastore.DataStoreFactory;
+import org.apache.ambari.datastore.PersistentDataStore;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.mortbay.log.Log;
@@ -57,13 +59,16 @@ public class Stacks {
         loadDummyStack(jaxbContext, "hadoop-security", 0);
         loadDummyStack(jaxbContext, "cluster123", 0);
         loadDummyStack(jaxbContext, "cluster124", 0);
+        //loadDummyStack(jaxbContext, "puppet1", 0);
       } catch (JAXBException e) {
         throw new RuntimeException("Can't create jaxb context", e);
+      } catch (Exception e) {
+        throw new RuntimeException("Can't load stacks", e);
       }
     }
     
     public void loadDummyStack (JAXBContext jaxbContext,
-                                    String name, int revision) {
+                                    String name, int revision) throws Exception {
         try {
             Unmarshaller um = jaxbContext.createUnmarshaller();
             String resourceName =
@@ -73,7 +78,7 @@ public class Stacks {
             Stack bp = (Stack) um.unmarshal(in);
             bp.setName(name);
             bp.setRevision(Integer.toString(revision));
-            addStack(bp);
+            addStack(name, bp);
         } catch (IOException e) {
             Log.warn("Problem loading stack " + name + " rev " + revision,
                      e);
@@ -95,35 +100,50 @@ public class Stacks {
     }
       
     /*
-     * Stack name -> {revision -> Stack} .
+     * Stack name -> latest revision is always cached for each stack.
+     * 
+     */
+    protected ConcurrentHashMap<String, Integer> stacks = new ConcurrentHashMap<String, Integer>();
+    protected PersistentDataStore dataStore = DataStoreFactory.getDataStore(DataStoreFactory.ZOOKEEPER_TYPE);
+    
+    
+    /*
+     * Check if the stack exists. Names and latest revision numbers are always 
+     * cached in memory.
      */
-    protected ConcurrentHashMap<String, ConcurrentHashMap<Integer,Stack>> stacks = new ConcurrentHashMap<String,ConcurrentHashMap<Integer,Stack>>();
+    public boolean stackExists(String stackName) {
+        return this.stacks.containsKey(stackName);
+    }
     
+    public int getStackLatestRevision(String stackName) {
+        return this.stacks.get(stackName).intValue();
+    }
     
     /*
      * Get stack. If revision = -1 then return latest revision
      */
     public Stack getStack(String stackName, int revision) throws Exception {
+        
+        if (!stackExists(stackName)) {
+            String msg = "Stack ["+stackName+"] is not defined";
+            throw new WebApplicationException ((new ExceptionResponse(msg, Response.Status.NOT_FOUND)).get());
+        }
+        
         /*
          * If revision is -1, then return the latest revision
          */  
         Stack bp = null;
-        if (!this.stacks.containsKey(stackName)) {  
-            String msg = "Stack ["+stackName+"] is not defined";
-            throw new WebApplicationException ((new ExceptionResponse(msg, Response.Status.NOT_FOUND)).get());
-        }
-        if (revision == -1) {
-            this.stacks.get(stackName).keySet();
-            Integer [] a = new Integer [] {};
-            Integer[] keys = this.stacks.get(stackName).keySet().toArray(a);
-            Arrays.sort(keys);  
-            bp = this.stacks.get(stackName).get(keys[keys.length-1]);
+        if (revision < 0) {
+            bp = dataStore.retrieveStack(stackName, getStackLatestRevision(stackName));
         } else {
-            if (!this.stacks.get(stackName).containsKey(revision)) {  
+            if ( revision > getStackLatestRevision(stackName)) {  
                 String msg = "Stack ["+stackName+"], revision ["+revision+"] does not exist";
                 throw new WebApplicationException ((new ExceptionResponse(msg, Response.Status.NOT_FOUND)).get());
             }
-            bp = this.stacks.get(stackName).get(revision);
+            bp = dataStore.retrieveStack(stackName, revision);
         }
         return bp;  
     }
@@ -131,33 +151,20 @@ public class Stacks {
     /*
      * Add or update the stack
      */
-    public Stack addStack(Stack bp) throws IOException {
-        
+    public Stack addStack(String stackName, Stack bp) throws Exception {
         /*
-         * Validate and set the defaults
+         * Validate and set the defaults, then add the stack as a new revision
          */
-        validateAndSetStackDefaults(bp);
-        
-        if (stacks.containsKey(bp.getName())) {
-            if (stacks.get(bp.getName()).containsKey(new Integer(bp.getRevision()))) {
-                String msg = "Specified stack [Name:"+bp.getName()+", Revision: ["+bp.getRevision()+"] already imported";
-                throw new WebApplicationException((new ExceptionResponse(msg, Response.Status.BAD_REQUEST)).get());
-            } else {
-                stacks.get(bp.getName()).put(new Integer(bp.getRevision()), bp);
-            }
-        } else {
-            ConcurrentHashMap<Integer, Stack> x = new ConcurrentHashMap<Integer, Stack>();
-            x.put(new Integer(bp.getRevision()), bp);
-            this.stacks.put(bp.getName(), x);
-        }
-        
+        validateAndSetStackDefaults(stackName, bp);
+        int latestStackRevision = dataStore.storeStack(stackName, bp);
+        this.stacks.put(stackName, new Integer(latestStackRevision));
         return bp;
     }
     
     /*
      * Import the default stack from the URL location
      */
-    public Stack importDefaultStack (String locationURL) throws IOException {
+    public Stack importDefaultStack (String stackName, String locationURL) throws IOException {
         Stack stack;
         URL stackUrl;
         try {
@@ -171,7 +178,7 @@ public class Stacks {
             JAXBContext jc = JAXBContext.newInstance(org.apache.ambari.common.rest.entities.Stack.class);
             Unmarshaller u = jc.createUnmarshaller();
             stack = (Stack)u.unmarshal(is);
-            return addStack(stack);
+            return addStack(stackName, stack);
         } catch (WebApplicationException we) {
             throw we;
         } catch (Exception e) {
@@ -182,12 +189,15 @@ public class Stacks {
     /*
      * Validate the stack before importing into controller
      */
-    public void validateAndSetStackDefaults(Stack stack) throws IOException {
+    public void validateAndSetStackDefaults(String stackName, Stack stack) throws Exception {
         
         if (stack.getName() == null || stack.getName().equals("")) {
-            String msg = "Stack must be associated with non-empty name";
+            stack.setName(stackName);
+        } else if (!stack.getName().equals(stackName)) { 
+            String msg = "Name of stack in resource URL and stack definition does not match!";
             throw new WebApplicationException ((new ExceptionResponse(msg, Response.Status.BAD_REQUEST)).get());
         }
+        
         if (stack.getRevision() == null || stack.getRevision().equals("") ||
             stack.getRevision().equalsIgnoreCase("null")) {
             stack.setRevision("-1");
@@ -203,7 +213,7 @@ public class Stacks {
         /*
          * Set the creation time 
          */
-        stack.setCreationTime(new Date());
+        stack.setCreationTime(Util.getXMLGregorianCalendar(new Date()));
     }
     
     /*
@@ -215,12 +225,11 @@ public class Stacks {
             String msg = "Stack ["+stackName+"] does not exist";
             throw new WebApplicationException ((new ExceptionResponse(msg, Response.Status.NOT_FOUND)).get());
         }
-        ConcurrentHashMap<Integer, Stack> revisions = this.stacks.get(stackName);
-        for (Integer x : revisions.keySet()) {
-            // Get the latest stack
-            Stack bp = revisions.get(x);
+        
+        for (int rev=0; rev<=this.stacks.get(stackName); rev++) {
+            // Get the stack
+            Stack bp = dataStore.retrieveStack(stackName, rev);
             StackInformation bpInfo = new StackInformation();
-            // TODO: get the creation time from stack
             bpInfo.setCreationTime(bp.getCreationTime());
             bpInfo.setName(bp.getName());
             bpInfo.setRevision(bp.getRevision());
@@ -244,7 +253,7 @@ public class Stacks {
         List<StackInformation> list = new ArrayList<StackInformation>();
         for (String bpName : this.stacks.keySet()) {
             // Get the latest stack
-            Stack bp = this.getStack(bpName, -1);
+            Stack bp = dataStore.retrieveStack(bpName, -1);
             StackInformation bpInfo = new StackInformation();
             // TODO: get the creation and update times from stack
             bpInfo.setCreationTime(bp.getCreationTime());
@@ -264,16 +273,15 @@ public class Stacks {
     }
     
     /*
-     * Delete the specified version of stack
-     * TODO: Check if stack is associated with any stack... 
+     * Delete the stack including all its versions
      */
-    public void deleteStack(String stackName, int revision) throws Exception {
+    public void deleteStack(String stackName) throws Exception {
         
         /*
-         * Check if the specified stack revision is used in any cluster definition
+         * Check if the specified stack is used in any cluster definition
          */
-        Hashtable<String, String> clusterReferencedBPList = getClusterReferencedStacksList();
-        if (clusterReferencedBPList.containsKey(stackName+"-"+revision)) {
+        Hashtable<String, String> clusterReferencedStackList = getClusterReferencedStacksList();
+        if (clusterReferencedStackList.containsKey(stackName)) {
             String msg = "One or more clusters are associated with the specified stack";
             throw new WebApplicationException((new ExceptionResponse(msg, Response.Status.NOT_ACCEPTABLE)).get());
         }
@@ -281,29 +289,30 @@ public class Stacks {
         /*
          * If no cluster is associated then remove the stack
          */
-        this.stacks.get(stackName).remove(revision);
-        if (this.stacks.get(stackName).keySet().isEmpty()) {
-            this.stacks.remove(stackName);
-        }    
+        dataStore.deleteStack(stackName);
+        this.stacks.remove(stackName);
     }
     
     /*
-     * Returns the <key="name-revision", value=""> hash table for cluster referenced stacks
+     * Returns the <key="name", value="revision"> hash table for cluster-referenced stacks.
+     * This includes any indirectly referenced parent stacks as well.
      */
     public Hashtable<String, String> getClusterReferencedStacksList() throws Exception {
         Hashtable<String, String> clusterStacks = new Hashtable<String, String>();
-        for (Cluster c : Clusters.getInstance().operational_clusters.values()) {
+        List<String> clusterNames = dataStore.retrieveClusterList();
+        for (String clsName : clusterNames) {
+            Cluster c = Clusters.getInstance().getClusterByName(clsName);
             String cBPName = c.getClusterDefinition(-1).getStackName();
             String cBPRevision = c.getClusterDefinition(-1).getStackRevision();
-            Stack bpx = this.getStack(cBPName, Integer.parseInt(cBPRevision));
-            clusterStacks.put(cBPName+"-"+cBPRevision, "");
+            clusterStacks.put(cBPName, cBPRevision); 
+            Stack bpx = this.getStack(cBPName, Integer.parseInt(cBPRevision));      
             while (bpx.getParentName() != null) {
                 if (bpx.getParentRevision() == null) {
                     bpx = this.getStack(bpx.getParentName(), -1);
                 } else {
                     bpx = this.getStack(bpx.getParentName(), Integer.parseInt(bpx.getParentRevision()));
                 }
-                clusterStacks.put(bpx.getName()+"-"+bpx.getRevision(), "");
+                clusterStacks.put(bpx.getName(), bpx.getRevision());
             }
         }
         return clusterStacks;
@@ -339,5 +348,11 @@ public class Stacks {
             is.close();
         }
     }
-    
+
+    public void recoverStacksAfterRestart() throws IOException {
+        List<String> stackList = dataStore.retrieveStackList();
+        for (String stackName : stackList) {
+            this.stacks.put(stackName, dataStore.retrieveLatestStackRevisionNumber(stackName));
+        }
+    }
 }
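
In the new model only the stack-name -> latest-revision map lives in memory; full definitions always round-trip through the PersistentDataStore, and recoverStacksAfterRestart() rebuilds the map from retrieveStackList() and retrieveLatestStackRevisionNumber(). A hedged usage sketch (exception handling and the Stack instance omitted):

    Stacks stacks = Stacks.getInstance();
    stacks.addStack("puppet1", stack);             // persisted as revision 0, cached as latest
    stacks.addStack("puppet1", stack);             // persisted as revision 1, cache moves to 1
    Stack latest = stacks.getStack("puppet1", -1); // negative revision -> fetch latest from store
    Stack rev0 = stacks.getStack("puppet1", 0);    // explicit revision, validated against the cache
    stacks.deleteStack("puppet1");                 // refused while any cluster references the stack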

Modified: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/rest/resources/StacksResource.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/rest/resources/StacksResource.java?rev=1206648&r1=1206647&r2=1206648&view=diff
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/rest/resources/StacksResource.java (original)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/rest/resources/StacksResource.java Sun Nov 27 00:31:25 2011
@@ -144,14 +144,9 @@ public class StacksResource {
     @DELETE
     @Path("{stackName}")
     @Consumes({MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML})
-    public Response deletestack(@PathParam("stackName") String stackName,
-                                @DefaultValue("") @QueryParam("revision") String revision ) throws Exception {     
+    public Response deletestack(@PathParam("stackName") String stackName) throws Exception {     
         try {
-            if (revision == null || revision.equals("")) {
-                String msg = "Revision number not specified";
-                throw new WebApplicationException ((new ExceptionResponse(msg, Response.Status.BAD_REQUEST)).get());
-            }
-            Stacks.getInstance().deleteStack(stackName, Integer.parseInt(revision));
+            Stacks.getInstance().deleteStack(stackName);
             return Response.ok().build();
         }catch (WebApplicationException we) {
             throw we;
@@ -189,15 +184,14 @@ public class StacksResource {
                                      Stack stack) throws Exception {
         try {
             if (locationURL == null || locationURL.equals("")) {
-                return Stacks.getInstance().addStack(stack);
+                return Stacks.getInstance().addStack(stackName, stack);
             } else {
-                return Stacks.getInstance().importDefaultStack (locationURL);
+                return Stacks.getInstance().importDefaultStack (stackName, locationURL);
             }
         }catch (WebApplicationException we) {
             throw we;
         }catch (Exception e) {
             throw new WebApplicationException((new ExceptionResponse(e)).get());
         } 
-    }
-    
+    } 
 }
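
With the revision query parameter gone, deleting a stack now removes all of its revisions in one call. A minimal client sketch against the changed endpoint (host, port, and path prefix are hypothetical; only the trailing /stacks/{stackName} segment is fixed by this resource):

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class DeleteStackExample {
        public static void main(String[] args) throws Exception {
            URL url = new URL("http://localhost:4080/rest/stacks/puppet1");
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setRequestMethod("DELETE");
            // 200 when deleted; 406 (NOT_ACCEPTABLE) while a cluster still references the stack
            System.out.println(conn.getResponseCode());
            conn.disconnect();
        }
    }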

Modified: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/PersistentDataStore.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/PersistentDataStore.java?rev=1206648&r1=1206647&r2=1206648&view=diff
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/PersistentDataStore.java (original)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/PersistentDataStore.java Sun Nov 27 00:31:25 2011
@@ -49,41 +49,17 @@ public interface PersistentDataStore {
      */
     public ClusterDefinition retrieveClusterDefinition (String clusterName, int revision) throws IOException;
     
-    public static class NameRevisionPair {
-        public String name;
-        public int maxRevision;
-        public NameRevisionPair(String name, int maxRevision) {
-            this.name = name;
-            this.maxRevision = maxRevision;
-        }
-    }
-  
     /**
      * Retrieve list of existing cluster names
      */
     public List<String> retrieveClusterList () throws IOException;
-    
-    
+      
     /**
      * Delete cluster entry
      */
     public void deleteCluster (String clusterName) throws IOException;
     
     /**
-     * Delete cluster revision(s) less than specified version number 
-     */
-    public void purgeClusterDefinitionRevisions(String clusterName, int lessThanRevision) throws IOException;
-    
-    /**
-     * Store/update the cluster state
-     * Cluster entry should pre-exist else return error
-     * Cluster state is updated in place i.e. only laster cluster state is preserved
-     * Fields not to be updated should be initialized to null in the ClusterState object
-     */
-    public void updateClusterState (String clusterName, ClusterState newstate) throws IOException;
-    
-    
-    /**
      * Store the stack configuration.
      * If stack does not exist, create new one else create new revision
      * Return the new stack revision 
@@ -97,54 +73,20 @@ public interface PersistentDataStore {
     public Stack retrieveStack (String stackName, int revision) throws IOException;
     
     /**
-     * Retrieve all cluster definitions with their latest revisions
-     * 
+     * Retrieve list of stack names
+     * @return
+     * @throws IOException
      */
-    public List<NameRevisionPair> retrieveStackList () throws IOException;
+    public List<String> retrieveStackList() throws IOException;
     
     /**
-     * Delete stack
+     * Get Latest stack Revision Number
      */
-    public int deleteStack(String stackName) throws IOException;
+    public int retrieveLatestStackRevisionNumber(String stackName) throws IOException;
     
     /**
-     * Delete stack revision(s) less than specified version number 
-     */
-    public void deleteStackRevisions(String stackName, int lessThanRevision) throws IOException;
-    
-    /**
-     * Update the component state.
-     * If component entry does not exist for given cluster it will be created 
-     */
-    public void updateComponentState (String clusterName, String componentName, String state) throws IOException;
-    
-    /**
-     * Get the component state.
-     * Returns null, if specific component is not associated with the cluster
-     */
-    public String getComponentState (String clusterName, String componentName) throws IOException;
-
-    
-    /**
-     * Delete the component state for specified component 
-     */
-    public void deleteComponentState (String clusterName, String componentName) throws IOException;
- 
-    /**
-     * Update the component role state.
-     * If role entry does not exist for given cluster component it will be created 
-     */
-    public void updateRoleState (String clusterName, String componentName, String roleName, String state) throws IOException;
-    
-    /**
-     * Get the role state.
-     * Returns null, if specific component/role is not associated with the cluster
+     * Delete stack
      */
-    public String getRoleState (String clusterName, String componentName, String RoleName) throws IOException;
-
+    public void deleteStack(String stackName) throws IOException;
     
-    /**
-     * Delete the role state for specified component 
-     */
-    public void deleteRoleState (String clusterName, String componentName, String roleName) throws IOException;
 }
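
The trimmed interface now covers exactly the operations Stacks and Clusters call. A minimal caller sketch, reusing the DataStoreFactory lookup shown in the Stacks.java hunk above (factory constants as used there):

    PersistentDataStore ds =
        DataStoreFactory.getDataStore(DataStoreFactory.ZOOKEEPER_TYPE);
    int rev = ds.storeStack("puppet1", stack);          // 0 for a new stack, otherwise latest + 1
    Stack s = ds.retrieveStack("puppet1", rev);         // a negative revision means "latest"
    int latest = ds.retrieveLatestStackRevisionNumber("puppet1");
    List<String> names = ds.retrieveStackList();
    ds.deleteStack("puppet1");                          // drops the stack and all its revisions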

Modified: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/impl/ZookeeperDS.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/impl/ZookeeperDS.java?rev=1206648&r1=1206647&r2=1206648&view=diff
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/impl/ZookeeperDS.java (original)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/impl/ZookeeperDS.java Sun Nov 27 00:31:25 2011
@@ -2,13 +2,11 @@ package org.apache.ambari.datastore.impl
 
 import java.io.IOException;
 import java.util.List;
-import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.ambari.common.rest.entities.ClusterDefinition;
 import org.apache.ambari.common.rest.entities.ClusterState;
 import org.apache.ambari.common.rest.entities.Stack;
 import org.apache.ambari.common.util.JAXBUtil;
-import org.apache.ambari.controller.Stacks;
 import org.apache.ambari.datastore.PersistentDataStore;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
@@ -112,6 +110,10 @@ public class ZookeeperDS implements Pers
                 String latestRevision = new String (zk.getData(clusterLatestRevisionNumberPath, false, stat));
                 newRev = Integer.parseInt(latestRevision) + 1;
                 clusterRevisionPath = clusterPath + "/" + newRev;
+                /*
+                 * If client passes the revision number of the checked out cluster definition 
+                 * following code checks if you are updating the same version that you checked out.
+                 */
                 if (clusterDef.getRevision() != null) {
                     if (!latestRevision.equals(clusterDef.getRevision())) {
                         throw new IOException ("Latest cluster definition does not match the one client intends to modify!");
@@ -229,92 +231,113 @@ public class ZookeeperDS implements Pers
     }
 
     @Override
-    public void purgeClusterDefinitionRevisions(String clusterName,
-            int lessThanRevision) throws IOException {
-        // TODO Auto-generated method stub
-        
-    }
-
-    @Override
-    public void updateClusterState(String clusterName, ClusterState newstate)
-            throws IOException {
-        // TODO Auto-generated method stub
-        
-    }
-
-    @Override
     public int storeStack(String stackName, Stack stack) throws IOException {
-        // TODO Auto-generated method stub
-        return 0;
+        try {
+            Stat stat = new Stat();
+            String stackPath = ZOOKEEPER_STACKS_ROOT_PATH+"/"+stackName;
+            int newRev = 0;
+            String stackRevisionPath = stackPath+"/"+newRev;
+            String stackLatestRevisionNumberPath = stackPath+"/latestRevisionNumber";
+            if (zk.exists(stackPath, false) == null) {
+                /* 
+                 * create stack path with revision 0, create stack latest revision node to
+                 * store the latest revision of stack definition.
+                 */
+                createDirectory (stackPath, new byte[0], false);
+                stack.setRevision(new Integer(newRev).toString());
+                createDirectory (stackRevisionPath, JAXBUtil.write(stack), false);
+                createDirectory (stackLatestRevisionNumberPath, (new Integer(newRev)).toString().getBytes(), false);
+            }else {
+                String latestRevision = new String (zk.getData(stackLatestRevisionNumberPath, false, stat));
+                newRev = Integer.parseInt(latestRevision) + 1;
+                stackRevisionPath = stackPath + "/" + newRev;
+                /*
+                 * TODO: like cluster definition client can pass optionally the checked out version number
+                 * Following code checks if you are updating the same version that you checked out.
+                if (stack.getRevision() != null) {
+                    if (!latestRevision.equals(stack.getRevision())) {
+                        throw new IOException ("Latest cluster definition does not match the one client intends to modify!");
+                    }  
+                } */
+                stack.setRevision(new Integer(newRev).toString());
+                createDirectory (stackRevisionPath, JAXBUtil.write(stack), false);
+                zk.setData(stackLatestRevisionNumberPath, (new Integer(newRev)).toString().getBytes(), -1);
+            }
+            return newRev;
+        } catch (KeeperException e) {
+            throw new IOException (e);
+        } catch (InterruptedException e1) {
+            throw new IOException (e1);
+        }
     }
 
     @Override
     public Stack retrieveStack(String stackName, int revision)
             throws IOException {
-        // TODO Auto-generated method stub
-        return null;
-    }
-
-    @Override
-    public List<NameRevisionPair> retrieveStackList() throws IOException {
-        // TODO Auto-generated method stub
-        return null;
-    }
-
-    @Override
-    public int deleteStack(String stackName) throws IOException {
-        // TODO Auto-generated method stub
-        return 0;
-    }
-
-    @Override
-    public void deleteStackRevisions(String stackName, int lessThanRevision)
-            throws IOException {
-        // TODO Auto-generated method stub
-        
-    }
-
-    @Override
-    public void updateComponentState(String clusterName, String componentName,
-            String state) throws IOException {
-        // TODO Auto-generated method stub
-        
-    }
-
-    @Override
-    public String getComponentState(String clusterName, String componentName)
-            throws IOException {
-        // TODO Auto-generated method stub
-        return null;
+        try {
+            Stat stat = new Stat();
+            String stackRevisionPath;
+            if (revision < 0) {   
+                String stackLatestRevisionNumberPath = ZOOKEEPER_STACKS_ROOT_PATH+"/"+stackName+"/latestRevisionNumber";
+                String latestRevisionNumber = new String (zk.getData(stackLatestRevisionNumberPath, false, stat));
+                stackRevisionPath = ZOOKEEPER_STACKS_ROOT_PATH+"/"+stackName+"/"+latestRevisionNumber;       
+            } else {
+                stackRevisionPath = ZOOKEEPER_STACKS_ROOT_PATH+"/"+stackName+"/"+revision;
+            }
+            Stack stack = JAXBUtil.read(zk.getData(stackRevisionPath, false, stat), Stack.class); 
+            return stack;
+        } catch (Exception e) {
+            throw new IOException (e);
+        }
     }
 
     @Override
-    public void deleteComponentState(String clusterName, String componentName)
-            throws IOException {
-        // TODO Auto-generated method stub
-        
+    public List<String> retrieveStackList() throws IOException {
+        try {
+            List<String> children = zk.getChildren(ZOOKEEPER_STACKS_ROOT_PATH, false);
+            return children;
+        } catch (KeeperException e) {
+            throw new IOException (e);
+        } catch (InterruptedException e) {
+            throw new IOException (e);
+        }
     }
-
+    
     @Override
-    public void updateRoleState(String clusterName, String componentName,
-            String roleName, String state) throws IOException {
-        // TODO Auto-generated method stub
-        
+    public int retrieveLatestStackRevisionNumber(String stackName) throws IOException { 
+        int revisionNumber;
+        try {
+            Stat stat = new Stat();
+            String stackLatestRevisionNumberPath = ZOOKEEPER_STACKS_ROOT_PATH+"/"+stackName+"/latestRevisionNumber";
+            String latestRevisionNumber = new String (zk.getData(stackLatestRevisionNumberPath, false, stat));
+            revisionNumber = Integer.parseInt(latestRevisionNumber);
+        } catch (Exception e) {
+            throw new IOException (e);
+        }
+        return revisionNumber;
     }
 
     @Override
-    public String getRoleState(String clusterName, String componentName,
-            String RoleName) throws IOException {
-        // TODO Auto-generated method stub
-        return null;
+    public void deleteStack(String stackName) throws IOException {
+        String stackPath = ZOOKEEPER_STACKS_ROOT_PATH+"/"+stackName;
+        List<String> children;
+        try {
+            children = zk.getChildren(stackPath, false);
+            // Delete all the children and then the parent node.
+            // Note: getChildren() returns bare child names, so the full path must be rebuilt.
+            for (String child : children) {
+                try {
+                    zk.delete(stackPath + "/" + child, -1);
+                } catch (KeeperException.NoNodeException ke) {
+                    // already gone; ignore
+                } catch (Exception e) { throw new IOException (e); }
+            }
+            zk.delete(stackPath, -1);
+        } catch (KeeperException.NoNodeException ke) {
+            return;
+        } catch (Exception e) {
+            throw new IOException (e);
+        }
     }
 
-    @Override
-    public void deleteRoleState(String clusterName, String componentName,
-            String roleName) throws IOException {
-        // TODO Auto-generated method stub
-        
-    }
 
     @Override
     public void process(WatchedEvent event) {
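
For reference, the znode layout that storeStack/retrieveStack produce under the stacks root (the value of ZOOKEEPER_STACKS_ROOT_PATH is defined elsewhere in this class and not shown in this diff):

    ZOOKEEPER_STACKS_ROOT_PATH/
        puppet1/                      (marker node, empty data)
            latestRevisionNumber      (data: highest revision as a string, e.g. "2")
            0                         (data: JAXB-serialized Stack, revision 0)
            1
            2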

Added: incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/puppet1-0.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/puppet1-0.xml?rev=1206648&view=auto
==============================================================================
--- incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/puppet1-0.xml (added)
+++ incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/puppet1-0.xml Sun Nov 27 00:31:25 2011
@@ -0,0 +1,265 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<stack name="puppet1" revision="0" parentRevision="-1" creationTime="2011-11-23T18:16:39.604-08:00">
+    <repositories kind="TAR">
+        <urls>http://www.apache.org/dist/hadoop/common/</urls>
+    </repositories>
+    <configuration>
+        <category name="ambari">
+            <property name="ambari.cluster.name" value="IMPLICIT" />   
+            <property name="ambari.namenode.principal" value="nn" />
+            <property name="ambari.datanode.principal" value="dn" />
+            <property name="ambari.jobtracker.principal" value="jt" />
+            <property name="ambari.tasktracker.principal" value="tt" />
+            <property name="ambari.hbasemaster.principal" value="hm" />
+            <property name="ambari.regionserver.principal" value="rs" />
+            <property name="ambari.hcat.principal" value="hcat" />
+
+            <property name="ambari.hdfs.user" value="hdfs" />
+            <property name="ambari.mapreduce.user" value="mapred" />
+            <property name="ambari.hbase.user" value="hrt_hbase" />
+            <property name="ambari.hcat.user" value="hcat" />
+            <property name="ambari.admin.group" value="hadoop" />
+
+            <property name="ambari.data.prefix" value="ambari" />
+            <property name="ambari.cluster.prefix" value="ambari" />   
+
+            <property name="ambari.service.realm" value="${ambari.user.realm}" />  
+            <property name="ambari.service.realm" value="\${local.realm}" /> 
+            <property name="ambari.webauthfilter" value="org.apache.hadoop.http.lib.StaticUserWebFilter"/>
+            <property name="ambari.hadoop_conf_dir" value="/etc/hadoop"/>
+        </category>
+    </configuration>
+    <components name="common" architecture="x86_64" version="0.20.205.0" provider="org.apache.hadoop">
+        <definition provider="org.apache.ambari" name="hadoop-common" version="0.1.0"/>
+    </components>
+    <components name="hdfs" architecture="x86_64" version="0.20.205.0" provider="org.apache.hadoop">
+        <definition provider="org.apache.ambari" name="hadoop-hdfs" version="0.1.0"/>
+        <roles name="namenode">
+          <configuration>
+            <category name="mapred-site">
+                <property name="mapred.tasktracker.tasks.sleeptime-before-sigkill" value="250" />
+                <property name="*mapred.system.dir" value="/mapred/mapredsystem" />  
+                <property name="*mapred.job.tracker" value="${ambari.mapred.jobtracker.host}:9000" />
+                <property name="*mapred.job.tracker.http.address" value="${ambari.mapred.jobtracker.host}:50030" />
+                <property name="*mapred.local.dir" value="${ambari.cluster.prefix}/lib/hadoop/mapred,${ambari.cluster.prefix}/lib/hadoop/mapred" /> 
+                <property name="mapreduce.cluster.administrators" value="${ambari.mapreduce.user}" />
+                <property name="mapred.map.tasks.speculative.execution" value="false" />
+                <property name="mapred.reduce.tasks.speculative.execution" value="false" />
+                <property name="mapred.output.compression.type" value="BLOCK" />
+                <property name="jetty.connector" value="org.mortbay.jetty.nio.SelectChannelConnector" />
+                <property name="mapred.task.tracker.task-controller" value="org.apache.hadoop.mapred.DefaultTaskController" />
+                <property name="mapred.child.root.logger" value="INFO,TLA" />
+                <property name="mapred.child.java.opts" value="-server -Xmx640m -Djava.net.preferIPv4Stack=true" />
+                <property name="mapred.child.ulimit" value="8388608" />
+                <property name="mapred.job.tracker.persist.jobstatus.active" value="true" />
+                <property name="mapred.job.tracker.persist.jobstatus.dir" value="file:////var/mount1/log/hadoop/${ambari.mapreduce.user}/jobstatus" /> 
+                <property name="mapred.job.tracker.history.completed.location" value="/mapred/history/done" /> 
+                <property name="mapred.heartbeats.in.second" value="200" />
+                <property name="mapreduce.tasktracker.outofband.heartbeat" value="true" />
+                <property name="*mapred.jobtracker.maxtasks.per.job" value="200000" />
+                <property name="mapreduce.jobtracker.kerberos.principal" value="${ambari.jobtracker.principal}/_HOST@${ambari.service.realm}" />
+                <property name="mapreduce.tasktracker.kerberos.principal" value="${ambari.tasktracker.principal}/_HOST@${ambari.service.realm}" />
+                <property name="hadoop.job.history.user.location" value="none" />
+                <property name="mapreduce.jobtracker.keytab.file" value="/etc/security/keytabs/${ambari.jobtracker.principal}.service.keytab" />    
+                <property name="mapreduce.tasktracker.keytab.file" value="/etc/security/keytabs/${ambari.tasktracker.principal}.service.keytab" />  
+                <property name="mapreduce.jobtracker.staging.root.dir" value="/user" />
+                <property name="mapreduce.job.acl-modify-job" value="" />
+                <property name="mapreduce.job.acl-view-job" value="Dr.Who" />
+                <property name="mapreduce.tasktracker.group" value="${ambari.admin.group}" />
+                <property name="mapred.acls.enabled" value="true" />
+                <property name="mapred.jobtracker.taskScheduler" value="org.apache.hadoop.mapred.CapacityTaskScheduler" />
+                <property name="mapred.queue.names" value="default" />
+                <property name="mapreduce.history.server.embedded" value="false" />
+                <property name="mapreduce.history.server.http.address" value="${ambari.mapred.jobtracker.host}:51111" />
+                <property name="mapreduce.jobhistory.kerberos.principal" value="${ambari.jobtracker.principal}/_HOST@${ambari.service.realm}" />
+                <property name="mapreduce.jobhistory.keytab.file" value="/etc/security/keytabs/${ambari.jobtracker.principal}.service.keytab" />
+                <property name="mapred.hosts" value="${ambari.HADOOP_CONF_DIR}/mapred.include" />
+                <property name="mapred.hosts.exclude" value="${ambari.HADOOP_CONF_DIR}/mapred.exclude" />
+                <property name="mapred.jobtracker.retirejob.check" value="10000" />
+                <property name="mapred.jobtracker.retirejob.interval" value="0" />
+            </category>
+
+            <category name="core-site">
+                <property name="local.realm" value="KERBEROS.EXAMPLE.COM" /> 
+                <property name="fs.default.name" value="hdfs://${ambari.hdfs.namenode.host}:8020" />
+                <property name="fs.trash.interval" value="360" />
+                <property name="hadoop.security.auth_to_local" value="RULE:[1:$1@$0](.*@${ambari.user.realm})s/@.*// RULE:[2:$1@$0](${ambari.jobtracker.principal}@${ambari.service.realm})s/.*/${ambari.mapreduce.user}/ RULE:[2:$1@$0](${ambari.tasktracer.principal}@${ambari.service.realm})s/.*/${ambari.mapreduce.user}/ RULE:[2:$1@$0](${ambari.namenode.principal}@${ambari.service.realm})s/.*/${ambari.hdfs.user}/ RULE:[2:$1@$0](${ambari.datanode.principal}@${ambari.service.realm})s/.*/${ambari.hdfs.user}/ RULE:[2:$1@$0](${ambari.hbasemaster.principal}@${ambari.service.realm})s/.*/${ambari.hbase.user}/ RULE:[2:$1@$0](${ambari.regionserver.principal}@${ambari.service.realm})s/.*/${ambari.hbase.user}/ RULE:[2:$1@$0](${ambari.hcat.principal}@${ambari.service.realm})s/.*/${ambari.hcat.user}/" />
+                <property name="hadoop.security.authentication" value="simple" />
+                <property name="hadoop.security.authorization" value="false" />
+                <property name="hadoop.security.groups.cache.secs" value="14400" />
+                <property name="hadoop.kerberos.kinit.command" value="/usr/kerberos/bin/kinit" />
+                <property name="hadoop.http.filter.initializers" value="${ambari.webauthfilter}" />
+            </category>
+
+            <category name="hdfs-site">
+                <property name="*dfs.name.dir"  value="${ambari.cluster.prefix}/data/namenode" />
+                <property name="*dfs.data.dir" value="/var/mount1/lib/hadoop/hdfs/datanode,/var/mount2/lib/hadoop/hdfs/datanode" />
+                <property name="dfs.safemode.threshold.pct" value="1.0f" />
+                <property name="dfs.datanode.address" value="0.0.0.0:50010" />
+                <property name="dfs.datanode.http.address" value="0.0.0.0:50075" />
+                <property name="*dfs.http.address" value="${ambari.hdfs.namenode.host}:50070" />
+                <property name="dfs.umaskmode" value="077" />
+
+                <property name="dfs.block.access.token.enable" value="false" />
+                <property name="dfs.namenode.kerberos.principal" value="${ambari.namenode.principal}/_HOST@${ambari.service.realm}" />
+                <property name="dfs.namenode.kerberos.https.principal" value="host/_HOST@${ambari.service.realm}" />
+                <property name="dfs.secondary.namenode.kerberos.principal" value="${ambari.namenode.principal}/_HOST@${ambari.service.realm}" />
+                <property name="dfs.secondary.namenode.kerberos.https.principal" value="host/_HOST@${ambari.service.realm}" />
+                <property name="dfs.datanode.kerberos.principal" value="${ambari.datanode.principal}/_HOST@${ambari.service.realm}" />
+                <property name="dfs.web.authentication.kerberos.principal" value="HTTP/_HOST@${ambari.service.realm}" />
+
+                <property name="dfs.web.authentication.kerberos.keytab" value="/etc/security/keytabs/${ambari.namenode.principal}.service.keytab" />
+                <property name="dfs.namenode.keytab.file" value="/etc/security/keytabs/${ambari.namenode.principal}.service.keytab" />
+                <property name="dfs.secondary.namenode.keytab.file" value="/etc/security/keytabs/${ambari.namenode.principal}.service.keytab" />
+                <property name="dfs.datanode.keytab.file" value="/etc/security/keytabs/${ambari.datanode.principal}.service.keytab" />
+
+                <property name="dfs.secondary.https.port" value="50490" />
+                <property name="dfs.https.port" value="50470" />
+                <property name="dfs.https.address" value="${ambari.hdfs.namenode.host}:50470" />
+                <property name="dfs.datanode.data.dir.perm" value="700" />
+                <property name="dfs.cluster.administrators" value="${ambari.hdfs.user}" />
+                <property name="dfs.permissions.superusergroup" value="${ambari.admin.group}" />
+                <property name="dfs.secondary.http.address" value="${ambari.hdfs.namenode.host}:50090" />
+                <property name="dfs.hosts" value="${ambari.HADOOP_CONF_DIR}/dfs.include" />
+                <property name="dfs.hosts.exclude" value="${ambari.HADOOP_CONF_DIR}/dfs.exclude" />
+                <property name="dfs.webhdfs.enabled" value="true" />
+                <property name="dfs.support.append" value="true" />
+            </category>
+            <category name="hadoop-env">
+                <property name="JAVA_HOME" value="${ambari.cluster.prefix}/stack/share/java" />
+                <property name="HADOOP_CONF_DIR" value="${ambari.HADOOP_CONF_DIR}" />   
+                <property name="HADOOP_OPTS" value="-Djava.net.preferIPv4Stack=true $HADOOP_OPTS" />
+                <property name="HADOOP_NAMENODE_OPTS" value="-Dsecurity.audit.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_NAMENODE_OPTS" />
+                <property name="HADOOP_SECONDARYNAMENODE_OPTS" value="-Dsecurity.audit.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS" />
+                <property name="HADOOP_JOBTRACKER_OPTS" value="-Dsecurity.audit.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA $HADOOP_JOBTRACKER_OPTS" />
+                <property name="HADOOP_TASKTRACKER_OPTS" value="-Dsecurity.audit.logger=ERROR,console -Dmapred.audit.logger=ERROR,console $HADOOP_TASKTRACKER_OPTS" />
+                <property name="HADOOP_DATANODE_OPTS" value="-Dsecurity.audit.logger=ERROR,DRFAS $HADOOP_DATANODE_OPTS" />
+                <property name="HADOOP_CLIENT_OPTS" value="-Xmx128m $HADOOP_CLIENT_OPTS" />
+                <property name="HADOOP_SECURE_DN_USER" value="" />
+                <property name="HADOOP_LOG_DIR" value="${ambari.cluster.prefix}/log/hadoop/$USER" />   
+                <property name="HADOOP_SECURE_DN_LOG_DIR" value="${ambari.cluster.prefix}/log/hadoop/${ambari.hdfs.user}" /> 
+                <property name="HADOOP_PID_DIR" value="${ambari.cluster.prefix}/run/hadoop" /> 
+                <property name="HADOOP_SECURE_DN_PID_DIR" value="${ambari.cluster.prefix}/run/hadoop" /> 
+                <property name="HADOOP_IDENT_STRING" value="${ambari.cluster.name}" />
+            </category>
+            <category name="hadoop_metrics2">
+                <property name="*.period" value="60" />
+            </category>
+          </configuration>
+        </roles>
+        <roles name="datanode">
+          <configuration>
+            <category name="mapred-site">
+                <property name="mapred.tasktracker.tasks.sleeptime-before-sigkill" value="250" />
+                <property name="*mapred.system.dir" value="/mapred/mapredsystem" />  
+                <property name="*mapred.job.tracker" value="${ambari.mapred.jobtracker.host}:9000" />
+                <property name="*mapred.job.tracker.http.address" value="${ambari.mapred.jobtracker.host}:50030" />
+                <property name="*mapred.local.dir" value="${ambari.cluster.prefix}/lib/hadoop/mapred,${ambari.cluster.prefix}/lib/hadoop/mapred" /> 
+                <property name="mapreduce.cluster.administrators" value="${ambari.mapreduce.user}" />
+                <property name="mapred.map.tasks.speculative.execution" value="false" />
+                <property name="mapred.reduce.tasks.speculative.execution" value="false" />
+                <property name="mapred.output.compression.type" value="BLOCK" />
+                <property name="jetty.connector" value="org.mortbay.jetty.nio.SelectChannelConnector" />
+                <property name="mapred.task.tracker.task-controller" value="org.apache.hadoop.mapred.DefaultTaskController" />
+                <property name="mapred.child.root.logger" value="INFO,TLA" />
+                <property name="mapred.child.java.opts" value="-server -Xmx640m -Djava.net.preferIPv4Stack=true" />
+                <property name="mapred.child.ulimit" value="8388608" />
+                <property name="mapred.job.tracker.persist.jobstatus.active" value="true" />
+                <property name="mapred.job.tracker.persist.jobstatus.dir" value="file:////var/mount1/log/hadoop/${ambari.mapreduce.user}/jobstatus" /> 
+                <property name="mapred.job.tracker.history.completed.location" value="/mapred/history/done" /> 
+                <property name="mapred.heartbeats.in.second" value="200" />
+                <property name="mapreduce.tasktracker.outofband.heartbeat" value="true" />
+                <property name="*mapred.jobtracker.maxtasks.per.job" value="200000" />
+                <property name="mapreduce.jobtracker.kerberos.principal" value="${ambari.jobtracker.principal}/_HOST@${ambari.service.realm}" />
+                <property name="mapreduce.tasktracker.kerberos.principal" value="${ambari.tasktracker.principal}/_HOST@${ambari.service.realm}" />
+                <property name="hadoop.job.history.user.location" value="none" />
+                <property name="mapreduce.jobtracker.keytab.file" value="/etc/security/keytabs/${ambari.jobtracker.principal}.service.keytab" />    
+                <property name="mapreduce.tasktracker.keytab.file" value="/etc/security/keytabs/${ambari.tasktracker.principal}.service.keytab" />  
+                <property name="mapreduce.jobtracker.staging.root.dir" value="/user" />
+                <property name="mapreduce.job.acl-modify-job" value="" />
+                <property name="mapreduce.job.acl-view-job" value="Dr.Who" />
+                <property name="mapreduce.tasktracker.group" value="${ambari.admin.group}" />
+                <property name="mapred.acls.enabled" value="true" />
+                <property name="mapred.jobtracker.taskScheduler" value="org.apache.hadoop.mapred.CapacityTaskScheduler" />
+                <property name="mapred.queue.names" value="default" />
+                <property name="mapreduce.history.server.embedded" value="false" />
+                <property name="mapreduce.history.server.http.address" value="${ambari.mapred.jobtracker.host}:51111" />
+                <property name="mapreduce.jobhistory.kerberos.principal" value="${ambari.jobtracker.principal}/_HOST@${ambari.service.realm}" />
+                <property name="mapreduce.jobhistory.keytab.file" value="/etc/security/keytabs/${ambari.jobtracker.principal}.service.keytab" />
+                <property name="mapred.hosts" value="${ambari.HADOOP_CONF_DIR}/mapred.include" />
+                <property name="mapred.hosts.exclude" value="${ambari.HADOOP_CONF_DIR}/mapred.exclude" />
+                <property name="mapred.jobtracker.retirejob.check" value="10000" />
+                <property name="mapred.jobtracker.retirejob.interval" value="0" />
+            </category>
+
+            <category name="core-site">
+                <property name="local.realm" value="KERBEROS.EXAMPLE.COM" /> 
+                <property name="fs.default.name" value="hdfs://${ambari.hdfs.namenode.host}:8020" />
+                <property name="fs.trash.interval" value="360" />
+                <property name="hadoop.security.auth_to_local" value="RULE:[1:$1@$0](.*@${ambari.user.realm})s/@.*// RULE:[2:$1@$0](${ambari.jobtracker.principal}@${ambari.service.realm})s/.*/${ambari.mapreduce.user}/ RULE:[2:$1@$0](${ambari.tasktracer.principal}@${ambari.service.realm})s/.*/${ambari.mapreduce.user}/ RULE:[2:$1@$0](${ambari.namenode.principal}@${ambari.service.realm})s/.*/${ambari.hdfs.user}/ RULE:[2:$1@$0](${ambari.datanode.principal}@${ambari.service.realm})s/.*/${ambari.hdfs.user}/ RULE:[2:$1@$0](${ambari.hbasemaster.principal}@${ambari.service.realm})s/.*/${ambari.hbase.user}/ RULE:[2:$1@$0](${ambari.regionserver.principal}@${ambari.service.realm})s/.*/${ambari.hbase.user}/ RULE:[2:$1@$0](${ambari.hcat.principal}@${ambari.service.realm})s/.*/${ambari.hcat.user}/" />
+                <property name="hadoop.security.authentication" value="simple" />
+                <property name="hadoop.security.authorization" value="false" />
+                <property name="hadoop.security.groups.cache.secs" value="14400" />
+                <property name="hadoop.kerberos.kinit.command" value="/usr/kerberos/bin/kinit" />
+                <property name="hadoop.http.filter.initializers" value="${ambari.webauthfilter}" />
+            </category>
+
+            <category name="hdfs-site">
+                <property name="*dfs.name.dir"  value="${ambari.cluster.prefix}/data/namenode" />
+                <property name="*dfs.data.dir" value="/var/mount1/lib/hadoop/hdfs/datanode,/var/mount2/lib/hadoop/hdfs/datanode" />
+
+                <property name="dfs.safemode.threshold.pct" value="1.0f" />
+                <property name="dfs.datanode.address" value="0.0.0.0:50010" />
+                <property name="dfs.datanode.http.address" value="0.0.0.0:50075" />
+                <property name="*dfs.http.address" value="${ambari.hdfs.namenode.host}:50070" />
+                <property name="dfs.umaskmode" value="077" />
+
+                <property name="dfs.block.access.token.enable" value="false" />
+                <property name="dfs.namenode.kerberos.principal" value="${ambari.namenode.principal}/_HOST@${ambari.service.realm}" />
+                <property name="dfs.namenode.kerberos.https.principal" value="host/_HOST@${ambari.service.realm}" />
+                <property name="dfs.secondary.namenode.kerberos.principal" value="${ambari.namenode.principal}/_HOST@${ambari.service.realm}" />
+                <property name="dfs.secondary.namenode.kerberos.https.principal" value="host/_HOST@${ambari.service.realm}" />
+                <property name="dfs.datanode.kerberos.principal" value="${ambari.datanode.principal}/_HOST@${ambari.service.realm}" />
+                <property name="dfs.web.authentication.kerberos.principal" value="HTTP/_HOST@${ambari.service.realm}" />
+
+                <property name="dfs.web.authentication.kerberos.keytab" value="/etc/security/keytabs/${ambari.namenode.principal}.service.keytab" />
+                <property name="dfs.namenode.keytab.file" value="/etc/security/keytabs/${ambari.namenode.principal}.service.keytab" />
+                <property name="dfs.secondary.namenode.keytab.file" value="/etc/security/keytabs/${ambari.namenode.principal}.service.keytab" />
+                <property name="dfs.datanode.keytab.file" value="/etc/security/keytabs/${ambari.datanode.principal}.service.keytab" />
+
+                <property name="dfs.secondary.https.port" value="50490" />
+                <property name="dfs.https.port" value="50470" />
+                <property name="dfs.https.address" value="${ambari.hdfs.namenode.host}:50470" />
+                <property name="dfs.datanode.data.dir.perm" value="700" />
+                <property name="dfs.cluster.administrators" value="${ambari.hdfs.user}" />
+                <property name="dfs.permissions.superusergroup" value="${ambari.admin.group}" />
+                <property name="dfs.secondary.http.address" value="${ambari.hdfs.namenode.host}:50090" />
+                <property name="dfs.hosts" value="${ambari.HADOOP_CONF_DIR}/dfs.include" />
+                <property name="dfs.hosts.exclude" value="${ambari.HADOOP_CONF_DIR}/dfs.exclude" />
+                <property name="dfs.webhdfs.enabled" value="true" />
+                <property name="dfs.support.append" value="true" />
+            </category>
+            <category name="hadoop-env">
+                <property name="JAVA_HOME" value="${ambari.cluster.prefix}/stack/share/java" />
+                <property name="HADOOP_CONF_DIR" value="${ambari.HADOOP_CONF_DIR}" />   
+                <property name="HADOOP_OPTS" value="-Djava.net.preferIPv4Stack=true $HADOOP_OPTS" />
+                <property name="HADOOP_NAMENODE_OPTS" value="-Dsecurity.audit.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_NAMENODE_OPTS" />
+                <property name="HADOOP_SECONDARYNAMENODE_OPTS" value="-Dsecurity.audit.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS" />
+                <property name="HADOOP_JOBTRACKER_OPTS" value="-Dsecurity.audit.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA $HADOOP_JOBTRACKER_OPTS" />
+                <property name="HADOOP_TASKTRACKER_OPTS" value="-Dsecurity.audit.logger=ERROR,console -Dmapred.audit.logger=ERROR,console $HADOOP_TASKTRACKER_OPTS" />
+                <property name="HADOOP_DATANODE_OPTS" value="-Dsecurity.audit.logger=ERROR,DRFAS $HADOOP_DATANODE_OPTS" />
+                <property name="HADOOP_CLIENT_OPTS" value="-Xmx128m $HADOOP_CLIENT_OPTS" />
+                <property name="HADOOP_SECURE_DN_USER" value="" />
+                <property name="HADOOP_LOG_DIR" value="${ambari.cluster.prefix}/log/hadoop/$USER" />   
+                <property name="HADOOP_SECURE_DN_LOG_DIR" value="${ambari.cluster.prefix}/log/hadoop/${ambari.hdfs.user}" /> 
+                <property name="HADOOP_PID_DIR" value="${ambari.cluster.prefix}/run/hadoop" /> 
+                <property name="HADOOP_SECURE_DN_PID_DIR" value="${ambari.cluster.prefix}/run/hadoop" /> 
+                <property name="HADOOP_IDENT_STRING" value="${ambari.cluster.name}" />
+            </category>
+            <category name="hadoop_metrics2">
+                <property name="*.period" value="60" />
+            </category>
+          </configuration>
+        </roles>
+    </components>
+</stack>
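
For reference, the controller presumably reads a stack definition like the one above back into the JAXB-annotated Stack entity (org.apache.ambari.common.rest.entities.Stack). Below is a minimal sketch of that load step; the StackLoader class and the resourcePath argument are hypothetical, not part of this commit, and the sketch assumes the stack XML is packaged on the controller classpath and that Stack carries the usual JAXB annotations:

    import java.io.InputStream;
    import javax.xml.bind.JAXBContext;
    import javax.xml.bind.JAXBException;
    import javax.xml.bind.Unmarshaller;
    import org.apache.ambari.common.rest.entities.Stack;

    // Hypothetical helper, not part of this commit: unmarshals a stack
    // definition XML document into the Stack entity via JAXB.
    public class StackLoader {
        public static Stack loadStack(String resourcePath) throws JAXBException {
            JAXBContext ctx = JAXBContext.newInstance(Stack.class);
            Unmarshaller um = ctx.createUnmarshaller();
            // Illustrative classpath lookup of a stack definition XML resource.
            InputStream in = StackLoader.class.getResourceAsStream(resourcePath);
            if (in == null) {
                throw new JAXBException("stack resource not found: " + resourcePath);
            }
            return (Stack) um.unmarshal(in);
        }
    }

Note that the ${ambari.*} placeholders in the property values would survive unmarshalling as plain strings; when and where they are expanded (for example, at cluster deployment time) is outside the scope of this sketch.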