Posted to commits@ambari.apache.org by om...@apache.org on 2011/12/12 09:21:18 UTC

svn commit: r1213162 [1/2] - in /incubator/ambari/trunk: ./ controller/src/main/java/org/apache/ambari/configuration/ controller/src/main/java/org/apache/ambari/controller/ controller/src/main/java/org/apache/ambari/controller/rest/resources/ controlle...

Author: omalley
Date: Mon Dec 12 08:21:17 2011
New Revision: 1213162

URL: http://svn.apache.org/viewvc?rev=1213162&view=rev
Log:
AMBARI-153. Introduce an 'ambari.properties' configuration file that
can specify 'data.store' and a URL. It defaults to 'zk://localhost:2181/',
but can be set to 'test:/' to use the static (in-memory) data store. (omalley)
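
For illustration, a minimal /etc/ambari/ambari.properties could look like the
following (a sketch based on this change; the directory can be overridden with
the AMBARI_CONF_DIR environment variable, and 'data.store' is the only key
introduced here):

    # persistent ZooKeeper-backed store (also the default when no file is present)
    data.store=zk://localhost:2181/
    # or, for the in-memory static store used by tests:
    # data.store=test:/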

Added:
    incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/configuration/
    incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/configuration/ConfigurationModule.java
    incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/DataStore.java
    incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/DataStoreFactory.java
    incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/StaticDataStore.java
    incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/ZookeeperDS.java
    incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/puppet1-0.json
    incubator/ambari/trunk/controller/src/test/java/org/apache/ambari/datastore/
    incubator/ambari/trunk/controller/src/test/java/org/apache/ambari/datastore/TestStaticDataStore.java
    incubator/ambari/trunk/src/site/resources/application.html
    incubator/ambari/trunk/src/site/resources/schema1.xsd
Removed:
    incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/PersistentDataStore.java
    incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/impl/
Modified:
    incubator/ambari/trunk/CHANGES.txt
    incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Cluster.java
    incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Clusters.java
    incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Controller.java
    incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/StackFlattener.java
    incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Stacks.java
    incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/rest/resources/StacksResource.java
    incubator/ambari/trunk/controller/src/test/java/org/apache/ambari/controller/TestHeartbeat.java

Modified: incubator/ambari/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/CHANGES.txt?rev=1213162&r1=1213161&r2=1213162&view=diff
==============================================================================
--- incubator/ambari/trunk/CHANGES.txt (original)
+++ incubator/ambari/trunk/CHANGES.txt Mon Dec 12 08:21:17 2011
@@ -2,6 +2,10 @@ Ambari Change log
 
 Release 0.1.0 - unreleased
 
+  AMBARI-153. Introduce an 'ambari.properties' configuration file that
+  can specify 'data.store' and a URL. It defaults to 'zk://localhost:2181/',
+  but can be set to 'test:/' to use the static (in-memory) data store. (omalley)
+
   AMBARI-152. Fixes issues in the shell scripts (ddas)
 
   AMBARI-148. Refactors StateMachineInvoker (ddas)

Added: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/configuration/ConfigurationModule.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/configuration/ConfigurationModule.java?rev=1213162&view=auto
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/configuration/ConfigurationModule.java (added)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/configuration/ConfigurationModule.java Mon Dec 12 08:21:17 2011
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.configuration;
+
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.Properties;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.name.Names;
+
+/**
+ * Load a given property file into Guice as named properties.
+ */
+public class ConfigurationModule extends AbstractModule {
+
+  private static final Log LOG = LogFactory.getLog(ConfigurationModule.class);
+  private static final String AMBARI_CONF_VAR = "AMBARI_CONF_DIR";
+  private static final String CONFIG_FILE = "ambari.properties";
+
+  @Override
+  protected void configure() {
+    // set up default properties
+    Properties properties = new Properties();
+    properties.put("data.store", "zk://localhost:2181/");
+    
+    // get the configuration directory and filename
+    String confDir = System.getenv(AMBARI_CONF_VAR);
+    if (confDir == null) {
+      confDir = "/etc/ambari";
+    }
+    String filename = confDir + "/" + CONFIG_FILE;
+    
+    // load the properties
+    try {
+      properties.load(new FileInputStream(filename));
+    } catch (FileNotFoundException fnf) {
+      LOG.info("No configuration file " + filename + " found.", fnf);
+    } catch (IOException ie) {
+      throw new RuntimeException("Can't read configuration file " + filename,
+                                 ie);
+    }
+    Names.bindProperties(binder(), properties);
+  }
+
+}
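
Properties bound here become available for constructor injection by name. A
minimal sketch of a consumer (the class name is hypothetical; DataStoreFactory,
added below in this commit, follows the same pattern):

    import com.google.inject.Inject;
    import com.google.inject.name.Named;

    class DataStoreConfigConsumer {   // hypothetical example class
      private final String dataStoreUri;

      @Inject
      DataStoreConfigConsumer(@Named("data.store") String dataStoreUri) {
        // receives the value from ambari.properties, or the default
        // "zk://localhost:2181/" when no configuration file is present
        this.dataStoreUri = dataStoreUri;
      }
    }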

Modified: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Cluster.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Cluster.java?rev=1213162&r1=1213161&r2=1213162&view=diff
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Cluster.java (original)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Cluster.java Mon Dec 12 08:21:17 2011
@@ -33,7 +33,8 @@ import org.apache.ambari.common.rest.ent
 import org.apache.ambari.common.rest.entities.Configuration;
 import org.apache.ambari.components.ComponentPlugin;
 import org.apache.ambari.components.ComponentPluginFactory;
-import org.apache.ambari.datastore.PersistentDataStore;
+import org.apache.ambari.datastore.DataStoreFactory;
+import org.apache.ambari.datastore.DataStore;
 
 import com.google.inject.assistedinject.Assisted;
 import com.google.inject.assistedinject.AssistedInject;
@@ -44,7 +45,7 @@ public class Cluster {
     /*
      * Data Store 
      */
-    private final PersistentDataStore dataStore;
+    private final DataStore dataStore;
    
     /*
      * Latest revision of cluster definition
@@ -81,18 +82,18 @@ public class Cluster {
 
     @AssistedInject
     public Cluster (StackFlattener flattener,
-                    PersistentDataStore dataStore,
+                    DataStoreFactory dataStore,
                     ComponentPluginFactory plugin,
                     @Assisted String clusterName) {
         this.flattener = flattener;
-        this.dataStore = dataStore;
+        this.dataStore = dataStore.getInstance();
         this.componentPluginFactory = plugin;
         this.clusterName = clusterName;
     }
     
     @AssistedInject
     public Cluster (StackFlattener flattener,
-                    PersistentDataStore dataStore,
+                    DataStoreFactory dataStore,
                     ComponentPluginFactory plugin,
                     @Assisted ClusterDefinition c, 
                     @Assisted ClusterState cs) throws Exception {

Modified: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Clusters.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Clusters.java?rev=1213162&r1=1213161&r2=1213162&view=diff
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Clusters.java (original)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Clusters.java Mon Dec 12 08:21:17 2011
@@ -42,7 +42,8 @@ import org.apache.ambari.common.rest.ent
 import org.apache.ambari.common.rest.entities.Role;
 import org.apache.ambari.common.rest.entities.RoleToNodes;
 import org.apache.ambari.common.rest.entities.Stack;
-import org.apache.ambari.datastore.PersistentDataStore;
+import org.apache.ambari.datastore.DataStoreFactory;
+import org.apache.ambari.datastore.DataStore;
 import org.apache.ambari.resource.statemachine.ClusterFSM;
 import org.apache.ambari.resource.statemachine.FSMDriverInterface;
 import org.apache.commons.logging.Log;
@@ -60,7 +61,7 @@ public class Clusters {
      * Operational clusters include both active and inactive clusters
      */
     protected ConcurrentHashMap<String, Cluster> operational_clusters = new ConcurrentHashMap<String, Cluster>();
-    private final PersistentDataStore dataStore;
+    private final DataStore dataStore;
     
     private final Stacks stacks;
     private final Nodes nodes;
@@ -70,13 +71,13 @@ public class Clusters {
         
     @Inject
     private Clusters(Stacks stacks, Nodes nodes, 
-                     PersistentDataStore dataStore,
+                     DataStoreFactory dataStore,
                      ClusterFactory clusterFactory,
                      StackFlattener flattener,
                      FSMDriverInterface fsmDriver) throws Exception {
       this.stacks = stacks;
       this.nodes = nodes;
-      this.dataStore = dataStore;
+      this.dataStore = dataStore.getInstance();
       this.clusterFactory = clusterFactory;
       this.fsmDriver = fsmDriver;
       this.flattener = flattener;

Modified: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Controller.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Controller.java?rev=1213162&r1=1213161&r2=1213162&view=diff
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Controller.java (original)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Controller.java Mon Dec 12 08:21:17 2011
@@ -19,11 +19,14 @@
 package org.apache.ambari.controller;
 
 
+import java.io.IOException;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
 import org.apache.ambari.common.util.DaemonWatcher;
 import org.apache.ambari.common.util.ExceptionUtil;
+import org.apache.ambari.configuration.ConfigurationModule;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.security.Constraint;
 import org.mortbay.jetty.security.ConstraintMapping;
@@ -116,8 +119,9 @@ public class Controller {
     }
   }
 
-  public static void main(String[] args) {
-    Injector injector = Guice.createInjector(new ControllerModule());
+  public static void main(String[] args) throws IOException {
+    Injector injector = Guice.createInjector(new ConfigurationModule(),
+                                             new ControllerModule());
     DaemonWatcher.createInstance(System.getProperty("PID"), 9100);
     try {
       Controller controller = injector.getInstance(Controller.class);

Modified: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/StackFlattener.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/StackFlattener.java?rev=1213162&r1=1213161&r2=1213162&view=diff
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/StackFlattener.java (original)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/StackFlattener.java Mon Dec 12 08:21:17 2011
@@ -37,13 +37,9 @@ import org.apache.ambari.common.rest.ent
 import org.apache.ambari.common.rest.entities.RepositoryKind;
 import org.apache.ambari.common.rest.entities.Role;
 import org.apache.ambari.common.rest.entities.Stack;
-import org.apache.ambari.components.ComponentModule;
 import org.apache.ambari.components.ComponentPlugin;
 import org.apache.ambari.components.ComponentPluginFactory;
-import org.apache.ambari.datastore.PersistentDataStore;
-import org.apache.ambari.datastore.impl.StaticDataStore;
 
-import com.google.inject.AbstractModule;
 import com.google.inject.Guice;
 import com.google.inject.Inject;
 import com.google.inject.Injector;
@@ -252,18 +248,8 @@ public class StackFlattener {
     return result;
   }
 
-  private static class TestModule extends AbstractModule {
-
-    @Override
-    protected void configure() {
-      install(new ComponentModule());
-      bind(PersistentDataStore.class).to(StaticDataStore.class);
-    }
-    
-  }
-
   public static void main(String[] args) throws Exception {
-    Injector injector = Guice.createInjector(new TestModule());
+    Injector injector = Guice.createInjector(new ControllerModule());
     JAXBContext jaxbContext = 
         JAXBContext.newInstance("org.apache.ambari.common.rest.entities");
     Marshaller marsh = jaxbContext.createMarshaller();

Modified: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Stacks.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Stacks.java?rev=1213162&r1=1213161&r2=1213162&view=diff
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Stacks.java (original)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Stacks.java Mon Dec 12 08:21:17 2011
@@ -39,7 +39,8 @@ import org.apache.ambari.common.rest.ent
 import org.apache.ambari.common.rest.entities.StackInformation;
 import org.apache.ambari.common.rest.entities.Component;
 import org.apache.ambari.common.rest.entities.Property;
-import org.apache.ambari.datastore.PersistentDataStore;
+import org.apache.ambari.datastore.DataStoreFactory;
+import org.apache.ambari.datastore.DataStore;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 
@@ -49,11 +50,11 @@ import com.google.inject.Singleton;
 @Singleton
 public class Stacks {
 
-  private final PersistentDataStore dataStore;
+  private final DataStore dataStore;
 
     @Inject
-    Stacks(PersistentDataStore dataStore) throws IOException {
-      this.dataStore = dataStore;
+    Stacks(DataStoreFactory dataStore) throws IOException {
+      this.dataStore = dataStore.getInstance();
       recoverStacksAfterRestart();
     }
     

Modified: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/rest/resources/StacksResource.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/rest/resources/StacksResource.java?rev=1213162&r1=1213161&r2=1213162&view=diff
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/rest/resources/StacksResource.java (original)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/rest/resources/StacksResource.java Mon Dec 12 08:21:17 2011
@@ -38,6 +38,8 @@ import org.apache.ambari.controller.Clus
 import org.apache.ambari.controller.Stacks;
 import org.apache.ambari.controller.ExceptionResponse;
 import org.apache.ambari.controller.rest.config.Examples;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 
 import com.google.inject.Inject;
 
@@ -49,6 +51,7 @@ import com.google.inject.Inject;
 @Path("stacks")
 public class StacksResource {
  
+    private static Log LOG = LogFactory.getLog(StacksResource.class);
     private static Stacks stacks;
     private static Clusters clusters;
     
@@ -83,6 +86,7 @@ public class StacksResource {
         }catch (WebApplicationException we) {
             throw we;
         }catch (Exception e) {
+            LOG.error("Caught error in get stacks", e);
             throw new WebApplicationException((new ExceptionResponse(e)).get());
         } 
     }

Added: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/DataStore.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/DataStore.java?rev=1213162&view=auto
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/DataStore.java (added)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/DataStore.java Mon Dec 12 08:21:17 2011
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.datastore;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.ambari.common.rest.entities.ClusterState;
+import org.apache.ambari.common.rest.entities.Stack;
+import org.apache.ambari.common.rest.entities.ClusterDefinition;
+
+/**
+ * Abstraction that stores the Ambari state.
+ */
+public interface DataStore {
+    
+    /**
+     * Shut down the data store and stop the data store service.
+     */
+    public void close () throws IOException;
+    
+    /**
+     * Check if cluster exists
+     */
+    public boolean clusterExists(String clusterName) throws IOException;
+    
+    /**
+     * Get Latest cluster Revision Number
+     */
+    public int retrieveLatestClusterRevisionNumber(String clusterName) throws IOException;
+    
+    /**
+     * Store the cluster state
+     */
+    public void storeClusterState (String clusterName, ClusterState clsState) throws IOException;
+    
+    /**
+     * Retrieve the cluster state
+     */
+    public ClusterState retrieveClusterState (String clusterName) throws IOException;
+
+    /**
+     * Store the cluster definition.
+     *
+     * Returns the revision number of the new or updated cluster definition.
+     * If the definition's revision is not null, it is checked against the
+     * latest revision in the store before the update is applied.
+     */
+    public int storeClusterDefinition (ClusterDefinition clusterDef) throws IOException;
+    
+    /**
+     * Retrieve the cluster definition given the cluster name and revision number
+     * If revision number is less than zero, then return latest cluster definition
+     */
+    public ClusterDefinition retrieveClusterDefinition (String clusterName, int revision) throws IOException;
+    
+    /**
+     * Retrieve list of existing cluster names
+     */
+    public List<String> retrieveClusterList () throws IOException;
+      
+    /**
+     * Delete cluster entry
+     */
+    public void deleteCluster (String clusterName) throws IOException;
+    
+    /**
+     * Store the stack configuration.
+     * If the stack does not exist, a new one is created; otherwise a new
+     * revision is added.
+     * Returns the new stack revision.
+     */
+    public int storeStack (String stackName, Stack stack) throws IOException;
+    
+    /**
+     * Retrieve stack with specified revision number
+     * If the revision number is less than zero, the latest stack revision is returned.
+     */
+    public Stack retrieveStack (String stackName, int revision) throws IOException;
+    
+    /**
+     * Retrieve the list of stack names.
+     * @return the names of all stored stacks
+     * @throws IOException if the underlying store cannot be read
+     */
+    public List<String> retrieveStackList() throws IOException;
+    
+    /**
+     * Get Latest stack Revision Number
+     */
+    public int retrieveLatestStackRevisionNumber(String stackName) throws IOException;
+    
+    /**
+     * Delete stack
+     */
+    public void deleteStack(String stackName) throws IOException;
+
+    /**
+     * Check if stack exists
+     */
+    boolean stackExists(String stackName) throws IOException;
+    
+}

Added: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/DataStoreFactory.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/DataStoreFactory.java?rev=1213162&view=auto
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/DataStoreFactory.java (added)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/DataStoreFactory.java Mon Dec 12 08:21:17 2011
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.datastore;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+
+import com.google.inject.Inject;
+import com.google.inject.name.Named;
+
+public class DataStoreFactory {
+
+  private final DataStore ds;
+  
+  @Inject
+  DataStoreFactory(@Named("data.store") String dataStore) throws IOException {
+    URI uri;
+    try {
+      uri = new URI(dataStore);
+    } catch (URISyntaxException e) {
+      throw new IllegalArgumentException("Bad data store URI: " + dataStore, e);
+    }
+    String scheme = uri.getScheme();
+    if ("zk".equals(scheme)) {
+      String auth = uri.getAuthority();
+      ds = new ZookeeperDS(auth);
+    } else if ("test".equals(scheme)) {
+      ds = new StaticDataStore();
+    } else {
+      throw new IllegalArgumentException("Unknown data store " + scheme);
+    }
+  }
+  
+  public DataStore getInstance() {
+    return ds;
+  }
+}
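
In practice the factory is obtained through Guice rather than constructed
directly. A sketch of the wiring, following the Controller.main() change below
(ControllerModule is assumed to bind the factory's other dependencies as before):

    Injector injector = Guice.createInjector(new ConfigurationModule(),
                                             new ControllerModule());
    DataStore dataStore = injector.getInstance(DataStoreFactory.class)
                                  .getInstance();
    // "zk://host:port/" selects ZookeeperDS, "test:/" selects StaticDataStore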

Added: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/StaticDataStore.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/StaticDataStore.java?rev=1213162&view=auto
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/StaticDataStore.java (added)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/StaticDataStore.java Mon Dec 12 08:21:17 2011
@@ -0,0 +1,222 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.datastore;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import javax.xml.bind.Unmarshaller;
+
+import org.apache.ambari.common.rest.entities.ClusterDefinition;
+import org.apache.ambari.common.rest.entities.ClusterState;
+import org.apache.ambari.common.rest.entities.Stack;
+
+import com.google.inject.Singleton;
+import com.sun.jersey.api.json.JSONJAXBContext;
+
+/**
+ * A data store that uses in-memory maps and some preset values for testing.
+ */
+@Singleton
+class StaticDataStore implements DataStore {
+
+  private Map<String, List<ClusterDefinition>> clusters = 
+      new TreeMap<String, List<ClusterDefinition>>();
+
+  private Map<String, List<Stack>> stacks =
+      new TreeMap<String, List<Stack>>();
+  
+  private Map<String, ClusterState> clusterStates =
+      new TreeMap<String, ClusterState>();
+
+  private static final JAXBContext jaxbContext;
+  private static final JAXBContext jsonContext;
+  static {
+    try {
+      jaxbContext = JAXBContext.
+          newInstance("org.apache.ambari.common.rest.entities");
+      jsonContext = JSONJAXBContext.newInstance
+          ("org.apache.ambari.common.rest.entities");
+    } catch (JAXBException e) {
+      throw new RuntimeException("Can't create jaxb context", e);
+    }
+  }
+
+  StaticDataStore() throws IOException {
+    addStackFile("org/apache/ambari/stacks/hadoop-security-0.xml", 
+                 "hadoop-security");
+    addStackFile("org/apache/ambari/stacks/cluster123-0.xml", "cluster123");
+    addStackFile("org/apache/ambari/stacks/cluster124-0.xml", "cluster124");
+    addStackJsonFile("org/apache/ambari/stacks/puppet1-0.xml", "puppet1");
+    addClusterFile("org/apache/ambari/clusters/cluster123.xml", "cluster123");
+  }
+
+  private void addStackFile(String filename, 
+                            String stackName) throws IOException {
+    InputStream in = ClassLoader.getSystemResourceAsStream(filename);
+    if (in == null) {
+      throw new IllegalArgumentException("Can't find resource for " + filename);
+    }
+    try {
+      Unmarshaller um = jaxbContext.createUnmarshaller();
+      Stack stack = (Stack) um.unmarshal(in);
+      storeStack(stackName, stack);
+    } catch (JAXBException je) {
+      throw new IOException("Can't parse " + filename, je);
+    }
+  }
+
+  private void addStackJsonFile(String filename, 
+                                String stackName) throws IOException {
+    InputStream in = ClassLoader.getSystemResourceAsStream(filename);
+    if (in == null) {
+      throw new IllegalArgumentException("Can't find resource for " + filename);
+    }
+    try {
+      Unmarshaller um = jsonContext.createUnmarshaller();
+      Stack stack = (Stack) um.unmarshal(in);
+      storeStack(stackName, stack);
+    } catch (JAXBException je) {
+      throw new IOException("Can't parse " + filename, je);
+    }
+  }
+
+  private void addClusterFile(String filename,
+                              String clusterName) throws IOException {
+    InputStream in = ClassLoader.getSystemResourceAsStream(filename);
+    if (in == null) {
+      throw new IllegalArgumentException("Can't find resource for " + filename);
+    }
+    try {
+      Unmarshaller um = jaxbContext.createUnmarshaller();
+      ClusterDefinition cluster = (ClusterDefinition) um.unmarshal(in);
+      cluster.setName(clusterName);
+      storeClusterDefinition(cluster);
+    } catch (JAXBException je) {
+      throw new IOException("Can't parse " + filename, je);
+    }    
+  }
+
+  @Override
+  public void close() throws IOException {
+    // PASS
+  }
+
+  @Override
+  public boolean clusterExists(String clusterName) throws IOException {
+    return clusters.containsKey(clusterName);
+  }
+
+  @Override
+  public int retrieveLatestClusterRevisionNumber(String clusterName)
+      throws IOException {
+    return clusters.get(clusterName).size()-1;
+  }
+
+  @Override
+  public void storeClusterState(String clusterName, 
+                                ClusterState clsState) throws IOException {
+    clusterStates.put(clusterName, clsState);
+  }
+
+  @Override
+  public ClusterState retrieveClusterState(String clusterName)
+      throws IOException {
+    return clusterStates.get(clusterName);
+  }
+
+  @Override
+  public int storeClusterDefinition(ClusterDefinition clusterDef
+                                    ) throws IOException {
+    String name = clusterDef.getName();
+    List<ClusterDefinition> list = clusters.get(name);
+    if (list == null) {
+      list = new ArrayList<ClusterDefinition>();
+      clusters.put(name, list);
+    }
+    list.add(clusterDef);
+    return list.size() - 1;
+  }
+
+  @Override
+  public ClusterDefinition retrieveClusterDefinition(String clusterName,
+      int revision) throws IOException {
+    return clusters.get(clusterName).get(revision);
+  }
+
+  @Override
+  public List<String> retrieveClusterList() throws IOException {
+    return new ArrayList<String>(clusters.keySet());
+  }
+
+  @Override
+  public void deleteCluster(String clusterName) throws IOException {
+    clusters.remove(clusterName);
+  }
+
+  @Override
+  public int storeStack(String stackName, Stack stack) throws IOException {
+    List<Stack> list = stacks.get(stackName);
+    if (list == null) {
+      list = new ArrayList<Stack>();
+      stacks.put(stackName, list);
+    }
+    int index = list.size();
+    stack.setRevision(Integer.toString(index));
+    list.add(stack);
+    return index;
+  }
+
+  @Override
+  public Stack retrieveStack(String stackName, 
+                             int revision) throws IOException {
+    List<Stack> history = stacks.get(stackName);
+    if (revision == -1) {
+      revision = history.size() - 1;
+    }
+    return history.get(revision);
+  }
+
+  @Override
+  public List<String> retrieveStackList() throws IOException {
+    return new ArrayList<String>(stacks.keySet());
+  }
+
+  @Override
+  public int retrieveLatestStackRevisionNumber(String stackName
+                                               ) throws IOException {
+    return stacks.get(stackName).size() - 1;
+  }
+
+  @Override
+  public void deleteStack(String stackName) throws IOException {
+    stacks.remove(stackName);
+  }
+
+  @Override
+  public boolean stackExists(String stackName) throws IOException {
+    return stacks.containsKey(stackName);
+  }
+
+}
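
A rough usage sketch in the spirit of the new TestStaticDataStore (the calls
and assertions are illustrative, not the test's actual contents; the factory
constructor is package-private, so this only compiles from the
org.apache.ambari.datastore package, and it assumes the preset resource files
are on the classpath):

    DataStore ds = new DataStoreFactory("test:/").getInstance();
    assert ds.stackExists("puppet1");                        // preset stack
    assert ds.retrieveClusterList().contains("cluster123");  // preset cluster
    int rev = ds.storeStack("mystack", new Stack());         // first revision
    assert rev == 0;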

Added: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/ZookeeperDS.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/ZookeeperDS.java?rev=1213162&view=auto
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/ZookeeperDS.java (added)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/datastore/ZookeeperDS.java Mon Dec 12 08:21:17 2011
@@ -0,0 +1,440 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.datastore;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.ambari.common.rest.entities.ClusterDefinition;
+import org.apache.ambari.common.rest.entities.ClusterState;
+import org.apache.ambari.common.rest.entities.Stack;
+import org.apache.ambari.common.util.JAXBUtil;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.ZooDefs.Ids;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.data.Stat;
+
+/**
+ * Implementation of the data store based on Zookeeper.
+ */
+class ZookeeperDS implements DataStore, Watcher {
+
+  private static final String ZOOKEEPER_ROOT_PATH="/ambari";
+  private static final String ZOOKEEPER_CLUSTERS_ROOT_PATH =
+      ZOOKEEPER_ROOT_PATH + "/clusters";
+  private static final String ZOOKEEPER_STACKS_ROOT_PATH = 
+      ZOOKEEPER_ROOT_PATH + "/stacks";
+
+  private ZooKeeper zk;
+  private String credential = null;
+  private boolean zkConnected = false;
+
+  ZookeeperDS(String authority) {
+    try {
+      /*
+       * Connect to ZooKeeper server
+       */
+      zk = new ZooKeeper(authority, 600000, this);
+      if(credential != null) {
+        zk.addAuthInfo("digest", credential.getBytes());
+      }
+
+      while (!this.zkConnected) {
+        System.out.println("Waiting for ZK connection!");
+        Thread.sleep(2000);
+      }
+
+      /*
+       * Create top level directories
+       */
+      createDirectory (ZOOKEEPER_ROOT_PATH, new byte[0], true);
+      createDirectory (ZOOKEEPER_CLUSTERS_ROOT_PATH, new byte[0], true);
+      createDirectory (ZOOKEEPER_STACKS_ROOT_PATH, new byte[0], true);
+    } catch (Exception e) {
+      e.printStackTrace();
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+    // PASS
+  }
+
+  @Override
+  public boolean clusterExists(String clusterName) throws IOException {
+    try {
+      if (zk.exists(ZOOKEEPER_CLUSTERS_ROOT_PATH+"/"+clusterName, false) 
+            == null) {
+        return false;
+      }
+    } catch (Exception e) {
+      throw new IOException(e);
+    }
+    return true;
+  }
+
+  @Override
+  public synchronized int storeClusterDefinition(ClusterDefinition clusterDef
+      ) throws IOException {  
+    /*
+     * Update the cluster node
+     */
+    try {
+      Stat stat = new Stat();
+      String clusterPath = ZOOKEEPER_CLUSTERS_ROOT_PATH+"/" + 
+                           clusterDef.getName();
+      int newRev = 0;
+      String clusterRevisionPath = clusterPath+"/"+newRev;
+      String clusterLatestRevisionNumberPath = clusterPath + 
+          "/latestRevisionNumber";
+      if (zk.exists(clusterPath, false) == null) {
+        /* 
+         * create cluster path with revision 0, create cluster latest revision
+         * node storing the latest revision of cluster definition.
+         */
+        createDirectory (clusterPath, new byte[0], false);
+        createDirectory (clusterRevisionPath, 
+                         JAXBUtil.write(clusterDef), false);
+        createDirectory (clusterLatestRevisionNumberPath, 
+                         (new Integer(newRev)).toString().getBytes(), false);
+      }else {
+        String latestRevision = 
+            new String (zk.getData(clusterLatestRevisionNumberPath, false, 
+                                   stat));
+        newRev = Integer.parseInt(latestRevision) + 1;
+        clusterRevisionPath = clusterPath + "/" + newRev;
+        /*
+         * If client passes the revision number of the checked out cluster 
+         * definition following code checks if you are updating the same version
+         * that you checked out.
+         */
+        if (clusterDef.getRevision() != null) {
+          if (!latestRevision.equals(clusterDef.getRevision())) {
+            throw new IOException ("Latest cluster definition does not match "+
+                                   "the one client intends to modify!");
+          }  
+        } 
+        createDirectory(clusterRevisionPath, JAXBUtil.write(clusterDef), false);
+        zk.setData(clusterLatestRevisionNumberPath, 
+                   (new Integer(newRev)).toString().getBytes(), -1);
+      }
+      return newRev;
+    } catch (KeeperException e) {
+      throw new IOException (e);
+    } catch (InterruptedException e1) {
+      throw new IOException (e1);
+    }
+  }
+
+  @Override
+  public synchronized void storeClusterState(String clusterName, 
+                                             ClusterState clsState
+                                             ) throws IOException {
+    /*
+     * Update the cluster state
+     */
+    try {
+      String clusterStatePath = 
+          ZOOKEEPER_CLUSTERS_ROOT_PATH+"/"+clusterName+"/state";
+      if (zk.exists(clusterStatePath, false) == null) {
+        // create node for the cluster state
+        createDirectory (clusterStatePath, JAXBUtil.write(clsState), false);
+      }else {
+        zk.setData(clusterStatePath, JAXBUtil.write(clsState), -1);
+      }
+    } catch (KeeperException e) {
+      throw new IOException (e);
+    } catch (InterruptedException e1) {
+      throw new IOException (e1);
+    }
+
+  }
+
+  @Override
+  public ClusterDefinition retrieveClusterDefinition(String clusterName, 
+                                             int revision) throws IOException {
+    try {
+      Stat stat = new Stat();
+      String clusterRevisionPath;
+      if (revision < 0) {   
+        String clusterLatestRevisionNumberPath = 
+           ZOOKEEPER_CLUSTERS_ROOT_PATH+"/"+clusterName+"/latestRevisionNumber";
+        String latestRevisionNumber = 
+          new String (zk.getData(clusterLatestRevisionNumberPath, false, stat));
+        clusterRevisionPath = 
+          ZOOKEEPER_CLUSTERS_ROOT_PATH+"/"+clusterName+"/"+latestRevisionNumber;       
+      } else {
+        clusterRevisionPath = 
+            ZOOKEEPER_CLUSTERS_ROOT_PATH+"/"+clusterName+"/"+revision;
+      }
+      ClusterDefinition cdef = JAXBUtil.read(zk.getData(clusterRevisionPath, 
+          false, stat), ClusterDefinition.class); 
+      return cdef;
+    } catch (Exception e) {
+      throw new IOException (e);
+    }
+  }
+
+  @Override
+  public ClusterState retrieveClusterState(String clusterName
+                                           ) throws IOException {
+    try {
+      Stat stat = new Stat();
+      String clusterStatePath = 
+          ZOOKEEPER_CLUSTERS_ROOT_PATH+"/"+clusterName+"/state";
+      ClusterState clsState = 
+          JAXBUtil.read(zk.getData(clusterStatePath, false, stat), 
+                        ClusterState.class); 
+      return clsState;
+    } catch (Exception e) {
+      throw new IOException (e);
+    }
+  }
+
+  @Override
+  public int retrieveLatestClusterRevisionNumber(String clusterName
+                                                 ) throws IOException {
+    int revisionNumber;
+    try {
+      Stat stat = new Stat();
+      String clusterLatestRevisionNumberPath = 
+          ZOOKEEPER_CLUSTERS_ROOT_PATH+"/"+clusterName+"/latestRevisionNumber";
+      String latestRevisionNumber = 
+          new String (zk.getData(clusterLatestRevisionNumberPath, false, stat));
+      revisionNumber = Integer.parseInt(latestRevisionNumber);
+    } catch (Exception e) {
+      throw new IOException (e);
+    }
+    return revisionNumber;
+  }
+
+  @Override
+  public List<String> retrieveClusterList() throws IOException {
+    try {
+      List<String> children = zk.getChildren(ZOOKEEPER_CLUSTERS_ROOT_PATH, 
+                                             false);
+      return children;
+    } catch (KeeperException e) {
+      throw new IOException (e);
+    } catch (InterruptedException e) {
+      throw new IOException (e);
+    }
+  }
+
+  @Override
+  public void deleteCluster(String clusterName) throws IOException {
+    String clusterPath = ZOOKEEPER_CLUSTERS_ROOT_PATH+"/"+clusterName;
+    List<String> children;
+    try {
+      children = zk.getChildren(clusterPath, false);
+      // Delete all the children and then the parent node
+      for (String childPath : children) {
+        try {
+          // getChildren() returns node names relative to the parent, so
+          // build the absolute path before deleting
+          zk.delete(clusterPath + "/" + childPath, -1);
+        } catch (KeeperException.NoNodeException ke) {
+        } catch (Exception e) { throw new IOException (e); }
+      }
+      zk.delete(clusterPath, -1);
+    } catch (KeeperException.NoNodeException ke) {
+      return;
+    } catch (Exception e) {
+      throw new IOException (e);
+    }
+  }
+
+  @Override
+  public int storeStack(String stackName, Stack stack) throws IOException {
+    try {
+      Stat stat = new Stat();
+      String stackPath = ZOOKEEPER_STACKS_ROOT_PATH+"/"+stackName;
+      int newRev = 0;
+      String stackRevisionPath = stackPath+"/"+newRev;
+      String stackLatestRevisionNumberPath = stackPath+"/latestRevisionNumber";
+      if (zk.exists(stackPath, false) == null) {
+        /* 
+         * create stack path with revision 0, create stack latest revision node
+         * to store the latest revision of stack definition.
+         */
+        createDirectory (stackPath, new byte[0], false);
+        stack.setRevision(new Integer(newRev).toString());
+        createDirectory (stackRevisionPath, JAXBUtil.write(stack), false);
+        createDirectory (stackLatestRevisionNumberPath, 
+            (new Integer(newRev)).toString().getBytes(), false);
+      }else {
+        String latestRevision = 
+            new String (zk.getData(stackLatestRevisionNumberPath, false, stat));
+        newRev = Integer.parseInt(latestRevision) + 1;
+        stackRevisionPath = stackPath + "/" + newRev;
+        /*
+         * TODO: like cluster definition client can pass optionally the checked 
+         * out version number
+         * Following code checks if you are updating the same version that you 
+         * checked out.
+         * if (stack.getRevision() != null) {
+         *   if (!latestRevision.equals(stack.getRevision())) {
+         *     throw new IOException ("Latest cluster definition does not " + 
+         *                           "match the one client intends to modify!");
+         *   }  
+         * } 
+         */
+        stack.setRevision(new Integer(newRev).toString());
+        createDirectory (stackRevisionPath, JAXBUtil.write(stack), false);
+        zk.setData(stackLatestRevisionNumberPath, 
+                   (new Integer(newRev)).toString().getBytes(), -1);
+      }
+      return newRev;
+    } catch (KeeperException e) {
+      throw new IOException (e);
+    } catch (InterruptedException e1) {
+      throw new IOException (e1);
+    }
+  }
+
+  @Override
+  public Stack retrieveStack(String stackName, int revision)
+      throws IOException {
+    try {
+      Stat stat = new Stat();
+      String stackRevisionPath;
+      if (revision < 0) {   
+        String stackLatestRevisionNumberPath = 
+            ZOOKEEPER_STACKS_ROOT_PATH+"/"+stackName+"/latestRevisionNumber";
+        String latestRevisionNumber = 
+            new String (zk.getData(stackLatestRevisionNumberPath, false, stat));
+        stackRevisionPath = 
+            ZOOKEEPER_STACKS_ROOT_PATH+"/"+stackName+"/"+latestRevisionNumber;       
+      } else {
+        stackRevisionPath = 
+            ZOOKEEPER_STACKS_ROOT_PATH+"/"+stackName+"/"+revision;
+      }
+      Stack stack = JAXBUtil.read(zk.getData(stackRevisionPath, false, stat), 
+          Stack.class); 
+      return stack;
+    } catch (Exception e) {
+      throw new IOException (e);
+    }
+  }
+
+  @Override
+  public List<String> retrieveStackList() throws IOException {
+    try {
+      List<String> children = zk.getChildren(ZOOKEEPER_STACKS_ROOT_PATH, false);
+      return children;
+    } catch (KeeperException e) {
+      throw new IOException (e);
+    } catch (InterruptedException e) {
+      throw new IOException (e);
+    }
+  }
+
+  @Override
+  public int retrieveLatestStackRevisionNumber(String stackName
+                                               ) throws IOException { 
+    int revisionNumber;
+    try {
+      Stat stat = new Stat();
+      String stackLatestRevisionNumberPath = 
+          ZOOKEEPER_STACKS_ROOT_PATH+"/"+stackName+"/latestRevisionNumber";
+      String latestRevisionNumber = 
+          new String (zk.getData(stackLatestRevisionNumberPath, false, stat));
+      revisionNumber = Integer.parseInt(latestRevisionNumber);
+    } catch (Exception e) {
+      throw new IOException (e);
+    }
+    return revisionNumber;
+  }
+
+  @Override
+  public void deleteStack(String stackName) throws IOException {
+    String stackPath = ZOOKEEPER_STACKS_ROOT_PATH+"/"+stackName;
+    List<String> children;
+    try {
+      children = zk.getChildren(stackPath, false);
+      // Delete all the children and then the parent node
+      for (String childPath : children) {
+        try {
+          // child names from getChildren() are relative; prepend the stack path
+          zk.delete(stackPath + "/" + childPath, -1);
+        } catch (KeeperException.NoNodeException ke) {
+        } catch (Exception e) { throw new IOException (e); }
+      }
+      zk.delete(stackPath, -1);
+    } catch (KeeperException.NoNodeException ke) {
+      return;
+    } catch (Exception e) {
+      throw new IOException (e);
+    }
+  }
+
+  @Override
+  public boolean stackExists(String stackName) throws IOException {
+    try {
+      if (zk.exists(ZOOKEEPER_STACKS_ROOT_PATH+"/"+stackName, false) == null) {
+        return false;
+      }
+    } catch (Exception e) {
+      throw new IOException(e);
+    }
+    return true;
+  }
+
+  @Override
+  public void process(WatchedEvent event) {
+    if (event.getType() == Event.EventType.None) {
+      // We are being told that the state of the
+      // connection has changed
+      switch (event.getState()) {
+      case SyncConnected:
+        // In this particular example we don't need to do anything
+        // here - watches are automatically re-registered with 
+        // server and any watches triggered while the client was 
+        // disconnected will be delivered (in order of course)
+        this.zkConnected = true;
+        break;
+      case Expired:
+        // It's all over
+        //running = false;
+        //commandHandler.stop();
+        break;
+      }
+    }
+
+  }
+
+  private void createDirectory(String path, byte[] initialData, 
+                               boolean ignoreIfExists
+                               ) throws KeeperException, InterruptedException {
+    try {
+      zk.create(path, initialData, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
+      if(credential!=null) {
+        zk.setACL(path, Ids.CREATOR_ALL_ACL, -1);
+      }
+      System.out.println("Created path : <" + path +">");
+    } catch (KeeperException.NodeExistsException e) {
+      if (!ignoreIfExists) {
+        System.out.println("Path already exists <"+path+">");
+        throw e;
+      }
+    } catch (KeeperException.AuthFailedException e) {
+      System.out.println("Failed to authenticate for path <"+path+">");
+      throw e;
+    }
+  }
+}
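
For orientation, the znode layout this store maintains looks roughly like the
following (a sketch derived from the path constants above, shown for a cluster
with two definition revisions):

    /ambari
    /ambari/clusters/<clusterName>/0                      ClusterDefinition, revision 0
    /ambari/clusters/<clusterName>/1                      ClusterDefinition, revision 1
    /ambari/clusters/<clusterName>/latestRevisionNumber   "1"
    /ambari/clusters/<clusterName>/state                  ClusterState
    /ambari/stacks/<stackName>/<revision>                 Stack revisions, same scheme
    /ambari/stacks/<stackName>/latestRevisionNumber       latest stack revision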

Added: incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/puppet1-0.json
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/puppet1-0.json?rev=1213162&view=auto
==============================================================================
--- incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/puppet1-0.json (added)
+++ incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/puppet1-0.json Mon Dec 12 08:21:17 2011
@@ -0,0 +1,829 @@
+{
+  "@name":"puppet1",
+  "@revision":"0",
+  "@parentRevision":"-1",
+  "@creationTime":"2011-11-23T18:16:39.604-08:00",
+  "repositories":{
+    "@kind":"TAR",
+    "urls":"http://www.apache.org/dist/hadoop/common/"
+  },
+  "configuration":{
+    "category":[
+      {
+        "@name":"ambari",
+        "property":[
+          {
+            "@name":"ambari_namenode_principal",
+            "@value":"nn"
+          },
+          {
+            "@name":"ambari_datanode_principal",
+            "@value":"dn"
+          },
+          {
+            "@name":"ambari_jobtracker_principal",
+            "@value":"jt"
+          },
+          {
+            "@name":"ambari_tasktracker_principal",
+            "@value":"tt"
+          },
+          {
+            "@name":"ambari_hbasemaster_principal",
+            "@value":"hm"
+          },
+          {
+            "@name":"ambari_regionserver_principal",
+            "@value":"rs"
+          },
+          {
+            "@name":"ambari_hcat_principal",
+            "@value":"hcat"
+          },
+          {
+            "@name":"ambari_hdfs_user",
+            "@value":"hdfs"
+          },
+          {
+            "@name":"ambari_mapreduce_user",
+            "@value":"mapred"
+          },
+          {
+            "@name":"ambari_hbase_user",
+            "@value":"hrt_hbase"
+          },
+          {
+            "@name":"ambari_hcat_user",
+            "@value":"hcat"
+          },
+          {
+            "@name":"ambari_admin_group",
+            "@value":"hadoop"
+          },
+          {
+            "@name":"ambari_dfs_name_dir",
+            "@value":"/var/lib/hadoop/hdfs/namenode,/var/lib/hadoop/hdfs/namenode2,/var/lib/hadoop/hdfs/namenode3"
+          },
+          {
+            "@name":"ambari_dfs_data_dir",
+            "@value":"/var/lib/hadoop/hdfs/datanode,/var/lib/hadoop/hdfs/datanode1,/var/lib/hadoop/hdfs/datanode2"
+          },
+          {
+            "@name":"ambari_user_realm",
+            "@value":"EXAMPLE.KERBEROS.COM"
+          },
+          {
+            "@name":"ambari_service_realm",
+            "@value":"EXAMPLE.KERBEROS.COM"
+          },
+          {
+            "@name":"ambari_hadoop_conf_dir",
+            "@value":"/etc/hadoop/conf"
+          }
+        ]
+      },
+      {
+        "@name":"core-site.xml",
+        "property":[
+          {
+            "@name":"local.realm",
+            "@value":"<%= ambari_service_realm %>"
+          },
+          {
+            "@name":"fs.default.name",
+            "@value":"hdfs://<%= ambari_namenode_host %>:8020"
+          },
+          {
+            "@name":"fs.trash.interval",
+            "@value":"360"
+          },
+          {
+            "@name":"hadoop.security.auth_to_local",
+            "@value":"RULE:[1:$1@$0](.*@<%= ambari_user_realm %>)s/@.*// RULE:[2:$1@$0](<%= ambari_jobtracker_principal %>@<%= ambari_service_realm %>)s/.*/<%= ambari_mapreduce_user %>/ RULE:[2:$1@$0](<%= ambari_tasktracker_principal %>@<%= ambari_service_realm %>)s/.*/<%= ambari_mapreduce_user %>/ RULE:[2:$1@$0](<%= ambari_namenode_principal %>@<%= ambari_service_realm %>)s/.*/<%= ambari_hdfs_user %>/ RULE:[2:$1@$0](<%= ambari_datanode_principal %>@<%= ambari_service_realm %>)s/.*/<%= ambari_hdfs_user %>/ RULE:[2:$1@$0](<%= ambari_hbasemaster_principal %>@<%= ambari_service_realm %>)s/.*/<%= ambari_hbase_user %>/ RULE:[2:$1@$0](<%= ambari_regionserver_principal %>@<%= ambari_service_realm %>)s/.*/<%= ambari_hbase_user %>/ RULE:[2:$1@$0](<%= ambari_hcat_principal %>@<%= ambari_service_realm %>)s/.*/<%= ambari_hcat_user %>/"
+          },
+          {
+            "@name":"hadoop.security.authentication",
+            "@value":"simple"
+          },
+          {
+            "@name":"hadoop.security.authorization",
+            "@value":"false"
+          },
+          {
+            "@name":"hadoop.security.groups.cache.secs",
+            "@value":"14400"
+          },
+          {
+            "@name":"hadoop.kerberos.kinit.command",
+            "@value":"/usr/kerberos/bin/kinit"
+          },
+          {
+            "@name":"hadoop.http.filter.initializers",
+            "@value":"org.apache.hadoop.http.lib.StaticUserWebFilter"
+          }
+        ]
+      },
+      {
+        "@name":"hdfs-site.xml",
+        "property":[
+          {
+            "@name":"*dfs.name.dir",
+            "@value":"<%= ambari_dfs_name_dir %>"
+          },
+          {
+            "@name":"*dfs.data.dir",
+            "@value":"<%= ambari_dfs_data_dir %>"
+          },
+          {
+            "@name":"dfs.safemode.threshold.pct",
+            "@value":"1.0f"
+          },
+          {
+            "@name":"dfs.datanode.address",
+            "@value":"0.0.0.0:50010"
+          },
+          {
+            "@name":"dfs.datanode.http.address",
+            "@value":"0.0.0.0:50075"
+          },
+          {
+            "@name":"*dfs.http.address",
+            "@value":"<%= ambari_namenode_host %>:50070"
+          },
+          {
+            "@name":"dfs.umaskmode",
+            "@value":"077"
+          },
+          {
+            "@name":"dfs.block.access.token.enable",
+            "@value":"false"
+          },
+          {
+            "@name":"dfs.namenode.kerberos.principal",
+            "@value":"<%= ambari_namenode_principal %>/_HOST@<%= ambari_service_realm %>"
+          },
+          {
+            "@name":"dfs.namenode.kerberos.https.principal",
+            "@value":"host/_HOST@<%= ambari_service_realm %>"
+          },
+          {
+            "@name":"dfs.secondary.namenode.kerberos.principal",
+            "@value":"<%= ambari_namenode_principal %>/_HOST@<%= ambari_service_realm %>"
+          },
+          {
+            "@name":"dfs.secondary.namenode.kerberos.https.principal",
+            "@value":"host/_HOST@<%= ambari_service_realm %>"
+          },
+          {
+            "@name":"dfs.datanode.kerberos.principal",
+            "@value":"<%= ambari_datanode_principal %>/_HOST@<%= ambari_service_realm %>"
+          },
+          {
+            "@name":"dfs.web.authentication.kerberos.principal",
+            "@value":"HTTP/_HOST@<%= ambari_service_realm %>"
+          },
+          {
+            "@name":"dfs.web.authentication.kerberos.keytab",
+            "@value":"/etc/security/keytabs/<%= ambari_namenode_principal %>.service.keytab"
+          },
+          {
+            "@name":"dfs.namenode.keytab.file",
+            "@value":"/etc/security/keytabs/<%= ambari_namenode_principal %>.service.keytab"
+          },
+          {
+            "@name":"dfs.secondary.namenode.keytab.file",
+            "@value":"/etc/security/keytabs/<%= ambari_namenode_principal %>.service.keytab"
+          },
+          {
+            "@name":"dfs.datanode.keytab.file",
+            "@value":"/etc/security/keytabs/<%= ambari_datanode_principal %>.service.keytab"
+          },
+          {
+            "@name":"dfs.secondary.https.port",
+            "@value":"50490"
+          },
+          {
+            "@name":"dfs.https.port",
+            "@value":"50470"
+          },
+          {
+            "@name":"dfs.https.address",
+            "@value":"<%= ambari_namenode_host %>:50470"
+          },
+          {
+            "@name":"dfs.datanode.data.dir.perm",
+            "@value":"700"
+          },
+          {
+            "@name":"dfs.cluster.administrators",
+            "@value":"<%= ambari_hdfs_user %>"
+          },
+          {
+            "@name":"dfs.permissions.superusergroup",
+            "@value":"<%= ambari_admin_group %>"
+          },
+          {
+            "@name":"dfs.secondary.http.address",
+            "@value":"<%= ambari_namenode_host %>:50090"
+          },
+          {
+            "@name":"dfs.hosts",
+            "@value":"<%= ambari_hadoop_conf_dir %>/<%= ambari_cluster_name %>/<%= ambari_role_name %>/dfs.include"
+          },
+          {
+            "@name":"dfs.hosts.exclude",
+            "@value":"<%= ambari_hadoop_conf_dir %>/<%= ambari_cluster_name %>/<%= ambari_role_name %>/dfs.exclude"
+          },
+          {
+            "@name":"dfs.webhdfs.enabled",
+            "@value":"true"
+          },
+          {
+            "@name":"dfs.support.append",
+            "@value":"true"
+          }
+        ]
+      },
+      {
+        "@name":"hadoop-env.sh",
+        "property":[
+          {
+            "@name":"JAVA_HOME",
+            "@value":"/usr/jdk1.6.0_27"
+          },
+          {
+            "@name":"HADOOP_CONF_DIR",
+            "@value":"<%= ambari_hadoop_conf_dir %>/<%= ambari_cluster_name %>/<%= ambari_role_name %>"
+          },
+          {
+            "@name":"HADOOP_OPTS",
+            "@value":"-Djava.net.preferIPv4Stack=true $HADOOP_OPTS"
+          },
+          {
+            "@name":"HADOOP_NAMENODE_OPTS",
+            "@value":"-Dsecurity.audit.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_NAMENODE_OPTS"
+          },
+          {
+            "@name":"HADOOP_SECONDARYNAMENODE_OPTS",
+            "@value":"-Dsecurity.audit.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS"
+          },
+          {
+            "@name":"HADOOP_JOBTRACKER_OPTS",
+            "@value":"-Dsecurity.audit.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA $HADOOP_JOBTRACKER_OPTS"
+          },
+          {
+            "@name":"HADOOP_TASKTRACKER_OPTS",
+            "@value":"-Dsecurity.audit.logger=ERROR,console -Dmapred.audit.logger=ERROR,console $HADOOP_TASKTRACKER_OPTS"
+          },
+          {
+            "@name":"HADOOP_DATANODE_OPTS",
+            "@value":"-Dsecurity.audit.logger=ERROR,DRFAS $HADOOP_DATANODE_OPTS"
+          },
+          {
+            "@name":"HADOOP_CLIENT_OPTS",
+            "@value":"-Xmx128m $HADOOP_CLIENT_OPTS"
+          },
+          {
+            "@name":"HADOOP_SECURE_DN_USER",
+            "@value":""
+          },
+          {
+            "@name":"HADOOP_LOG_DIR",
+            "@value":"/var/log/hadoop/$USER"
+          },
+          {
+            "@name":"HADOOP_SECURE_DN_LOG_DIR",
+            "@value":"/var/log/hadoop/<%= ambari_hdfs_user %>"
+          },
+          {
+            "@name":"HADOOP_PID_DIR",
+            "@value":"/var/run/hadoop"
+          },
+          {
+            "@name":"HADOOP_SECURE_DN_PID_DIR",
+            "@value":"/var/run/hadoop"
+          },
+          {
+            "@name":"HADOOP_IDENT_STRING",
+            "@value":"<%= ambari_cluster_name %>"
+          }
+        ]
+      },
+      {
+        "@name":"hadoop-metrics2.properties",
+        "property":{
+          "@name":"*.period",
+          "@value":"60"
+        }
+      }
+    ]
+  },
+  "components":[
+    {
+      "@name":"common",
+      "@architecture":"x86_64",
+      "@version":"0.20.205.0",
+      "@provider":"org.apache.hadoop",
+      "definition":{
+        "@provider":"org.apache.ambari",
+        "@name":"hadoop-common",
+        "@version":"0.1.0"
+      }
+    },
+    {
+      "@name":"hdfs",
+      "@architecture":"x86_64",
+      "@version":"0.20.205.0",
+      "@provider":"org.apache.hadoop",
+      "definition":{
+        "@provider":"org.apache.ambari",
+        "@name":"hadoop-hdfs",
+        "@version":"0.1.0"
+      },
+      "roles":[
+        {
+          "@name":"namenode",
+          "configuration":{
+            "category":[
+              {
+                "@name":"core-site.xml",
+                "property":[
+                  {
+                    "@name":"local.realm",
+                    "@value":"<%= ambari_service_realm %>"
+                  },
+                  {
+                    "@name":"fs.default.name",
+                    "@value":"hdfs://<%= ambari_namenode_host %>:8020"
+                  },
+                  {
+                    "@name":"fs.trash.interval",
+                    "@value":"360"
+                  },
+                  {
+                    "@name":"hadoop.security.auth_to_local",
+                    "@value":"RULE:[1:$1@$0](.*@<%= ambari_user_realm %>)s/@.*// RULE:[2:$1@$0](<%= ambari_jobtracker_principal %>@<%= ambari_service_realm %>)s/.*/<%= ambari_mapreduce_user %>/ RULE:[2:$1@$0](<%= ambari_tasktracker_principal %>@<%= ambari_service_realm %>)s/.*/<%= ambari_mapreduce_user %>/ RULE:[2:$1@$0](<%= ambari_namenode_principal %>@<%= ambari_service_realm %>)s/.*/<%= ambari_hdfs_user %>/ RULE:[2:$1@$0](<%= ambari_datanode_principal %>@<%= ambari_service_realm %>)s/.*/<%= ambari_hdfs_user %>/ RULE:[2:$1@$0](<%= ambari_hbasemaster_principal %>@<%= ambari_service_realm %>)s/.*/<%= ambari_hbase_user %>/ RULE:[2:$1@$0](<%= ambari_regionserver_principal %>@<%= ambari_service_realm %>)s/.*/<%= ambari_hbase_user %>/ RULE:[2:$1@$0](<%= ambari_hcat_principal %>@<%= ambari_service_realm %>)s/.*/<%= ambari_hcat_user %>/"
+                  },
+                  {
+                    "@name":"hadoop.security.authentication",
+                    "@value":"simple"
+                  },
+                  {
+                    "@name":"hadoop.security.authorization",
+                    "@value":"false"
+                  },
+                  {
+                    "@name":"hadoop.security.groups.cache.secs",
+                    "@value":"14400"
+                  },
+                  {
+                    "@name":"hadoop.kerberos.kinit.command",
+                    "@value":"/usr/kerberos/bin/kinit"
+                  },
+                  {
+                    "@name":"hadoop.http.filter.initializers",
+                    "@value":"org.apache.hadoop.http.lib.StaticUserWebFilter"
+                  }
+                ]
+              },
+              {
+                "@name":"hdfs-site.xml",
+                "property":[
+                  {
+                    "@name":"*dfs.name.dir",
+                    "@value":"<%= ambari_dfs_name_dir %>"
+                  },
+                  {
+                    "@name":"*dfs.data.dir",
+                    "@value":"<%= ambari_dfs_data_dir %>"
+                  },
+                  {
+                    "@name":"dfs.safemode.threshold.pct",
+                    "@value":"1.0f"
+                  },
+                  {
+                    "@name":"dfs.datanode.address",
+                    "@value":"0.0.0.0:50010"
+                  },
+                  {
+                    "@name":"dfs.datanode.http.address",
+                    "@value":"0.0.0.0:50075"
+                  },
+                  {
+                    "@name":"*dfs.http.address",
+                    "@value":"<%= ambari_namenode_host %>:50070"
+                  },
+                  {
+                    "@name":"dfs.umaskmode",
+                    "@value":"077"
+                  },
+                  {
+                    "@name":"dfs.block.access.token.enable",
+                    "@value":"false"
+                  },
+                  {
+                    "@name":"dfs.namenode.kerberos.principal",
+                    "@value":"<%= ambari_namenode_principal %>/_HOST@<%= ambari_service_realm %>"
+                  },
+                  {
+                    "@name":"dfs.namenode.kerberos.https.principal",
+                    "@value":"host/_HOST@<%= ambari_service_realm %>"
+                  },
+                  {
+                    "@name":"dfs.secondary.namenode.kerberos.principal",
+                    "@value":"<%= ambari_namenode_principal %>/_HOST@<%= ambari_service_realm %>"
+                  },
+                  {
+                    "@name":"dfs.secondary.namenode.kerberos.https.principal",
+                    "@value":"host/_HOST@<%= ambari_service_realm %>"
+                  },
+                  {
+                    "@name":"dfs.datanode.kerberos.principal",
+                    "@value":"<%= ambari_datanode_principal %>/_HOST@<%= ambari_service_realm %>"
+                  },
+                  {
+                    "@name":"dfs.web.authentication.kerberos.principal",
+                    "@value":"HTTP/_HOST@<%= ambari_service_realm %>"
+                  },
+                  {
+                    "@name":"dfs.web.authentication.kerberos.keytab",
+                    "@value":"/etc/security/keytabs/<%= ambari_namenode_principal %>.service.keytab"
+                  },
+                  {
+                    "@name":"dfs.namenode.keytab.file",
+                    "@value":"/etc/security/keytabs/<%= ambari_namenode_principal %>.service.keytab"
+                  },
+                  {
+                    "@name":"dfs.secondary.namenode.keytab.file",
+                    "@value":"/etc/security/keytabs/<%= ambari_namenode_principal %>.service.keytab"
+                  },
+                  {
+                    "@name":"dfs.datanode.keytab.file",
+                    "@value":"/etc/security/keytabs/<%= ambari_datanode_principal %>.service.keytab"
+                  },
+                  {
+                    "@name":"dfs.secondary.https.port",
+                    "@value":"50490"
+                  },
+                  {
+                    "@name":"dfs.https.port",
+                    "@value":"50470"
+                  },
+                  {
+                    "@name":"dfs.https.address",
+                    "@value":"<%= ambari_namenode_host %>:50470"
+                  },
+                  {
+                    "@name":"dfs.datanode.data.dir.perm",
+                    "@value":"700"
+                  },
+                  {
+                    "@name":"dfs.cluster.administrators",
+                    "@value":"<%= ambari_hdfs_user %>"
+                  },
+                  {
+                    "@name":"dfs.permissions.superusergroup",
+                    "@value":"<%= ambari_admin_group %>"
+                  },
+                  {
+                    "@name":"dfs.secondary.http.address",
+                    "@value":"<%= ambari_namenode_host %>:50090"
+                  },
+                  {
+                    "@name":"dfs.hosts",
+                    "@value":"<%= ambari_hadoop_conf_dir %>/<%= ambari_cluster_name %>/<%= ambari_role_name %>/dfs.include"
+                  },
+                  {
+                    "@name":"dfs.hosts.exclude",
+                    "@value":"<%= ambari_hadoop_conf_dir %>/<%= ambari_cluster_name %>/<%= ambari_role_name %>/dfs.exclude"
+                  },
+                  {
+                    "@name":"dfs.webhdfs.enabled",
+                    "@value":"true"
+                  },
+                  {
+                    "@name":"dfs.support.append",
+                    "@value":"true"
+                  }
+                ]
+              },
+              {
+                "@name":"hadoop-env.sh",
+                "property":[
+                  {
+                    "@name":"JAVA_HOME",
+                    "@value":"/usr/jdk1.6.0_27"
+                  },
+                  {
+                    "@name":"HADOOP_CONF_DIR",
+                    "@value":"<%= ambari_hadoop_conf_dir %>/<%= ambari_cluster_name %>/<%= ambari_role_name %>"
+                  },
+                  {
+                    "@name":"HADOOP_OPTS",
+                    "@value":"-Djava.net.preferIPv4Stack=true $HADOOP_OPTS"
+                  },
+                  {
+                    "@name":"HADOOP_NAMENODE_OPTS",
+                    "@value":"-Dsecurity.audit.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_NAMENODE_OPTS"
+                  },
+                  {
+                    "@name":"HADOOP_SECONDARYNAMENODE_OPTS",
+                    "@value":"-Dsecurity.audit.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS"
+                  },
+                  {
+                    "@name":"HADOOP_JOBTRACKER_OPTS",
+                    "@value":"-Dsecurity.audit.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA $HADOOP_JOBTRACKER_OPTS"
+                  },
+                  {
+                    "@name":"HADOOP_TASKTRACKER_OPTS",
+                    "@value":"-Dsecurity.audit.logger=ERROR,console -Dmapred.audit.logger=ERROR,console $HADOOP_TASKTRACKER_OPTS"
+                  },
+                  {
+                    "@name":"HADOOP_DATANODE_OPTS",
+                    "@value":"-Dsecurity.audit.logger=ERROR,DRFAS $HADOOP_DATANODE_OPTS"
+                  },
+                  {
+                    "@name":"HADOOP_CLIENT_OPTS",
+                    "@value":"-Xmx128m $HADOOP_CLIENT_OPTS"
+                  },
+                  {
+                    "@name":"HADOOP_SECURE_DN_USER",
+                    "@value":""
+                  },
+                  {
+                    "@name":"HADOOP_LOG_DIR",
+                    "@value":"/var/log/hadoop/$USER"
+                  },
+                  {
+                    "@name":"HADOOP_SECURE_DN_LOG_DIR",
+                    "@value":"/var/log/hadoop/<%= ambari_hdfs_user %>"
+                  },
+                  {
+                    "@name":"HADOOP_PID_DIR",
+                    "@value":"/var/run/hadoop"
+                  },
+                  {
+                    "@name":"HADOOP_SECURE_DN_PID_DIR",
+                    "@value":"/var/run/hadoop"
+                  },
+                  {
+                    "@name":"HADOOP_IDENT_STRING",
+                    "@value":"<%= ambari_cluster_name %>"
+                  }
+                ]
+              },
+              {
+                "@name":"hadoop-metrics2.properties",
+                "property":{
+                  "@name":"*.period",
+                  "@value":"60"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "@name":"datanode",
+          "configuration":{
+            "category":[
+              {
+                "@name":"core-site.xml",
+                "property":[
+                  {
+                    "@name":"local.realm",
+                    "@value":"<%= ambari_service_realm %>"
+                  },
+                  {
+                    "@name":"fs.default.name",
+                    "@value":"hdfs://<%= ambari_namenode_host %>:8020"
+                  },
+                  {
+                    "@name":"fs.trash.interval",
+                    "@value":"360"
+                  },
+                  {
+                    "@name":"hadoop.security.auth_to_local",
+                    "@value":"RULE:[1:$1@$0](.*@<%= ambari_user_realm %>)s/@.*// RULE:[2:$1@$0](<%= ambari_jobtracker_principal %>@<%= ambari_service_realm %>)s/.*/<%= ambari_mapreduce_user %>/ RULE:[2:$1@$0](<%= ambari_tasktracker_principal %>@<%= ambari_service_realm %>)s/.*/<%= ambari_mapreduce_user %>/ RULE:[2:$1@$0](<%= ambari_namenode_principal %>@<%= ambari_service_realm %>)s/.*/<%= ambari_hdfs_user %>/ RULE:[2:$1@$0](<%= ambari_datanode_principal %>@<%= ambari_service_realm %>)s/.*/<%= ambari_hdfs_user %>/ RULE:[2:$1@$0](<%= ambari_hbasemaster_principal %>@<%= ambari_service_realm %>)s/.*/<%= ambari_hbase_user %>/ RULE:[2:$1@$0](<%= ambari_regionserver_principal %>@<%= ambari_service_realm %>)s/.*/<%= ambari_hbase_user %>/ RULE:[2:$1@$0](<%= ambari_hcat_principal %>@<%= ambari_service_realm %>)s/.*/<%= ambari_hcat_user %>/"
+                  },
+                  {
+                    "@name":"hadoop.security.authentication",
+                    "@value":"simple"
+                  },
+                  {
+                    "@name":"hadoop.security.authorization",
+                    "@value":"false"
+                  },
+                  {
+                    "@name":"hadoop.security.groups.cache.secs",
+                    "@value":"14400"
+                  },
+                  {
+                    "@name":"hadoop.kerberos.kinit.command",
+                    "@value":"/usr/kerberos/bin/kinit"
+                  },
+                  {
+                    "@name":"hadoop.http.filter.initializers",
+                    "@value":"org.apache.hadoop.http.lib.StaticUserWebFilter"
+                  }
+                ]
+              },
+              {
+                "@name":"hdfs-site.xml",
+                "property":[
+                  {
+                    "@name":"*dfs.name.dir",
+                    "@value":"<%= ambari_dfs_name_dir %>"
+                  },
+                  {
+                    "@name":"*dfs.data.dir",
+                    "@value":"<%= ambari_dfs_data_dir %>"
+                  },
+                  {
+                    "@name":"dfs.safemode.threshold.pct",
+                    "@value":"1.0f"
+                  },
+                  {
+                    "@name":"dfs.datanode.address",
+                    "@value":"0.0.0.0:50010"
+                  },
+                  {
+                    "@name":"dfs.datanode.http.address",
+                    "@value":"0.0.0.0:50075"
+                  },
+                  {
+                    "@name":"*dfs.http.address",
+                    "@value":"<%= ambari_namenode_host %>:50070"
+                  },
+                  {
+                    "@name":"dfs.umaskmode",
+                    "@value":"077"
+                  },
+                  {
+                    "@name":"dfs.block.access.token.enable",
+                    "@value":"false"
+                  },
+                  {
+                    "@name":"dfs.namenode.kerberos.principal",
+                    "@value":"<%= ambari_namenode_principal %>/_HOST@<%= ambari_service_realm %>"
+                  },
+                  {
+                    "@name":"dfs.namenode.kerberos.https.principal",
+                    "@value":"host/_HOST@<%= ambari_service_realm %>"
+                  },
+                  {
+                    "@name":"dfs.secondary.namenode.kerberos.principal",
+                    "@value":"<%= ambari_namenode_principal %>/_HOST@<%= ambari_service_realm %>"
+                  },
+                  {
+                    "@name":"dfs.secondary.namenode.kerberos.https.principal",
+                    "@value":"host/_HOST@<%= ambari_service_realm %>"
+                  },
+                  {
+                    "@name":"dfs.datanode.kerberos.principal",
+                    "@value":"<%= ambari_datanode_principal %>/_HOST@<%= ambari_service_realm %>"
+                  },
+                  {
+                    "@name":"dfs.web.authentication.kerberos.principal",
+                    "@value":"HTTP/_HOST@<%= ambari_service_realm %>"
+                  },
+                  {
+                    "@name":"dfs.web.authentication.kerberos.keytab",
+                    "@value":"/etc/security/keytabs/<%= ambari_namenode_principal %>.service.keytab"
+                  },
+                  {
+                    "@name":"dfs.namenode.keytab.file",
+                    "@value":"/etc/security/keytabs/<%= ambari_namenode_principal %>.service.keytab"
+                  },
+                  {
+                    "@name":"dfs.secondary.namenode.keytab.file",
+                    "@value":"/etc/security/keytabs/<%= ambari_namenode_principal %>.service.keytab"
+                  },
+                  {
+                    "@name":"dfs.datanode.keytab.file",
+                    "@value":"/etc/security/keytabs/<%= ambari_datanode_principal %>.service.keytab"
+                  },
+                  {
+                    "@name":"dfs.secondary.https.port",
+                    "@value":"50490"
+                  },
+                  {
+                    "@name":"dfs.https.port",
+                    "@value":"50470"
+                  },
+                  {
+                    "@name":"dfs.https.address",
+                    "@value":"<%= ambari_namenode_host %>:50470"
+                  },
+                  {
+                    "@name":"dfs.datanode.data.dir.perm",
+                    "@value":"700"
+                  },
+                  {
+                    "@name":"dfs.cluster.administrators",
+                    "@value":"<%= ambari_hdfs_user %>"
+                  },
+                  {
+                    "@name":"dfs.permissions.superusergroup",
+                    "@value":"<%= ambari_admin_group %>"
+                  },
+                  {
+                    "@name":"dfs.secondary.http.address",
+                    "@value":"<%= ambari_namenode_host %>:50090"
+                  },
+                  {
+                    "@name":"dfs.hosts",
+                    "@value":"<%= ambari_hadoop_conf_dir %>/<%= ambari_cluster_name %>/<%= ambari_role_name %>/dfs.include"
+                  },
+                  {
+                    "@name":"dfs.hosts.exclude",
+                    "@value":"<%= ambari_hadoop_conf_dir %>/<%= ambari_cluster_name %>/<%= ambari_role_name %>/dfs.exclude"
+                  },
+                  {
+                    "@name":"dfs.webhdfs.enabled",
+                    "@value":"true"
+                  },
+                  {
+                    "@name":"dfs.support.append",
+                    "@value":"true"
+                  }
+                ]
+              },
+              {
+                "@name":"hadoop-env.sh",
+                "property":[
+                  {
+                    "@name":"JAVA_HOME",
+                    "@value":"/usr/jdk1.6.0_27"
+                  },
+                  {
+                    "@name":"HADOOP_CONF_DIR",
+                    "@value":"<%= ambari_hadoop_conf_dir %>/<%= ambari_cluster_name %>/<%= ambari_role_name %>"
+                  },
+                  {
+                    "@name":"HADOOP_OPTS",
+                    "@value":"-Djava.net.preferIPv4Stack=true $HADOOP_OPTS"
+                  },
+                  {
+                    "@name":"HADOOP_NAMENODE_OPTS",
+                    "@value":"-Dsecurity.audit.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_NAMENODE_OPTS"
+                  },
+                  {
+                    "@name":"HADOOP_SECONDARYNAMENODE_OPTS",
+                    "@value":"-Dsecurity.audit.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS"
+                  },
+                  {
+                    "@name":"HADOOP_JOBTRACKER_OPTS",
+                    "@value":"-Dsecurity.audit.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA $HADOOP_JOBTRACKER_OPTS"
+                  },
+                  {
+                    "@name":"HADOOP_TASKTRACKER_OPTS",
+                    "@value":"-Dsecurity.audit.logger=ERROR,console -Dmapred.audit.logger=ERROR,console $HADOOP_TASKTRACKER_OPTS"
+                  },
+                  {
+                    "@name":"HADOOP_DATANODE_OPTS",
+                    "@value":"-Dsecurity.audit.logger=ERROR,DRFAS $HADOOP_DATANODE_OPTS"
+                  },
+                  {
+                    "@name":"HADOOP_CLIENT_OPTS",
+                    "@value":"-Xmx128m $HADOOP_CLIENT_OPTS"
+                  },
+                  {
+                    "@name":"HADOOP_SECURE_DN_USER",
+                    "@value":""
+                  },
+                  {
+                    "@name":"HADOOP_LOG_DIR",
+                    "@value":"/var/log/hadoop/$USER"
+                  },
+                  {
+                    "@name":"HADOOP_SECURE_DN_LOG_DIR",
+                    "@value":"/var/log/hadoop/<%= ambari_hdfs_user %>"
+                  },
+                  {
+                    "@name":"HADOOP_PID_DIR",
+                    "@value":"/var/run/hadoop"
+                  },
+                  {
+                    "@name":"HADOOP_SECURE_DN_PID_DIR",
+                    "@value":"/var/run/hadoop"
+                  },
+                  {
+                    "@name":"HADOOP_IDENT_STRING",
+                    "@value":"<%= ambari_cluster_name %>"
+                  }
+                ]
+              },
+              {
+                "@name":"hadoop-metrics2.properties",
+                "property":{
+                  "@name":"*.period",
+                  "@value":"60"
+                }
+              }
+            ]
+          }
+        }
+      ]
+    }
+  ]
+}
\ No newline at end of file

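A note on the stack definition above: the '<%= ... %>' markers are ERB-style template placeholders (the resource is named puppet1-0.json and appears to target Puppet-rendered configuration), filled in with cluster-specific values when a role's configuration is materialized. As a worked example with an assumed value of ambari_namenode_host = nn1.example.com, the fs.default.name property would render to hdfs://nn1.example.com:8020 and dfs.https.address to nn1.example.com:50470; variables such as ambari_cluster_name and ambari_role_name are substituted the same way in the dfs.hosts and HADOOP_CONF_DIR entries.
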
Modified: incubator/ambari/trunk/controller/src/test/java/org/apache/ambari/controller/TestHeartbeat.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/test/java/org/apache/ambari/controller/TestHeartbeat.java?rev=1213162&r1=1213161&r2=1213162&view=diff
==============================================================================
--- incubator/ambari/trunk/controller/src/test/java/org/apache/ambari/controller/TestHeartbeat.java (original)
+++ incubator/ambari/trunk/controller/src/test/java/org/apache/ambari/controller/TestHeartbeat.java Mon Dec 12 08:21:17 2011
@@ -41,8 +41,6 @@ import org.apache.ambari.common.rest.ent
 import org.apache.ambari.components.ComponentPlugin;
 import org.apache.ambari.controller.HeartbeatHandler.ClusterNameAndRev;
 import org.apache.ambari.controller.HeartbeatHandler.SpecialServiceIDs;
-import org.apache.ambari.datastore.PersistentDataStore;
-import org.apache.ambari.datastore.impl.StaticDataStore;
 import org.apache.ambari.event.EventHandler;
 import org.apache.ambari.resource.statemachine.ClusterFSM;
 import org.apache.ambari.resource.statemachine.FSMDriverInterface;
@@ -60,6 +58,7 @@ import org.testng.annotations.Test;
 
 import com.google.inject.Guice;
 import com.google.inject.Injector;
+import com.google.inject.name.Names;
 
 public class TestHeartbeat {
   
@@ -82,7 +81,7 @@ public class TestHeartbeat {
     @Override
     protected void configure() {
       super.configure();
-      bind(PersistentDataStore.class).to(StaticDataStore.class);
+      bindConstant().annotatedWith(Names.named("data.store")).to("test:/");
       bind(FSMDriverInterface.class).to(TestFSMDriverImpl.class);
     }
   }

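The TestHeartbeat change above replaces the old binding of PersistentDataStore to StaticDataStore with a named string constant: 'data.store' is bound to 'test:/', and, per the commit description, the new DataStoreFactory selects the storage implementation from that url (defaulting to 'zk://localhost:2181/'). A minimal sketch of how such a constant can be consumed through Guice is shown below; the class and method names are illustrative only, not taken from this commit, and the ZooKeeper wiring is omitted because ZookeeperDS's constructor is not shown in this mail.

    package org.apache.ambari.datastore;

    import java.io.IOException;

    import com.google.inject.Inject;
    import com.google.inject.Singleton;
    import com.google.inject.name.Named;

    /** Illustrative only: picks a DataStore implementation from the 'data.store' url. */
    @Singleton
    public class ExampleDataStoreSelector {
      private final String dataStoreUrl;

      @Inject
      ExampleDataStoreSelector(@Named("data.store") String dataStoreUrl) {
        this.dataStoreUrl = dataStoreUrl;
      }

      public DataStore open() throws IOException {
        if (dataStoreUrl.startsWith("test:")) {
          // 'test:/' selects the static, in-memory store used by the unit tests.
          return new StaticDataStore();
        }
        // The default 'zk://localhost:2181/' would select the ZooKeeper-backed
        // store (ZookeeperDS); its wiring is omitted in this sketch.
        throw new IOException("unsupported data.store url: " + dataStoreUrl);
      }
    }
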
Added: incubator/ambari/trunk/controller/src/test/java/org/apache/ambari/datastore/TestStaticDataStore.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/test/java/org/apache/ambari/datastore/TestStaticDataStore.java?rev=1213162&view=auto
==============================================================================
--- incubator/ambari/trunk/controller/src/test/java/org/apache/ambari/datastore/TestStaticDataStore.java (added)
+++ incubator/ambari/trunk/controller/src/test/java/org/apache/ambari/datastore/TestStaticDataStore.java Mon Dec 12 08:21:17 2011
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.datastore;
+
+import org.apache.ambari.common.rest.entities.Stack;
+import org.apache.ambari.datastore.DataStore;
+import org.apache.ambari.datastore.StaticDataStore;
+import org.testng.annotations.Test;
+import static org.testng.AssertJUnit.assertEquals;
+
+public class TestStaticDataStore {
+
+  @Test
+  public void testGetStack() throws Exception {
+    DataStore ds = new StaticDataStore();
+    Stack stack = ds.retrieveStack("hadoop-security", -1);
+    assertEquals("can fetch revision -1", "0", stack.getRevision());
+  }
+}
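
The new test drives StaticDataStore directly rather than through the injector and checks that revision -1 (latest) of the bundled 'hadoop-security' stack resolves to revision 0. A hypothetical follow-up assertion, not part of this commit and assuming revision 0 can also be requested explicitly, would be:

    // Only methods already shown in this commit (retrieveStack, getRevision) are used.
    Stack latest = ds.retrieveStack("hadoop-security", -1);
    Stack explicit = ds.retrieveStack("hadoop-security", 0);
    assertEquals("explicit revision 0 matches the latest revision",
                 latest.getRevision(), explicit.getRevision());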