Posted to commits@helix.apache.org by ki...@apache.org on 2012/10/25 01:14:57 UTC

[3/42] Refactoring the package names and removing jsql parser

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/437eb42e/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/StateModelsResource.java
----------------------------------------------------------------------
diff --git a/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/StateModelsResource.java b/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/StateModelsResource.java
new file mode 100644
index 0000000..1ec7e09
--- /dev/null
+++ b/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/StateModelsResource.java
@@ -0,0 +1,150 @@
+/**
+ * Copyright (C) 2012 LinkedIn Inc <op...@linkedin.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.helix.webapp.resources;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.helix.HelixDataAccessor;
+import org.apache.helix.HelixException;
+import org.apache.helix.ZNRecord;
+import org.apache.helix.manager.zk.ZkClient;
+import org.apache.helix.model.StateModelDefinition;
+import org.apache.helix.tools.ClusterSetup;
+import org.apache.helix.webapp.RestAdminApplication;
+import org.apache.log4j.Logger;
+import org.codehaus.jackson.JsonGenerationException;
+import org.codehaus.jackson.map.JsonMappingException;
+import org.restlet.Context;
+import org.restlet.data.MediaType;
+import org.restlet.data.Request;
+import org.restlet.data.Response;
+import org.restlet.data.Status;
+import org.restlet.resource.Representation;
+import org.restlet.resource.Resource;
+import org.restlet.resource.StringRepresentation;
+import org.restlet.resource.Variant;
+
+
+public class StateModelsResource extends Resource
+{
+  private final static Logger LOG = Logger.getLogger(StateModelsResource.class);
+
+  public StateModelsResource(Context context, Request request, Response response)
+  {
+    super(context, request, response);
+    getVariants().add(new Variant(MediaType.TEXT_PLAIN));
+    getVariants().add(new Variant(MediaType.APPLICATION_JSON));
+  }
+
+  @Override
+  public boolean allowGet()
+  {
+    return true;
+  }
+  
+  @Override
+  public boolean allowPost()
+  {
+    return true;
+  }
+  
+  @Override
+  public boolean allowPut()
+  {
+    return false;
+  }
+  
+  @Override
+  public boolean allowDelete()
+  {
+    return false;
+  }
+  
+  @Override
+  public Representation represent(Variant variant)
+  {
+    StringRepresentation presentation = null;
+    try
+    {
+      presentation = getStateModelsRepresentation();
+    }
+    catch(Exception e)
+    {
+      String error = ClusterRepresentationUtil.getErrorAsJsonStringFromException(e);
+      presentation = new StringRepresentation(error, MediaType.APPLICATION_JSON);
+
+      LOG.error("", e);
+    }
+    return presentation;
+  }
+  
+  StringRepresentation getStateModelsRepresentation() throws JsonGenerationException, JsonMappingException, IOException
+  {
+    String clusterName = (String)getRequest().getAttributes().get("clusterName");
+    ZkClient zkClient = (ZkClient)getContext().getAttributes().get(RestAdminApplication.ZKCLIENT);
+    ClusterSetup setupTool = new ClusterSetup(zkClient);
+    
+    List<String> models = setupTool.getClusterManagementTool().getStateModelDefs(clusterName);
+    
+    ZNRecord modelDefinitions = new ZNRecord("modelDefinitions");
+    modelDefinitions.setListField("models", models);
+    
+    StringRepresentation representation = new StringRepresentation(ClusterRepresentationUtil.ZNRecordToJson(modelDefinitions), MediaType.APPLICATION_JSON);
+    
+    return representation;
+  }
+  
+  @Override
+  public void acceptRepresentation(Representation entity)
+  {
+    try
+    {
+      String clusterName = (String)getRequest().getAttributes().get("clusterName");
+      ZkClient zkClient = (ZkClient)getContext().getAttributes().get(RestAdminApplication.ZKCLIENT);
+      
+      JsonParameters jsonParameters = new JsonParameters(entity);
+      String command = jsonParameters.getCommand();
+
+      if (command.equalsIgnoreCase(ClusterSetup.addStateModelDef))
+      {
+        ZNRecord newStateModel = jsonParameters.getExtraParameter(JsonParameters.NEW_STATE_MODEL_DEF);
+        HelixDataAccessor accessor = ClusterRepresentationUtil.getClusterDataAccessor(zkClient, clusterName);
+
+        accessor.setProperty(accessor.keyBuilder().stateModelDef(newStateModel.getId()),
+                             new StateModelDefinition(newStateModel));
+        getResponse().setEntity(getStateModelsRepresentation());
+      }
+      else
+      {
+        throw new HelixException("Unsupported command: " + command
+                                 + ". Should be one of [" + ClusterSetup.addStateModelDef + "]");
+      }
+      
+      getResponse().setStatus(Status.SUCCESS_OK);
+    }
+    catch(Exception e)
+    {
+      getResponse().setEntity(ClusterRepresentationUtil.getErrorAsJsonStringFromException(e),
+          MediaType.APPLICATION_JSON);
+      getResponse().setStatus(Status.SUCCESS_OK);
+      LOG.error("Error in posting " + entity, e);
+    }  
+  }
+}
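
For reference, a minimal sketch of how a client might drive the addStateModelDef path of StateModelsResource above, modeled on the Restlet test helpers added later in this commit (assertSuccessPostOperation in TestHelixAdminScenariosRest). The endpoint URL and the contents of the new state model ZNRecord are assumptions; the actual route for this resource is registered in RestAdminApplication, which is not part of this hunk.

import java.io.StringWriter;
import java.util.HashMap;
import java.util.Map;

import org.apache.helix.ZNRecord;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.webapp.resources.ClusterRepresentationUtil;
import org.apache.helix.webapp.resources.JsonParameters;
import org.restlet.Client;
import org.restlet.data.MediaType;
import org.restlet.data.Method;
import org.restlet.data.Protocol;
import org.restlet.data.Reference;
import org.restlet.data.Request;
import org.restlet.data.Response;

public class AddStateModelDefSketch
{
  public static void main(String[] args) throws Exception
  {
    // Hypothetical URL; the real mapping of StateModelsResource is done in RestAdminApplication.
    String url = "http://localhost:2202/clusters/clusterTest1/StateModelDefs";

    // Command map, serialized into the jsonParameters form field that JsonParameters parses.
    Map<String, String> paraMap = new HashMap<String, String>();
    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.addStateModelDef);

    // A ZNRecord describing the new state model definition (fields elided in this sketch).
    ZNRecord newStateModel = new ZNRecord("MyStateModel");

    String entity = JsonParameters.JSON_PARAMETERS + "="
        + ClusterRepresentationUtil.ObjectToJson(paraMap)
        + "&" + JsonParameters.NEW_STATE_MODEL_DEF + "="
        + ClusterRepresentationUtil.ZNRecordToJson(newStateModel);

    Request request = new Request(Method.POST, new Reference(url));
    request.setEntity(entity, MediaType.APPLICATION_ALL);
    Response response = new Client(Protocol.HTTP).handle(request);

    StringWriter sw = new StringWriter();
    response.getEntity().write(sw);
    System.out.println(sw.toString());
  }
}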

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/437eb42e/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/StatusUpdateResource.java
----------------------------------------------------------------------
diff --git a/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/StatusUpdateResource.java b/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/StatusUpdateResource.java
new file mode 100644
index 0000000..a105e75
--- /dev/null
+++ b/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/StatusUpdateResource.java
@@ -0,0 +1,127 @@
+/**
+ * Copyright (C) 2012 LinkedIn Inc <op...@linkedin.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.helix.webapp.resources;
+
+import java.io.IOException;
+
+import org.apache.helix.PropertyKey;
+import org.apache.helix.PropertyKey.Builder;
+import org.apache.helix.manager.zk.ZkClient;
+import org.apache.helix.webapp.RestAdminApplication;
+import org.apache.log4j.Logger;
+import org.codehaus.jackson.JsonGenerationException;
+import org.codehaus.jackson.map.JsonMappingException;
+import org.restlet.Context;
+import org.restlet.data.MediaType;
+import org.restlet.data.Request;
+import org.restlet.data.Response;
+import org.restlet.resource.Representation;
+import org.restlet.resource.Resource;
+import org.restlet.resource.StringRepresentation;
+import org.restlet.resource.Variant;
+
+
+public class StatusUpdateResource extends Resource
+{
+  private final static Logger LOG = Logger.getLogger(StatusUpdateResource.class);
+
+  public StatusUpdateResource(Context context, Request request, Response response)
+  {
+    super(context, request, response);
+    getVariants().add(new Variant(MediaType.TEXT_PLAIN));
+    getVariants().add(new Variant(MediaType.APPLICATION_JSON));
+  }
+
+  @Override
+  public boolean allowGet()
+  {
+    return true;
+  }
+
+  @Override
+  public boolean allowPost()
+  {
+    return false;
+  }
+
+  @Override
+  public boolean allowPut()
+  {
+    return false;
+  }
+
+  @Override
+  public boolean allowDelete()
+  {
+    return false;
+  }
+
+  @Override
+  public Representation represent(Variant variant)
+  {
+    StringRepresentation presentation = null;
+    try
+    {
+      String clusterName = (String) getRequest().getAttributes().get("clusterName");
+      String instanceName = (String) getRequest().getAttributes().get("instanceName");
+      String resourceGroup = (String) getRequest().getAttributes().get("resourceName");
+
+      presentation =
+          getInstanceStatusUpdateRepresentation(
+                                                clusterName,
+                                                instanceName,
+                                                resourceGroup);
+    }
+    catch (Exception e)
+    {
+      String error = ClusterRepresentationUtil.getErrorAsJsonStringFromException(e);
+      presentation = new StringRepresentation(error, MediaType.APPLICATION_JSON);
+
+      LOG.error("", e);
+    }
+    return presentation;
+  }
+
+  StringRepresentation getInstanceStatusUpdateRepresentation(
+                                                             String clusterName,
+                                                             String instanceName,
+                                                             String resourceGroup) throws JsonGenerationException,
+      JsonMappingException,
+      IOException
+  {
+    ZkClient zkClient = (ZkClient)getContext().getAttributes().get(RestAdminApplication.ZKCLIENT);
+  
+    String instanceSessionId =
+        ClusterRepresentationUtil.getInstanceSessionId(zkClient,
+                                                       clusterName,
+                                                       instanceName);
+
+    Builder keyBuilder = new PropertyKey.Builder(clusterName);
+    String message =
+        ClusterRepresentationUtil.getInstancePropertiesAsString(zkClient,
+                                                                  clusterName,
+                                                                  keyBuilder.stateTransitionStatus(instanceName,
+                                                                                                   instanceSessionId,
+                                                                                                   resourceGroup),
+                                                                  MediaType.APPLICATION_JSON);
+    StringRepresentation representation =
+        new StringRepresentation(message, MediaType.APPLICATION_JSON);
+    return representation;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/437eb42e/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/StatusUpdatesResource.java
----------------------------------------------------------------------
diff --git a/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/StatusUpdatesResource.java b/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/StatusUpdatesResource.java
new file mode 100644
index 0000000..80a51df
--- /dev/null
+++ b/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/StatusUpdatesResource.java
@@ -0,0 +1,103 @@
+/**
+ * Copyright (C) 2012 LinkedIn Inc <op...@linkedin.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.helix.webapp.resources;
+
+import java.io.IOException;
+
+import org.apache.helix.PropertyType;
+import org.apache.helix.manager.zk.ZkClient;
+import org.apache.helix.webapp.RestAdminApplication;
+import org.apache.log4j.Logger;
+import org.codehaus.jackson.JsonGenerationException;
+import org.codehaus.jackson.map.JsonMappingException;
+import org.restlet.Context;
+import org.restlet.data.MediaType;
+import org.restlet.data.Request;
+import org.restlet.data.Response;
+import org.restlet.resource.Representation;
+import org.restlet.resource.Resource;
+import org.restlet.resource.StringRepresentation;
+import org.restlet.resource.Variant;
+
+
+public class StatusUpdatesResource extends Resource
+{
+  private final static Logger LOG = Logger.getLogger(StatusUpdatesResource.class);
+
+  public StatusUpdatesResource(Context context, Request request, Response response)
+  {
+    super(context, request, response);
+    getVariants().add(new Variant(MediaType.TEXT_PLAIN));
+    getVariants().add(new Variant(MediaType.APPLICATION_JSON));
+  }
+
+  @Override
+  public boolean allowGet()
+  {
+    return true;
+  }
+
+  @Override
+  public boolean allowPost()
+  {
+    return false;
+  }
+
+  @Override
+  public boolean allowPut()
+  {
+    return false;
+  }
+
+  @Override
+  public boolean allowDelete()
+  {
+    return false;
+  }
+
+  @Override
+  public Representation represent(Variant variant)
+  {
+    StringRepresentation presentation = null;
+    try
+    {
+      String clusterName = (String) getRequest().getAttributes().get("clusterName");
+      String instanceName = (String) getRequest().getAttributes().get("instanceName");
+      presentation = getInstanceErrorsRepresentation( clusterName, instanceName);
+    }
+    catch (Exception e)
+    {
+      String error = ClusterRepresentationUtil.getErrorAsJsonStringFromException(e);
+      presentation = new StringRepresentation(error, MediaType.APPLICATION_JSON);
+
+      LOG.error("", e);
+    }
+    return presentation;
+  }
+
+  StringRepresentation getInstanceErrorsRepresentation( String clusterName, String instanceName) throws JsonGenerationException, JsonMappingException, IOException
+  {
+    ZkClient zkClient = (ZkClient)getContext().getAttributes().get(RestAdminApplication.ZKCLIENT);
+    String instanceSessionId = ClusterRepresentationUtil.getInstanceSessionId(zkClient, clusterName, instanceName);
+    
+    String message = ClusterRepresentationUtil.getInstancePropertyNameListAsString(zkClient, clusterName, instanceName, PropertyType.CURRENTSTATES, instanceSessionId, MediaType.APPLICATION_JSON);
+
+    StringRepresentation representation = new StringRepresentation(message, MediaType.APPLICATION_JSON);
+
+    return representation;
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/437eb42e/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/ZkChildResource.java
----------------------------------------------------------------------
diff --git a/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/ZkChildResource.java b/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/ZkChildResource.java
new file mode 100644
index 0000000..1fab560
--- /dev/null
+++ b/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/ZkChildResource.java
@@ -0,0 +1,158 @@
+package org.apache.helix.webapp.resources;
+
+import java.util.List;
+
+import org.apache.helix.ZNRecord;
+import org.apache.helix.manager.zk.ZkClient;
+import org.apache.helix.webapp.RestAdminApplication;
+import org.apache.log4j.Logger;
+import org.apache.zookeeper.data.Stat;
+import org.restlet.Context;
+import org.restlet.data.MediaType;
+import org.restlet.data.Request;
+import org.restlet.data.Response;
+import org.restlet.data.Status;
+import org.restlet.resource.Representation;
+import org.restlet.resource.Resource;
+import org.restlet.resource.StringRepresentation;
+import org.restlet.resource.Variant;
+
+
+public class ZkChildResource extends Resource
+{
+  private final static Logger LOG = Logger.getLogger(ZkChildResource.class);
+
+  public ZkChildResource(Context context, Request request, Response response)
+  {
+    super(context, request, response);
+    getVariants().add(new Variant(MediaType.TEXT_PLAIN));
+    getVariants().add(new Variant(MediaType.APPLICATION_JSON));
+  }
+
+  @Override
+  public boolean allowGet()
+  {
+    return true;
+  }
+
+  @Override
+  public boolean allowPost()
+  {
+    return false;
+  }
+
+  @Override
+  public boolean allowPut()
+  {
+    return false;
+  }
+
+  @Override
+  public boolean allowDelete()
+  {
+    return true;
+  }
+
+  private String getZKPath()
+  {
+    String relativeRef = getRequest().getResourceRef().getRelativeRef().toString();
+    if (relativeRef.equals("."))
+    {
+      relativeRef = "";
+    }
+
+    // strip off trailing "/"
+    while (relativeRef.endsWith("/"))
+    {
+      relativeRef = relativeRef.substring(0, relativeRef.length() - 1);
+    }
+
+    return "/" + relativeRef;
+  }
+
+  @Override
+  public Representation represent(Variant variant)
+  {
+    StringRepresentation presentation = null;
+    String zkPath = getZKPath();
+
+    try
+    {
+      ZkClient zkClient =
+          (ZkClient) getContext().getAttributes().get(RestAdminApplication.ZKCLIENT);
+      ZNRecord result = readZkChild(zkPath, zkClient);
+
+      presentation =
+          new StringRepresentation(ClusterRepresentationUtil.ZNRecordToJson(result),
+                                   MediaType.APPLICATION_JSON);
+    }
+    catch (Exception e)
+    {
+      String error = ClusterRepresentationUtil.getErrorAsJsonStringFromException(e);
+      presentation = new StringRepresentation(error, MediaType.APPLICATION_JSON);
+
+      LOG.error("Error in read zkPath: " + zkPath, e);
+    }
+
+    return presentation;
+  }
+
+  private ZNRecord readZkChild(String zkPath, ZkClient zkClient)
+  {
+    ZNRecord result = null;
+
+    // read data and stat
+    Stat stat = new Stat();
+    ZNRecord data = zkClient.readDataAndStat(zkPath, stat, true);
+    if (data != null)
+    {
+      result = data;
+    }
+    else
+    {
+      result = new ZNRecord("");
+    }
+
+    // read childrenList
+    List<String> children = zkClient.getChildren(zkPath);
+    if (children != null && children.size() > 0)
+    {
+      result.setSimpleField("numChildren", "" + children.size());
+      result.setListField("childrenList", children);
+    }
+    else
+    {
+      result.setSimpleField("numChildren", "" + 0);
+    }
+    return result;
+  }
+
+  @Override
+  public void removeRepresentations()
+  {
+    String zkPath = getZKPath();
+    try
+    {
+      ZkClient zkClient =
+          (ZkClient) getContext().getAttributes().get(RestAdminApplication.ZKCLIENT);
+      
+      List<String> childNames = zkClient.getChildren(zkPath);
+      if (childNames != null)
+      {
+        for (String childName : childNames)
+        {
+          String childPath = zkPath.equals("/")? "/" + childName : zkPath + "/" + childName;
+          zkClient.deleteRecursive(childPath);
+        }
+      }
+      
+      getResponse().setStatus(Status.SUCCESS_OK);
+    }
+    catch (Exception e)
+    {
+      getResponse().setEntity(ClusterRepresentationUtil.getErrorAsJsonStringFromException(e),
+                              MediaType.APPLICATION_JSON);
+      getResponse().setStatus(Status.SUCCESS_OK);
+      LOG.error("Error in delete zkChild: " + zkPath, e);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/437eb42e/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/ZkPathResource.java
----------------------------------------------------------------------
diff --git a/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/ZkPathResource.java b/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/ZkPathResource.java
new file mode 100644
index 0000000..ff29031
--- /dev/null
+++ b/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/ZkPathResource.java
@@ -0,0 +1,213 @@
+/**
+ * Copyright (C) 2012 LinkedIn Inc <op...@linkedin.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.helix.webapp.resources;
+
+import java.util.Date;
+import java.util.List;
+
+import org.apache.helix.HelixException;
+import org.apache.helix.ZNRecord;
+import org.apache.helix.manager.zk.ZkClient;
+import org.apache.helix.webapp.RestAdminApplication;
+import org.apache.log4j.Logger;
+import org.apache.zookeeper.data.Stat;
+import org.restlet.Context;
+import org.restlet.data.MediaType;
+import org.restlet.data.Request;
+import org.restlet.data.Response;
+import org.restlet.data.Status;
+import org.restlet.resource.Representation;
+import org.restlet.resource.Resource;
+import org.restlet.resource.StringRepresentation;
+import org.restlet.resource.Variant;
+
+
+public class ZkPathResource extends Resource
+{
+  private final static Logger LOG = Logger.getLogger(ZkPathResource.class);
+
+  public ZkPathResource(Context context, Request request, Response response)
+  {
+    super(context, request, response);
+    getVariants().add(new Variant(MediaType.TEXT_PLAIN));
+    getVariants().add(new Variant(MediaType.APPLICATION_JSON));
+  }
+
+  @Override
+  public boolean allowGet()
+  {
+    return true;
+  }
+
+  @Override
+  public boolean allowPost()
+  {
+    return true;
+  }
+
+  @Override
+  public boolean allowPut()
+  {
+    return false;
+  }
+
+  @Override
+  public boolean allowDelete()
+  {
+    return true;
+  }
+
+  private String getZKPath()
+  {
+    String relativeRef = getRequest().getResourceRef().getRelativeRef().toString();
+    if (relativeRef.equals("."))
+    {
+      relativeRef = "";
+    }
+
+    // strip off trailing "/"
+    while (relativeRef.endsWith("/"))
+    {
+      relativeRef = relativeRef.substring(0, relativeRef.length() - 1);
+    }
+
+    return "/" + relativeRef;
+  }
+
+  @Override
+  public void acceptRepresentation(Representation entity)
+  {
+    String zkPath = getZKPath();
+
+    try
+    {
+      JsonParameters jsonParameters = new JsonParameters(entity);
+      String command = jsonParameters.getCommand();
+
+      ZkClient zkClient =
+          (ZkClient) getContext().getAttributes().get(RestAdminApplication.ZKCLIENT);
+      
+      if (command.equalsIgnoreCase(JsonParameters.ZK_DELETE_CHILDREN))
+      {
+        List<String> childNames = zkClient.getChildren(zkPath);
+        if (childNames != null)
+        {
+          for (String childName : childNames)
+          {
+            String childPath = zkPath.equals("/")? "/" + childName : zkPath + "/" + childName;
+            zkClient.deleteRecursive(childPath);
+          }
+        }
+      }
+      else
+      {
+        throw new HelixException("Unsupported command: " + command
+            + ". Should be one of [" + JsonParameters.ZK_DELETE_CHILDREN + "]");
+      }
+
+      getResponse().setStatus(Status.SUCCESS_OK);
+    }
+    catch (Exception e)
+    {
+      getResponse().setEntity(ClusterRepresentationUtil.getErrorAsJsonStringFromException(e),
+                              MediaType.APPLICATION_JSON);
+      getResponse().setStatus(Status.SUCCESS_OK);
+      LOG.error("Error in post zkPath: " + zkPath, e);
+    }
+  }
+
+  @Override
+  public Representation represent(Variant variant)
+  {
+    StringRepresentation presentation = null;
+    String zkPath = getZKPath();
+
+    try
+    {
+      ZkClient zkClient =
+          (ZkClient) getContext().getAttributes().get(RestAdminApplication.ZKCLIENT);
+      ZNRecord result = readZkDataStatAndChild(zkPath, zkClient);
+
+      presentation =
+          new StringRepresentation(ClusterRepresentationUtil.ZNRecordToJson(result),
+                                   MediaType.APPLICATION_JSON);
+    }
+    catch (Exception e)
+    {
+      String error = ClusterRepresentationUtil.getErrorAsJsonStringFromException(e);
+      presentation = new StringRepresentation(error, MediaType.APPLICATION_JSON);
+
+      LOG.error("Error in read zkPath: " + zkPath, e);
+    }
+
+    return presentation;
+  }
+
+  private ZNRecord readZkDataStatAndChild(String zkPath, ZkClient zkClient)
+  {
+    ZNRecord result = null;
+
+    // read data and stat
+    Stat stat = new Stat();
+    ZNRecord data = zkClient.readDataAndStat(zkPath, stat, true);
+    if (data != null)
+    {
+      result = data;
+    }
+    else
+    {
+      result = new ZNRecord("");
+    }
+    result.setSimpleField("zkPath", zkPath);
+    result.setSimpleField("stat", stat.toString());
+    result.setSimpleField("numChildren", "" + stat.getNumChildren());
+    result.setSimpleField("ctime", "" + new Date(stat.getCtime()));
+    result.setSimpleField("mtime", "" + new Date(stat.getMtime()));
+    result.setSimpleField("dataLength", "" + stat.getDataLength());
+
+    // read childrenList
+    List<String> children = zkClient.getChildren(zkPath);
+    if (children != null && children.size() > 0)
+    {
+      result.setListField("childrenList", children);
+    }
+    return result;
+  }
+
+  @Override
+  public void removeRepresentations()
+  {
+    String zkPath = getZKPath();
+    try
+    {
+      ZkClient zkClient =
+          (ZkClient) getContext().getAttributes().get(RestAdminApplication.ZKCLIENT);
+      zkClient.deleteRecursive(zkPath);
+      
+      getResponse().setStatus(Status.SUCCESS_OK);
+    }
+    catch (Exception e)
+    {
+      getResponse().setEntity(ClusterRepresentationUtil.getErrorAsJsonStringFromException(e),
+                              MediaType.APPLICATION_JSON);
+      getResponse().setStatus(Status.SUCCESS_OK);
+      LOG.error("Error in delete zkPath: " + zkPath, e);
+    }
+  }
+
+}
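
A note on the two ZooKeeper-backed resources above: ZkChildResource answers GET with the node's data plus a childrenList, and its DELETE removes only the children under the mapped path, whereas ZkPathResource's DELETE recursively removes the node itself and its POST accepts a zkDeleteChildren command in the jsonParameters form field. Below is a minimal sketch of that POST using the same Restlet client pattern as the tests in this commit; the URL prefix is an assumption (the actual routes are registered in RestAdminApplication), as is carrying the command under JsonParameters.MANAGEMENT_COMMAND.

import java.util.HashMap;
import java.util.Map;

import org.apache.helix.webapp.resources.ClusterRepresentationUtil;
import org.apache.helix.webapp.resources.JsonParameters;
import org.restlet.Client;
import org.restlet.data.MediaType;
import org.restlet.data.Method;
import org.restlet.data.Protocol;
import org.restlet.data.Reference;
import org.restlet.data.Request;
import org.restlet.data.Response;

public class ZkDeleteChildrenSketch
{
  public static void main(String[] args) throws Exception
  {
    // Hypothetical URL prefix for ZkPathResource; see RestAdminApplication for the real mapping.
    String url = "http://localhost:2202/zkPath/clusterTest1/PROPERTYSTORE";

    // Assumed: the command travels in the same MANAGEMENT_COMMAND slot used by the other resources.
    Map<String, String> paraMap = new HashMap<String, String>();
    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, JsonParameters.ZK_DELETE_CHILDREN);

    // POST with zkDeleteChildren removes every child under the path;
    // a DELETE on the same URL would remove the node itself.
    Request request = new Request(Method.POST, new Reference(url));
    request.setEntity(JsonParameters.JSON_PARAMETERS + "="
        + ClusterRepresentationUtil.ObjectToJson(paraMap), MediaType.APPLICATION_ALL);
    Response response = new Client(Protocol.HTTP).handle(request);
    System.out.println(response.getStatus());
  }
}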

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/437eb42e/helix-admin-webapp/src/test/java/org/apache/helix/tools/AdminTestBase.java
----------------------------------------------------------------------
diff --git a/helix-admin-webapp/src/test/java/org/apache/helix/tools/AdminTestBase.java b/helix-admin-webapp/src/test/java/org/apache/helix/tools/AdminTestBase.java
new file mode 100644
index 0000000..c8eede8
--- /dev/null
+++ b/helix-admin-webapp/src/test/java/org/apache/helix/tools/AdminTestBase.java
@@ -0,0 +1,69 @@
+package org.apache.helix.tools;
+
+import java.util.logging.Level;
+
+import org.I0Itec.zkclient.ZkServer;
+import org.apache.helix.TestHelper;
+import org.apache.helix.manager.zk.ZNRecordSerializer;
+import org.apache.helix.manager.zk.ZkClient;
+import org.apache.helix.tools.ClusterSetup;
+import org.apache.helix.tools.AdminTestHelper.AdminThread;
+import org.apache.helix.util.ZKClientPool;
+import org.apache.log4j.Logger;
+import org.testng.AssertJUnit;
+import org.testng.annotations.AfterSuite;
+import org.testng.annotations.BeforeSuite;
+
+
+public class AdminTestBase
+{
+  private static Logger      LOG        = Logger.getLogger(AdminTestBase.class);
+  public static final String ZK_ADDR    = "localhost:2187";
+  protected final static int           ADMIN_PORT = 2202;
+
+  protected static ZkServer  _zkServer;
+  protected static ZkClient  _gZkClient;
+  protected static ClusterSetup _gSetupTool;
+
+  static AdminThread         _adminThread;
+
+  @BeforeSuite
+  public void beforeSuite() throws Exception
+  {
+    // TODO: use logging.properties file to config java.util.logging.Logger levels
+    java.util.logging.Logger topJavaLogger = java.util.logging.Logger.getLogger("");
+    topJavaLogger.setLevel(Level.WARNING);
+
+    // start zk
+    _zkServer = TestHelper.startZkSever(ZK_ADDR);
+    AssertJUnit.assertTrue(_zkServer != null);
+    ZKClientPool.reset();
+
+    _gZkClient = new ZkClient(ZK_ADDR);
+    _gZkClient.setZkSerializer(new ZNRecordSerializer());
+    _gSetupTool = new ClusterSetup(ZK_ADDR);
+    
+    // start admin
+    _adminThread = new AdminThread(ZK_ADDR, ADMIN_PORT);
+    _adminThread.start();
+    
+    // wait for the web service to start
+    Thread.sleep(100);
+  }
+
+  @AfterSuite
+  public void afterSuite()
+  {
+    // System.out.println("START AdminTestBase.afterSuite() at " + new Date(System.currentTimeMillis()));
+    // stop admin
+    _adminThread.stop();
+    
+    // stop zk
+    ZKClientPool.reset();
+    _gZkClient.close();
+
+    TestHelper.stopZkServer(_zkServer);
+    // System.out.println("END AdminTestBase.afterSuite() at " + new Date(System.currentTimeMillis()));
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/437eb42e/helix-admin-webapp/src/test/java/org/apache/helix/tools/AdminTestHelper.java
----------------------------------------------------------------------
diff --git a/helix-admin-webapp/src/test/java/org/apache/helix/tools/AdminTestHelper.java b/helix-admin-webapp/src/test/java/org/apache/helix/tools/AdminTestHelper.java
new file mode 100644
index 0000000..79709eb
--- /dev/null
+++ b/helix-admin-webapp/src/test/java/org/apache/helix/tools/AdminTestHelper.java
@@ -0,0 +1,64 @@
+package org.apache.helix.tools;
+
+import java.util.concurrent.CountDownLatch;
+
+import org.apache.helix.webapp.HelixAdminWebApp;
+
+
+public class AdminTestHelper
+{
+
+  public static class AdminThread
+  {
+    Thread _adminThread;
+    CountDownLatch _stopCountDown = new CountDownLatch(1);
+    String _zkAddr;
+    int _port;
+    
+    public AdminThread(String zkAddr, int port)
+    {
+      _zkAddr = zkAddr;
+      _port = port;
+    }
+    
+    public void start()
+    {
+      Thread adminThread = new Thread(new Runnable()
+      {
+        @Override
+        public void run()
+        {
+          HelixAdminWebApp app = null;
+          try
+          {
+            app = new HelixAdminWebApp(_zkAddr, _port);
+            app.start();
+            // Thread.currentThread().join();
+            _stopCountDown.await();
+          }
+          catch (Exception e)
+          {
+            e.printStackTrace();
+          }
+          finally
+          {
+            if (app != null)
+            {
+//              System.err.println("Stopping HelixAdminWebApp");
+              app.stop();
+            }
+          }
+        }
+      });
+
+      adminThread.setDaemon(true);
+      adminThread.start();
+    }
+    
+    public void stop()
+    {
+      _stopCountDown.countDown();
+    }
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/437eb42e/helix-admin-webapp/src/test/java/org/apache/helix/tools/TestHelixAdminScenariosRest.java
----------------------------------------------------------------------
diff --git a/helix-admin-webapp/src/test/java/org/apache/helix/tools/TestHelixAdminScenariosRest.java b/helix-admin-webapp/src/test/java/org/apache/helix/tools/TestHelixAdminScenariosRest.java
new file mode 100644
index 0000000..5133705
--- /dev/null
+++ b/helix-admin-webapp/src/test/java/org/apache/helix/tools/TestHelixAdminScenariosRest.java
@@ -0,0 +1,741 @@
+package org.apache.helix.tools;
+
+/*
+ * Simulate all the admin tasks needed, exercising them through the REST admin interface
+ */
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.helix.HelixDataAccessor;
+import org.apache.helix.TestHelper;
+import org.apache.helix.ZNRecord;
+import org.apache.helix.TestHelper.StartCMResult;
+import org.apache.helix.controller.HelixControllerMain;
+import org.apache.helix.manager.zk.ZKUtil;
+import org.apache.helix.model.ExternalView;
+import org.apache.helix.model.LiveInstance;
+import org.apache.helix.tools.ClusterSetup;
+import org.apache.helix.tools.ClusterStateVerifier;
+import org.apache.helix.tools.ClusterStateVerifier.BestPossAndExtViewZkVerifier;
+import org.apache.helix.tools.ClusterStateVerifier.MasterNbInExtViewVerifier;
+import org.apache.helix.webapp.RestAdminApplication;
+import org.apache.helix.webapp.resources.ClusterRepresentationUtil;
+import org.apache.helix.webapp.resources.JsonParameters;
+import org.codehaus.jackson.JsonGenerationException;
+import org.codehaus.jackson.JsonParseException;
+import org.codehaus.jackson.map.JsonMappingException;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.SerializationConfig;
+import org.restlet.Client;
+import org.restlet.Component;
+import org.restlet.data.MediaType;
+import org.restlet.data.Method;
+import org.restlet.data.Protocol;
+import org.restlet.data.Reference;
+import org.restlet.data.Request;
+import org.restlet.data.Response;
+import org.restlet.data.Status;
+import org.restlet.resource.Representation;
+import org.testng.Assert;
+import org.testng.annotations.Test;
+
+
+public class TestHelixAdminScenariosRest extends AdminTestBase
+{
+  Map<String, StartCMResult> _startCMResultMap = new HashMap<String, StartCMResult>();
+  RestAdminApplication       _adminApp;
+  Component                  _component;
+
+  public static String ObjectToJson(Object object) throws JsonGenerationException,
+      JsonMappingException,
+      IOException
+  {
+    ObjectMapper mapper = new ObjectMapper();
+    SerializationConfig serializationConfig = mapper.getSerializationConfig();
+    serializationConfig.set(SerializationConfig.Feature.INDENT_OUTPUT, true);
+
+    StringWriter sw = new StringWriter();
+    mapper.writeValue(sw, object);
+
+    return sw.toString();
+  }
+
+  public static <T extends Object> T JsonToObject(Class<T> clazz, String jsonString) throws JsonParseException,
+      JsonMappingException,
+      IOException
+  {
+    StringReader sr = new StringReader(jsonString);
+    ObjectMapper mapper = new ObjectMapper();
+    return mapper.readValue(sr, clazz);
+  }
+
+  @Test
+  public void testAddDeleteClusterAndInstanceAndResource() throws Exception
+  {
+    // Helix bug helix-102
+    // ZKPropertyTransferServer.PERIOD = 500;
+    // ZkPropertyTransferClient.SEND_PERIOD = 500;
+    // ZKPropertyTransferServer.getInstance().init(19999, ZK_ADDR);
+
+    /** ======================= Add clusters ============================== */
+
+    testAddCluster();
+
+    /** ================= Add / drop some resources =========================== */
+
+    testAddResource();
+
+    /** ====================== Add / delete instances =========================== */
+
+    testAddInstance();
+
+    /** ===================== Rebalance resource =========================== */
+
+    testRebalanceResource();
+
+    /** ==================== start the clusters ============================= */
+
+    testStartCluster();
+
+    /** ==================== drop add resource in live clusters =================== */
+    testDropAddResource();
+
+    /** ======================Operations with live node ============================ */
+
+    testInstanceOperations();
+
+    /** ======================Operations with partitions ============================ */
+
+    testEnablePartitions();
+
+    /** ============================ expand cluster =========================== */
+
+    testExpandCluster();
+
+    /** ============================ deactivate cluster =========================== */
+    testDeactivateCluster();
+
+    // wait all zk callbacks done
+    Thread.sleep(1000);
+  }
+
+  static String assertSuccessPostOperation(String url,
+                                           Map<String, String> jsonParameters,
+                                           boolean hasException) throws IOException
+  {
+    Reference resourceRef = new Reference(url);
+
+    Request request = new Request(Method.POST, resourceRef);
+    request.setEntity(JsonParameters.JSON_PARAMETERS + "="
+                          + ClusterRepresentationUtil.ObjectToJson(jsonParameters),
+                      MediaType.APPLICATION_ALL);
+    Client client = new Client(Protocol.HTTP);
+    Response response = client.handle(request);
+    Representation result = response.getEntity();
+    StringWriter sw = new StringWriter();
+    result.write(sw);
+
+    Assert.assertTrue(response.getStatus().getCode() == Status.SUCCESS_OK.getCode());
+    Assert.assertTrue(hasException == sw.toString().toLowerCase().contains("exception"));
+    return sw.toString();
+  }
+
+  static String assertSuccessPostOperation(String url,
+                                           Map<String, String> jsonParameters,
+                                           Map<String, String> extraForm,
+                                           boolean hasException) throws IOException
+  {
+    Reference resourceRef = new Reference(url);
+
+    Request request = new Request(Method.POST, resourceRef);
+    String entity =
+        JsonParameters.JSON_PARAMETERS + "="
+            + ClusterRepresentationUtil.ObjectToJson(jsonParameters);
+    for (String key : extraForm.keySet())
+    {
+      entity = entity + "&" + (key + "=" + extraForm.get(key));
+    }
+    request.setEntity(entity, MediaType.APPLICATION_ALL);
+    Client client = new Client(Protocol.HTTP);
+    Response response = client.handle(request);
+    Representation result = response.getEntity();
+    StringWriter sw = new StringWriter();
+    result.write(sw);
+
+    Assert.assertTrue(response.getStatus().getCode() == Status.SUCCESS_OK.getCode());
+    Assert.assertTrue(hasException == sw.toString().toLowerCase().contains("exception"));
+    return sw.toString();
+  }
+
+  void deleteUrl(String url, boolean hasException) throws IOException
+  {
+    Reference resourceRef = new Reference(url);
+    Request request = new Request(Method.DELETE, resourceRef);
+    Client client = new Client(Protocol.HTTP);
+    Response response = client.handle(request);
+    Representation result = response.getEntity();
+    StringWriter sw = new StringWriter();
+    result.write(sw);
+    Assert.assertTrue(hasException == sw.toString().toLowerCase().contains("exception"));
+  }
+
+  String getUrl(String url) throws IOException
+  {
+    Reference resourceRef = new Reference(url);
+    Request request = new Request(Method.GET, resourceRef);
+    Client client = new Client(Protocol.HTTP);
+    Response response = client.handle(request);
+    Representation result = response.getEntity();
+    StringWriter sw = new StringWriter();
+    result.write(sw);
+    return sw.toString();
+  }
+
+  String getClusterUrl(String cluster)
+  {
+    return "http://localhost:" + ADMIN_PORT + "/clusters" + "/" + cluster;
+  }
+
+  String getInstanceUrl(String cluster, String instance)
+  {
+    return "http://localhost:" + ADMIN_PORT + "/clusters/" + cluster + "/instances/"
+        + instance;
+  }
+
+  String getResourceUrl(String cluster, String resourceGroup)
+  {
+    return "http://localhost:" + ADMIN_PORT + "/clusters/" + cluster + "/resourceGroups/"
+        + resourceGroup;
+  }
+
+  void assertClusterSetupException(String command)
+  {
+    boolean exceptionThrown = false;
+    try
+    {
+      ClusterSetup.processCommandLineArgs(command.split(" "));
+    }
+    catch (Exception e)
+    {
+      exceptionThrown = true;
+    }
+    Assert.assertTrue(exceptionThrown);
+  }
+
+  public void testAddCluster() throws Exception
+  {
+    String url = "http://localhost:" + ADMIN_PORT + "/clusters";
+    Map<String, String> paraMap = new HashMap<String, String>();
+
+    // Normal add
+    paraMap.put(JsonParameters.CLUSTER_NAME, "clusterTest");
+    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.addCluster);
+
+    String response = assertSuccessPostOperation(url, paraMap, false);
+    Assert.assertTrue(response.contains("clusterTest"));
+
+    // malformed cluster name
+    paraMap.put(JsonParameters.CLUSTER_NAME, "/ClusterTest");
+    response = assertSuccessPostOperation(url, paraMap, true);
+
+    // Add the grand cluster
+    paraMap.put(JsonParameters.CLUSTER_NAME, "Klazt3rz");
+    response = assertSuccessPostOperation(url, paraMap, false);
+    Assert.assertTrue(response.contains("Klazt3rz"));
+
+    paraMap.put(JsonParameters.CLUSTER_NAME, "\\ClusterTest");
+    response = assertSuccessPostOperation(url, paraMap, false);
+    Assert.assertTrue(response.contains("\\ClusterTest"));
+
+    // Add already exist cluster
+    paraMap.put(JsonParameters.CLUSTER_NAME, "clusterTest");
+    response = assertSuccessPostOperation(url, paraMap, true);
+
+    // delete cluster without resource and instance
+    Assert.assertTrue(ZKUtil.isClusterSetup("Klazt3rz", _gZkClient));
+    Assert.assertTrue(ZKUtil.isClusterSetup("clusterTest", _gZkClient));
+    Assert.assertTrue(ZKUtil.isClusterSetup("\\ClusterTest", _gZkClient));
+
+    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.dropCluster);
+
+    String clusterUrl = getClusterUrl("\\ClusterTest");
+    deleteUrl(clusterUrl, false);
+
+    String clustersUrl = "http://localhost:" + ADMIN_PORT + "/clusters";
+    response = getUrl(clustersUrl);
+
+    clusterUrl = getClusterUrl("clusterTest1");
+    deleteUrl(clusterUrl, false);
+    response = getUrl(clustersUrl);
+    Assert.assertFalse(response.contains("clusterTest1"));
+
+    clusterUrl = getClusterUrl("clusterTest");
+    deleteUrl(clusterUrl, false);
+    response = getUrl(clustersUrl);
+    Assert.assertFalse(response.contains("clusterTest"));
+
+    clusterUrl = getClusterUrl("clusterTestOK");
+    deleteUrl(clusterUrl, false);
+
+    Assert.assertFalse(_gZkClient.exists("/clusterTest"));
+    Assert.assertFalse(_gZkClient.exists("/clusterTest1"));
+    Assert.assertFalse(_gZkClient.exists("/clusterTestOK"));
+
+    paraMap.put(JsonParameters.CLUSTER_NAME, "clusterTest1");
+    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.addCluster);
+    response = assertSuccessPostOperation(url, paraMap, false);
+    response = getUrl(clustersUrl);
+    Assert.assertTrue(response.contains("clusterTest1"));
+  }
+
+  public void testAddResource() throws Exception
+  {
+    String resourcesUrl =
+        "http://localhost:" + ADMIN_PORT + "/clusters/clusterTest1/resourceGroups";
+
+    Map<String, String> paraMap = new HashMap<String, String>();
+    paraMap.put(JsonParameters.RESOURCE_GROUP_NAME, "db_22");
+    paraMap.put(JsonParameters.STATE_MODEL_DEF_REF, "MasterSlave");
+    paraMap.put(JsonParameters.PARTITIONS, "144");
+    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.addResource);
+
+    String response = assertSuccessPostOperation(resourcesUrl, paraMap, false);
+    Assert.assertTrue(response.contains("db_22"));
+
+    paraMap.put(JsonParameters.RESOURCE_GROUP_NAME, "db_11");
+    paraMap.put(JsonParameters.STATE_MODEL_DEF_REF, "MasterSlave");
+    paraMap.put(JsonParameters.PARTITIONS, "44");
+
+    response = assertSuccessPostOperation(resourcesUrl, paraMap, false);
+    Assert.assertTrue(response.contains("db_11"));
+
+    // Add duplicate resource
+    paraMap.put(JsonParameters.RESOURCE_GROUP_NAME, "db_22");
+    paraMap.put(JsonParameters.STATE_MODEL_DEF_REF, "OnlineOffline");
+    paraMap.put(JsonParameters.PARTITIONS, "55");
+
+    response = assertSuccessPostOperation(resourcesUrl, paraMap, true);
+
+    // drop resource now
+    String resourceUrl = getResourceUrl("clusterTest1", "db_11");
+    deleteUrl(resourceUrl, false);
+    Assert.assertFalse(_gZkClient.exists("/clusterTest1/IDEALSTATES/db_11"));
+
+    paraMap.put(JsonParameters.RESOURCE_GROUP_NAME, "db_11");
+    paraMap.put(JsonParameters.STATE_MODEL_DEF_REF, "MasterSlave");
+    paraMap.put(JsonParameters.PARTITIONS, "44");
+    response = assertSuccessPostOperation(reourcesUrl, paraMap, false);
+    Assert.assertTrue(response.contains("db_11"));
+
+    Assert.assertTrue(_gZkClient.exists("/clusterTest1/IDEALSTATES/db_11"));
+  }
+
+  private void testDeactivateCluster() throws Exception
+  {
+    HelixDataAccessor accessor;
+    String path;
+    // deactivate cluster
+    String clusterUrl = getClusterUrl("clusterTest1");
+    Map<String, String> paraMap = new HashMap<String, String>();
+    paraMap.put(JsonParameters.ENABLED, "false");
+    paraMap.put(JsonParameters.GRAND_CLUSTER, "Klazt3rz");
+    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.activateCluster);
+
+    String response = assertSuccessPostOperation(clusterUrl, paraMap, false);
+    Thread.sleep(6000);
+    Assert.assertFalse(_gZkClient.exists("/Klazt3rz/IDEALSTATES/clusterTest1"));
+
+    accessor = _startCMResultMap.get("localhost_1231")._manager.getHelixDataAccessor();
+    path = accessor.keyBuilder().controllerLeader().getPath();
+    Assert.assertFalse(_gZkClient.exists(path));
+
+    deleteUrl(clusterUrl, true);
+
+    Assert.assertTrue(_gZkClient.exists("/clusterTest1"));
+    // leader node should be gone
+    for (StartCMResult result : _startCMResultMap.values())
+    {
+      result._manager.disconnect();
+      result._thread.interrupt();
+    }
+    deleteUrl(clusterUrl, false);
+
+    Assert.assertFalse(_gZkClient.exists("/clusterTest1"));
+  }
+
+  private void testDropAddResource() throws Exception
+  {
+    ZNRecord record =
+        _gSetupTool._admin.getResourceIdealState("clusterTest1", "db_11").getRecord();
+    String x = ObjectToJson(record);
+
+    FileWriter fos = new FileWriter("/tmp/temp.log");
+    PrintWriter pw = new PrintWriter(fos);
+    pw.write(x);
+    pw.close();
+
+    String resourceUrl = getResourceUrl("clusterTest1", "db_11");
+    deleteUrl(resourceUrl, false);
+
+    boolean verifyResult =
+        ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR,
+                                                                                 "clusterTest1"));
+    Assert.assertTrue(verifyResult);
+    Map<String, String> paraMap = new HashMap<String, String>();
+    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.addResource);
+    paraMap.put(JsonParameters.RESOURCE_GROUP_NAME, "db_11");
+    paraMap.put(JsonParameters.PARTITIONS, "22");
+    paraMap.put(JsonParameters.STATE_MODEL_DEF_REF, "MasterSlave");
+    String response =
+        assertSuccessPostOperation(getClusterUrl("clusterTest1") + "/resourceGroups",
+                                   paraMap,
+                                   false);
+
+    String idealStateUrl = getResourceUrl("clusterTest1", "db_11") + "/idealState";
+    Assert.assertTrue(response.contains("db_11"));
+    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.addIdealState);
+    Map<String, String> extraform = new HashMap<String, String>();
+    extraform.put(JsonParameters.NEW_IDEAL_STATE, x);
+    response = assertSuccessPostOperation(idealStateUrl, paraMap, extraform, false);
+
+    verifyResult =
+        ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR,
+                                                                                 "clusterTest1"));
+    Assert.assertTrue(verifyResult);
+
+    ZNRecord record2 =
+        _gSetupTool._admin.getResourceIdealState("clusterTest1", "db_11").getRecord();
+    Assert.assertTrue(record2.equals(record));
+  }
+
+  private void testExpandCluster() throws Exception
+  {
+    boolean verifyResult;
+
+    String clusterUrl = getClusterUrl("clusterTest1");
+    String instancesUrl = clusterUrl + "/instances";
+
+    Map<String, String> paraMap = new HashMap<String, String>();
+    paraMap.put(JsonParameters.INSTANCE_NAMES,
+                "localhost:12331;localhost:12341;localhost:12351;localhost:12361");
+    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.addInstance);
+
+    String response = assertSuccessPostOperation(instancesUrl, paraMap, false);
+    String[] hosts =
+        "localhost:12331;localhost:12341;localhost:12351;localhost:12361".split(";");
+    for (String host : hosts)
+    {
+      Assert.assertTrue(response.contains(host.replace(':', '_')));
+    }
+    paraMap.clear();
+    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.expandCluster);
+    response = assertSuccessPostOperation(clusterUrl, paraMap, false);
+
+    for (int i = 3; i <= 6; i++)
+    {
+      StartCMResult result =
+          TestHelper.startDummyProcess(ZK_ADDR, "clusterTest1", "localhost_123" + i + "1");
+      _startCMResultMap.put("localhost_123" + i + "1", result);
+    }
+
+    verifyResult =
+        ClusterStateVerifier.verifyByZkCallback(new MasterNbInExtViewVerifier(ZK_ADDR,
+                                                                              "clusterTest1"));
+    Assert.assertTrue(verifyResult);
+
+    verifyResult =
+        ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR,
+                                                                                 "clusterTest1"));
+    Assert.assertTrue(verifyResult);
+  }
+
+  private void testEnablePartitions() throws IOException,
+      InterruptedException
+  {
+    HelixDataAccessor accessor;
+    accessor = _startCMResultMap.get("localhost_1231")._manager.getHelixDataAccessor();
+    // disable, then re-enable, two partitions on a live instance
+    String hostName = "localhost_1231";
+    String instanceUrl = getInstanceUrl("clusterTest1", hostName);
+    ExternalView ev = accessor.getProperty(accessor.keyBuilder().externalView("db_11"));
+
+    Map<String, String> paraMap = new HashMap<String, String>();
+    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.enablePartition);
+    paraMap.put(JsonParameters.ENABLED, "false");
+    paraMap.put(JsonParameters.PARTITION, "db_11_0;db_11_15");
+    paraMap.put(JsonParameters.RESOURCE, "db_11");
+
+    String response = assertSuccessPostOperation(instanceUrl, paraMap, false);
+    Assert.assertTrue(response.contains("DISABLED_PARTITION"));
+    Assert.assertTrue(response.contains("db_11_0"));
+    Assert.assertTrue(response.contains("db_11_15"));
+
+    boolean verifyResult =
+        ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR,
+                                                                                 "clusterTest1"));
+    Assert.assertTrue(verifyResult);
+
+    ev = accessor.getProperty(accessor.keyBuilder().externalView("db_11"));
+    Assert.assertEquals(ev.getStateMap("db_11_0").get(hostName), "OFFLINE");
+    Assert.assertEquals(ev.getStateMap("db_11_15").get(hostName), "OFFLINE");
+
+    paraMap.put(JsonParameters.ENABLED, "true");
+    response = assertSuccessPostOperation(instanceUrl, paraMap, false);
+    Assert.assertFalse(response.contains("db_11_0"));
+    Assert.assertFalse(response.contains("db_11_15"));
+
+    verifyResult =
+        ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR,
+                                                                                 "clusterTest1"));
+    Assert.assertTrue(verifyResult);
+
+    ev = accessor.getProperty(accessor.keyBuilder().externalView("db_11"));
+    Assert.assertEquals(ev.getStateMap("db_11_0").get(hostName), "MASTER");
+    Assert.assertEquals(ev.getStateMap("db_11_15").get(hostName), "SLAVE");
+  }
+
+  private void testInstanceOperations() throws Exception
+  {
+    HelixDataAccessor accessor;
+    // drop node should fail as not disabled
+    String instanceUrl = getInstanceUrl("clusterTest1", "localhost_1232");
+    deleteUrl(instanceUrl, true);
+
+    // disabled node
+    Map<String, String> paraMap = new HashMap<String, String>();
+    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.enableInstance);
+    paraMap.put(JsonParameters.ENABLED, "false");
+    String response = assertSuccessPostOperation(instanceUrl, paraMap, false);
+    Assert.assertTrue(response.contains("false"));
+
+    // Cannot drop / swap
+    deleteUrl(instanceUrl, true);
+
+    String instancesUrl = getClusterUrl("clusterTest1") + "/instances";
+    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.swapInstance);
+    paraMap.put(JsonParameters.OLD_INSTANCE, "localhost_1232");
+    paraMap.put(JsonParameters.NEW_INSTANCE, "localhost_12320");
+    response = assertSuccessPostOperation(instancesUrl, paraMap, true);
+
+    // disconnect the node
+    _startCMResultMap.get("localhost_1232")._manager.disconnect();
+    _startCMResultMap.get("localhost_1232")._thread.interrupt();
+
+    // add new node then swap instance
+    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.addInstance);
+    paraMap.put(JsonParameters.INSTANCE_NAME, "localhost_12320");
+    response = assertSuccessPostOperation(instancesUrl, paraMap, false);
+    Assert.assertTrue(response.contains("localhost_12320"));
+
+    // swap instance. The instance that gets swapped out should not exist anymore
+    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.swapInstance);
+    paraMap.put(JsonParameters.OLD_INSTANCE, "localhost_1232");
+    paraMap.put(JsonParameters.NEW_INSTANCE, "localhost_12320");
+    response = assertSuccessPostOperation(instancesUrl, paraMap, false);
+    Assert.assertTrue(response.contains("localhost_12320"));
+    Assert.assertFalse(response.contains("localhost_1232\""));
+
+    accessor = _startCMResultMap.get("localhost_1231")._manager.getHelixDataAccessor();
+    String path = accessor.keyBuilder().instanceConfig("localhost_1232").getPath();
+    Assert.assertFalse(_gZkClient.exists(path));
+
+    _startCMResultMap.put("localhost_12320",
+                          TestHelper.startDummyProcess(ZK_ADDR,
+                                                       "clusterTest1",
+                                                       "localhost_12320"));
+  }
+
+  private void testStartCluster() throws Exception
+  {
+    // start mock nodes
+    for (int i = 0; i < 6; i++)
+    {
+      StartCMResult result =
+          TestHelper.startDummyProcess(ZK_ADDR, "clusterTest1", "localhost_123" + i);
+      _startCMResultMap.put("localhost_123" + i, result);
+    }
+
+    // start controller nodes
+    for (int i = 0; i < 2; i++)
+    {
+      StartCMResult result =
+          TestHelper.startController("Klazt3rz",
+                                     "controller_900" + i,
+                                     ZK_ADDR,
+                                     HelixControllerMain.DISTRIBUTED);
+
+      _startCMResultMap.put("controller_900" + i, result);
+    }
+    Thread.sleep(100);
+
+    // activate clusters
+    // wrong grand cluster name
+
+    String clusterUrl = getClusterUrl("clusterTest1");
+    Map<String, String> paraMap = new HashMap<String, String>();
+    paraMap.put(JsonParameters.ENABLED, "true");
+    paraMap.put(JsonParameters.GRAND_CLUSTER, "Klazters");
+    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.activateCluster);
+
+    String response = assertSuccessPostOperation(clusterUrl, paraMap, true);
+
+    // wrong cluster name
+    clusterUrl = getClusterUrl("clusterTest2");
+    paraMap.put(JsonParameters.GRAND_CLUSTER, "Klazt3rz");
+    response = assertSuccessPostOperation(clusterUrl, paraMap, true);
+
+    paraMap.put(JsonParameters.ENABLED, "true");
+    paraMap.put(JsonParameters.GRAND_CLUSTER, "Klazt3rz");
+    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.activateCluster);
+    clusterUrl = getClusterUrl("clusterTest1");
+    response = assertSuccessPostOperation(clusterUrl, paraMap, false);
+    Thread.sleep(500);
+
+    deleteUrl(clusterUrl, true);
+
+    // verify leader node
+    HelixDataAccessor accessor =
+        _startCMResultMap.get("controller_9001")._manager.getHelixDataAccessor();
+    LiveInstance controllerLeader =
+        accessor.getProperty(accessor.keyBuilder().controllerLeader());
+    Assert.assertTrue(controllerLeader.getInstanceName().startsWith("controller_900"));
+
+    accessor = _startCMResultMap.get("localhost_1232")._manager.getHelixDataAccessor();
+    LiveInstance leader = accessor.getProperty(accessor.keyBuilder().controllerLeader());
+    Assert.assertTrue(leader.getInstanceName().startsWith("controller_900"));
+
+    boolean verifyResult =
+        ClusterStateVerifier.verifyByZkCallback(new MasterNbInExtViewVerifier(ZK_ADDR,
+                                                                              "clusterTest1"));
+    Assert.assertTrue(verifyResult);
+
+    verifyResult =
+        ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR,
+                                                                                 "clusterTest1"));
+    Assert.assertTrue(verifyResult);
+  }
+
+  private void testRebalanceResource() throws Exception
+  {
+    String resourceUrl = getResourceUrl("clusterTest1", "db_11");
+    Map<String, String> paraMap = new HashMap<String, String>();
+    paraMap.put(JsonParameters.REPLICAS, "3");
+    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.rebalance);
+
+    String ISUrl = resourceUrl + "/idealState";
+    String response = assertSuccessPostOperation(ISUrl, paraMap, false);
+    ZNRecord record = JsonToObject(ZNRecord.class, response);
+    Assert.assertTrue(record.getId().equalsIgnoreCase("db_11"));
+    Assert.assertTrue((((List<String>) (record.getListFields().values().toArray()[0]))).size() == 3);
+    Assert.assertTrue((((Map<String, String>) (record.getMapFields().values().toArray()[0]))).size() == 3);
+
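+    // drop the resource so it can be re-added and rebalanced below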
+    deleteUrl(resourceUrl, false);
+
+    // re-add and rebalance
+    String resourceGroupsUrl =
+        "http://localhost:" + ADMIN_PORT + "/clusters/clusterTest1/resourceGroups";
+    response = getUrl(resourceGroupsUrl);
+    Assert.assertFalse(response.contains("db_11"));
+
+    paraMap = new HashMap<String, String>();
+    paraMap.put(JsonParameters.RESOURCE_GROUP_NAME, "db_11");
+    paraMap.put(JsonParameters.STATE_MODEL_DEF_REF, "MasterSlave");
+    paraMap.put(JsonParameters.PARTITIONS, "48");
+    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.addResource);
+
+    response = assertSuccessPostOperation(resourceGroupsUrl, paraMap, false);
+    Assert.assertTrue(response.contains("db_11"));
+
+    ISUrl = resourceUrl + "/idealState";
+    paraMap.put(JsonParameters.REPLICAS, "3");
+    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.rebalance);
+    response = assertSuccessPostOperation(ISUrl, paraMap, false);
+    record = JsonToObject(ZNRecord.class, response);
+    Assert.assertTrue(record.getId().equalsIgnoreCase("db_11"));
+    Assert.assertTrue((((List<String>) (record.getListFields().values().toArray()[0]))).size() == 3);
+    Assert.assertTrue((((Map<String, String>) (record.getMapFields().values().toArray()[0]))).size() == 3);
+
+    // rebalance with key prefix
+    resourceUrl = getResourceUrl("clusterTest1", "db_22");
+    ISUrl = resourceUrl + "/idealState";
+    paraMap.put(JsonParameters.REPLICAS, "2");
+    paraMap.put(JsonParameters.RESOURCE_KEY_PREFIX, "alias");
+    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.rebalance);
+    response = assertSuccessPostOperation(ISUrl, paraMap, false);
+    record = JsonToObject(ZNRecord.class, response);
+    Assert.assertTrue(record.getId().equalsIgnoreCase("db_22"));
+    Assert.assertTrue((((List<String>) (record.getListFields().values().toArray()[0]))).size() == 2);
+    Assert.assertTrue((((Map<String, String>) (record.getMapFields().values().toArray()[0]))).size() == 2);
+    Assert.assertTrue((((String) (record.getMapFields().keySet().toArray()[0]))).startsWith("alias_"));
+  }
+
+  private void testAddInstance() throws Exception
+  {
+    String clusterUrl = getClusterUrl("clusterTest1");
+    Map<String, String> paraMap = new HashMap<String, String>();
+    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.addInstance);
+    String response = null;
+    // Add instances to cluster
+    String instancesUrl = clusterUrl + "/instances";
+    for (int i = 0; i < 3; i++)
+    {
+
+      paraMap.put(JsonParameters.INSTANCE_NAME, "localhost:123" + i);
+      response = assertSuccessPostOperation(instancesUrl, paraMap, false);
+      Assert.assertTrue(response.contains(("localhost:123" + i).replace(':', '_')));
+    }
+    paraMap.remove(JsonParameters.INSTANCE_NAME);
+    paraMap.put(JsonParameters.INSTANCE_NAMES,
+                "localhost:1233;localhost:1234;localhost:1235;localhost:1236");
+
+    response = assertSuccessPostOperation(instancesUrl, paraMap, false);
+    for (int i = 3; i <= 6; i++)
+    {
+      Assert.assertTrue(response.contains("localhost_123" + i));
+    }
+
+    // deleting a node without disabling it first should be rejected
+    String instanceUrl = instancesUrl + "/localhost_1236";
+    deleteUrl(instanceUrl, true);
+    response = getUrl(instancesUrl);
+    Assert.assertTrue(response.contains("localhost_1236"));
+
+    // delete a non-existent node
+    instanceUrl = instancesUrl + "/localhost_12367";
+    deleteUrl(instanceUrl, true);
+    response = getUrl(instancesUrl);
+    Assert.assertFalse(response.contains("localhost_12367"));
+
+    // disable node
+    instanceUrl = instancesUrl + "/localhost_1236";
+    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.enableInstance);
+    paraMap.put(JsonParameters.ENABLED, "false");
+    response = assertSuccessPostOperation(instanceUrl, paraMap, false);
+    Assert.assertTrue(response.contains("false"));
+
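+    // the disabled instance can now be dropped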
+    deleteUrl(instanceUrl, false);
+
+    // add node to controller cluster
+    paraMap.remove(JsonParameters.INSTANCE_NAME);
+    paraMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.addInstance);
+    paraMap.put(JsonParameters.INSTANCE_NAMES, "controller:9000;controller:9001");
+    String controllerUrl = getClusterUrl("Klazt3rz") + "/instances";
+    response = assertSuccessPostOperation(controllerUrl, paraMap, false);
+    Assert.assertTrue(response.contains("controller_9000"));
+    Assert.assertTrue(response.contains("controller_9001"));
+
+    // add a duplicate host (expected to fail)
+    paraMap.remove(JsonParameters.INSTANCE_NAMES);
+    paraMap.put(JsonParameters.INSTANCE_NAME, "localhost:1234");
+    response = assertSuccessPostOperation(instancesUrl, paraMap, true);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/437eb42e/helix-admin-webapp/src/test/java/org/apache/helix/tools/TestResetInstance.java
----------------------------------------------------------------------
diff --git a/helix-admin-webapp/src/test/java/org/apache/helix/tools/TestResetInstance.java b/helix-admin-webapp/src/test/java/org/apache/helix/tools/TestResetInstance.java
new file mode 100644
index 0000000..3e91fad
--- /dev/null
+++ b/helix-admin-webapp/src/test/java/org/apache/helix/tools/TestResetInstance.java
@@ -0,0 +1,119 @@
+package org.apache.helix.tools;
+
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.helix.TestHelper;
+import org.apache.helix.mock.controller.ClusterController;
+import org.apache.helix.mock.storage.MockParticipant;
+import org.apache.helix.mock.storage.MockParticipant.ErrTransition;
+import org.apache.helix.tools.ClusterSetup;
+import org.apache.helix.tools.ClusterStateVerifier;
+import org.apache.helix.webapp.resources.JsonParameters;
+import org.testng.Assert;
+import org.testng.annotations.Test;
+
+
+public class TestResetInstance extends AdminTestBase
+{
+  @Test
+  public void testResetInstance() throws Exception
+  {
+    String className = TestHelper.getTestClassName();
+    String methodName = TestHelper.getTestMethodName();
+    String clusterName = className + "_" + methodName;
+    final int n = 5;
+
+    System.out.println("START " + clusterName + " at "
+        + new Date(System.currentTimeMillis()));
+
+    TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
+                            "localhost", // participant name prefix
+                            "TestDB", // resource name prefix
+                            1, // resources
+                            10, // partitions per resource
+                            n, // number of nodes
+                            3, // replicas
+                            "MasterSlave",
+                            true); // do rebalance
+    
+//    // start admin thread
+//    AdminThread adminThread = new AdminThread(ZK_ADDR, _port);
+//    adminThread.start();
+
+    // start controller
+    ClusterController controller =
+        new ClusterController(clusterName, "controller_0", ZK_ADDR);
+    controller.syncStart();
+
+    Map<String, Set<String>> errPartitions = new HashMap<String, Set<String>>()
+    {
+      {
+        put("SLAVE-MASTER", TestHelper.setOf("TestDB0_4"));
+        put("OFFLINE-SLAVE", TestHelper.setOf("TestDB0_8"));
+      }
+    };
+
+    // start mock participants
+    MockParticipant[] participants = new MockParticipant[n];
+    for (int i = 0; i < n; i++)
+    {
+      String instanceName = "localhost_" + (12918 + i);
+
+      if (i == 0)
+      {
+        participants[i] =
+            new MockParticipant(clusterName,
+                                instanceName,
+                                ZK_ADDR,
+                                new ErrTransition(errPartitions));
+      }
+      else
+      {
+        participants[i] = new MockParticipant(clusterName, instanceName, ZK_ADDR);
+      }
+      participants[i].syncStart();
+    }
+
+    // verify cluster
+    Map<String, Map<String, String>> errStateMap =
+        new HashMap<String, Map<String, String>>();
+    errStateMap.put("TestDB0", new HashMap<String, String>());
+    errStateMap.get("TestDB0").put("TestDB0_4", "localhost_12918");
+    errStateMap.get("TestDB0").put("TestDB0_8", "localhost_12918");
+    boolean result =
+        ClusterStateVerifier.verifyByZkCallback((new ClusterStateVerifier.BestPossAndExtViewZkVerifier(ZK_ADDR,
+                                                                                                       clusterName,
+                                                                                                       errStateMap)));
+    Assert.assertTrue(result, "Cluster verification fails");
+
+    // reset node "localhost_12918"
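+    // remove the error-injecting transition so the reset can succeed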
+    participants[0].setTransition(null);
+    String hostName = "localhost_12918";
+    String instanceUrl = "http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/instances/" + hostName;
+
+    Map<String, String> paramMap = new HashMap<String, String>();
+    paramMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.resetInstance);
+    TestHelixAdminScenariosRest.assertSuccessPostOperation(instanceUrl, paramMap, false);
+    
+    result =
+        ClusterStateVerifier.verifyByZkCallback((new ClusterStateVerifier.BestPossAndExtViewZkVerifier(ZK_ADDR,
+                                                                                                       clusterName)));
+    Assert.assertTrue(result, "Cluster verification fails");
+    
+    // clean up
+    // wait for all zk callbacks done
+    Thread.sleep(1000);
+//    adminThread.stop();
+    controller.syncStop();
+    for (int i = 0; i < 5; i++)
+    {
+      participants[i].syncStop();
+    }
+
+    System.out.println("END " + clusterName + " at "
+        + new Date(System.currentTimeMillis()));
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/437eb42e/helix-admin-webapp/src/test/java/org/apache/helix/tools/TestResetPartitionState.java
----------------------------------------------------------------------
diff --git a/helix-admin-webapp/src/test/java/org/apache/helix/tools/TestResetPartitionState.java b/helix-admin-webapp/src/test/java/org/apache/helix/tools/TestResetPartitionState.java
new file mode 100644
index 0000000..5715668
--- /dev/null
+++ b/helix-admin-webapp/src/test/java/org/apache/helix/tools/TestResetPartitionState.java
@@ -0,0 +1,213 @@
+/**
+ * Copyright (C) 2012 LinkedIn Inc <op...@linkedin.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.helix.tools;
+
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.helix.NotificationContext;
+import org.apache.helix.TestHelper;
+import org.apache.helix.ZNRecord;
+import org.apache.helix.PropertyKey.Builder;
+import org.apache.helix.manager.zk.ZKHelixDataAccessor;
+import org.apache.helix.manager.zk.ZkBaseDataAccessor;
+import org.apache.helix.mock.controller.ClusterController;
+import org.apache.helix.mock.storage.MockParticipant;
+import org.apache.helix.mock.storage.MockParticipant.ErrTransition;
+import org.apache.helix.model.LiveInstance;
+import org.apache.helix.model.Message;
+import org.apache.helix.tools.ClusterSetup;
+import org.apache.helix.tools.ClusterStateVerifier;
+import org.apache.helix.webapp.resources.JsonParameters;
+import org.testng.Assert;
+import org.testng.annotations.Test;
+
+
+public class TestResetPartitionState extends AdminTestBase
+{
+  String getClusterUrl(String cluster)
+  {
+    return "http://localhost:" + ADMIN_PORT + "/clusters" + "/" + cluster;
+  }
+  
+  String getInstanceUrl(String cluster, String instance)
+  {
+    return "http://localhost:" + ADMIN_PORT + "/clusters/" + cluster + "/instances/" + instance;
+  }
+  
+  String getResourceUrl(String cluster, String resourceGroup)
+  {
+    return "http://localhost:" + ADMIN_PORT + "/clusters/" + cluster + "/resourceGroups/" + resourceGroup;
+  }
+  
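+  // counts ERROR->OFFLINE transitions observed by the mock participant during reset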
+  int _errToOfflineInvoked = 0;
+  class ErrTransitionWithResetCnt extends ErrTransition
+  {
+    public ErrTransitionWithResetCnt(Map<String, Set<String>> errPartitions)
+    {
+      super(errPartitions);
+    }
+
+    @Override
+    public void doTransition(Message message, NotificationContext context)
+    {
+      super.doTransition(message, context);
+      String fromState = message.getFromState();
+      String toState = message.getToState();
+      if (fromState.equals("ERROR") && toState.equals("OFFLINE"))
+      {
+        // System.err.println("doReset() invoked");
+        _errToOfflineInvoked++;
+      }
+    }
+  }
+  
+  @Test()
+  public void testResetPartitionState() throws Exception
+  {
+    String className = TestHelper.getTestClassName();
+    String methodName = TestHelper.getTestMethodName();
+    String clusterName = className + "_" + methodName;
+    final int n = 5;
+
+    System.out.println("START " + clusterName + " at "
+        + new Date(System.currentTimeMillis()));
+
+    TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
+                            "localhost", // participant name prefix
+                            "TestDB", // resource name prefix
+                            1, // resources
+                            10, // partitions per resource
+                            n, // number of nodes
+                            3, // replicas
+                            "MasterSlave",
+                            true); // do rebalance
+
+    // start admin thread
+//    AdminThread adminThread = new AdminThread(ZK_ADDR, _port);
+//    adminThread.start();
+    
+    
+    // start controller
+    ClusterController controller =
+        new ClusterController(clusterName, "controller_0", ZK_ADDR);
+    controller.syncStart();
+
+    Map<String, Set<String>> errPartitions = new HashMap<String, Set<String>>()
+    {
+      {
+        put("SLAVE-MASTER", TestHelper.setOf("TestDB0_4"));
+        put("OFFLINE-SLAVE", TestHelper.setOf("TestDB0_8"));
+      }
+    };
+
+    // start mock participants
+    MockParticipant[] participants = new MockParticipant[n];
+    for (int i = 0; i < n; i++)
+    {
+      String instanceName = "localhost_" + (12918 + i);
+
+      if (i == 0)
+      {
+        participants[i] =
+            new MockParticipant(clusterName,
+                                instanceName,
+                                ZK_ADDR,
+                                new ErrTransition(errPartitions));
+      }
+      else
+      {
+        participants[i] = new MockParticipant(clusterName, instanceName, ZK_ADDR);
+      }
+      participants[i].syncStart();
+    }
+
+    // verify cluster
+    Map<String, Map<String, String>> errStateMap =
+        new HashMap<String, Map<String, String>>();
+    errStateMap.put("TestDB0", new HashMap<String, String>());
+    errStateMap.get("TestDB0").put("TestDB0_4", "localhost_12918");
+    errStateMap.get("TestDB0").put("TestDB0_8", "localhost_12918");
+    boolean result =
+        ClusterStateVerifier.verifyByZkCallback((new ClusterStateVerifier.BestPossAndExtViewZkVerifier(ZK_ADDR,
+                                                                                                       clusterName,
+                                                                                                       errStateMap)));
+    Assert.assertTrue(result, "Cluster verification fails");
+    
+    
+    // resetting a non-existent partition should throw an exception
+    String hostName = "localhost_12918";
+    String instanceUrl = getInstanceUrl(clusterName, hostName);
+
+    Map<String, String> paramMap = new HashMap<String, String>();
+    paramMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.resetPartition);
+    paramMap.put(JsonParameters.PARTITION, "TestDB0_nonExist");
+    paramMap.put(JsonParameters.RESOURCE, "TestDB0");
+    System.out.println("IGNORABLE exception: test reset non-exist partition");
+    TestHelixAdminScenariosRest.assertSuccessPostOperation(instanceUrl, paramMap, true);
+    
+    
+    // reset one error partition
+    errPartitions.clear();  // remove("SLAVE-MASTER");
+    participants[0].setTransition(new ErrTransitionWithResetCnt(errPartitions));
+    clearStatusUpdate(clusterName, "localhost_12918", "TestDB0", "TestDB0_4");
+    _errToOfflineInvoked = 0;
+
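+    // reset both error partitions in one call (the PARTITION value here is a space-separated list)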
+    paramMap.put(JsonParameters.PARTITION, "TestDB0_4 TestDB0_8");
+    TestHelixAdminScenariosRest.assertSuccessPostOperation(instanceUrl, paramMap, false);
+
+    Thread.sleep(400); // wait for the reset to complete
+    System.out.println("IGNORABLE exception: test reset non-error partition");
+    TestHelixAdminScenariosRest.assertSuccessPostOperation(instanceUrl, paramMap, true);
+
+    result =
+        ClusterStateVerifier.verifyByZkCallback(new ClusterStateVerifier.BestPossAndExtViewZkVerifier(ZK_ADDR,
+                                                                                                   clusterName));
+    Assert.assertTrue(result);
+    Assert.assertEquals(_errToOfflineInvoked, 2, "reset() should be invoked 2 times");
+    
+
+    // clean up
+    // wait for all zk callbacks done
+    Thread.sleep(1000);
+//    adminThread.stop();
+    controller.syncStop();
+    for (int i = 0; i < 5; i++)
+    {
+      participants[i].syncStop();
+    }
+
+    System.out.println("END " + clusterName + " at "
+        + new Date(System.currentTimeMillis()));
+  }
+
+  private void clearStatusUpdate(String clusterName, String instance, String resource,
+      String partition)
+  {
+    // clear status update for error partition so verify() will not fail on old
+    // errors
+    ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_gZkClient));
+    Builder keyBuilder = accessor.keyBuilder();
+
+    LiveInstance liveInstance = accessor.getProperty(keyBuilder.liveInstance(instance));
+    accessor.removeProperty(keyBuilder.stateTransitionStatus(instance, liveInstance.getSessionId(), resource, partition));
+
+  }
+  
+  // TODO: throw exception in reset()
+}

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/437eb42e/helix-admin-webapp/src/test/java/org/apache/helix/tools/TestResetResource.java
----------------------------------------------------------------------
diff --git a/helix-admin-webapp/src/test/java/org/apache/helix/tools/TestResetResource.java b/helix-admin-webapp/src/test/java/org/apache/helix/tools/TestResetResource.java
new file mode 100644
index 0000000..e434279
--- /dev/null
+++ b/helix-admin-webapp/src/test/java/org/apache/helix/tools/TestResetResource.java
@@ -0,0 +1,120 @@
+package org.apache.helix.tools;
+
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.helix.TestHelper;
+import org.apache.helix.mock.controller.ClusterController;
+import org.apache.helix.mock.storage.MockParticipant;
+import org.apache.helix.mock.storage.MockParticipant.ErrTransition;
+import org.apache.helix.tools.ClusterSetup;
+import org.apache.helix.tools.ClusterStateVerifier;
+import org.apache.helix.webapp.resources.JsonParameters;
+import org.testng.Assert;
+import org.testng.annotations.Test;
+
+
+public class TestResetResource extends AdminTestBase
+{
+  @Test
+  public void testResetResource() throws Exception
+  {
+    String className = TestHelper.getTestClassName();
+    String methodName = TestHelper.getTestMethodName();
+    String clusterName = className + "_" + methodName;
+    final int n = 5;
+
+    System.out.println("START " + clusterName + " at "
+        + new Date(System.currentTimeMillis()));
+
+    TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
+                            "localhost", // participant name prefix
+                            "TestDB", // resource name prefix
+                            1, // resources
+                            10, // partitions per resource
+                            n, // number of nodes
+                            3, // replicas
+                            "MasterSlave",
+                            true); // do rebalance
+    
+    // start admin thread
+//    AdminThread adminThread = new AdminThread(ZK_ADDR, _port);
+//    adminThread.start();
+
+    // start controller
+    ClusterController controller =
+        new ClusterController(clusterName, "controller_0", ZK_ADDR);
+    controller.syncStart();
+
+    Map<String, Set<String>> errPartitions = new HashMap<String, Set<String>>()
+    {
+      {
+        put("SLAVE-MASTER", TestHelper.setOf("TestDB0_4"));
+        put("OFFLINE-SLAVE", TestHelper.setOf("TestDB0_8"));
+      }
+    };
+
+    // start mock participants
+    MockParticipant[] participants = new MockParticipant[n];
+    for (int i = 0; i < n; i++)
+    {
+      String instanceName = "localhost_" + (12918 + i);
+
+      if (i == 0)
+      {
+        participants[i] =
+            new MockParticipant(clusterName,
+                                instanceName,
+                                ZK_ADDR,
+                                new ErrTransition(errPartitions));
+      }
+      else
+      {
+        participants[i] = new MockParticipant(clusterName, instanceName, ZK_ADDR);
+      }
+      participants[i].syncStart();
+    }
+
+    // verify cluster
+    Map<String, Map<String, String>> errStateMap =
+        new HashMap<String, Map<String, String>>();
+    errStateMap.put("TestDB0", new HashMap<String, String>());
+    errStateMap.get("TestDB0").put("TestDB0_4", "localhost_12918");
+    errStateMap.get("TestDB0").put("TestDB0_8", "localhost_12918");
+    boolean result =
+        ClusterStateVerifier.verifyByZkCallback((new ClusterStateVerifier.BestPossAndExtViewZkVerifier(ZK_ADDR,
+                                                                                                       clusterName,
+                                                                                                       errStateMap)));
+    Assert.assertTrue(result, "Cluster verification fails");
+    
+    // reset resource "TestDB0"
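+    // remove the error-injecting transition before resetting the resource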
+    participants[0].setTransition(null);
+    String resourceName = "TestDB0";
+    String resourceUrl = "http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/resourceGroups/" + resourceName;
+
+    Map<String, String> paramMap = new HashMap<String, String>();
+    paramMap.put(JsonParameters.MANAGEMENT_COMMAND, ClusterSetup.resetResource);
+    TestHelixAdminScenariosRest.assertSuccessPostOperation(resourceUrl, paramMap, false);
+
+    result =
+        ClusterStateVerifier.verifyByZkCallback((new ClusterStateVerifier.BestPossAndExtViewZkVerifier(ZK_ADDR,
+                                                                                                       clusterName)));
+    Assert.assertTrue(result, "Cluster verification fails");
+    
+    // clean up
+    // wait for all zk callbacks done
+    Thread.sleep(1000);
+//    adminThread.stop();
+    controller.syncStop();
+    for (int i = 0; i < 5; i++)
+    {
+      participants[i].syncStop();
+    }
+
+    System.out.println("END " + clusterName + " at "
+        + new Date(System.currentTimeMillis()));
+
+  }
+}