Posted to commits@helix.apache.org by ki...@apache.org on 2012/10/25 01:14:58 UTC

[26/42] Refactoring the package names and removing jsql parser

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/437eb42e/helix-core/src/test/java/org/apache/helix/Mocks.java
----------------------------------------------------------------------
diff --git a/helix-core/src/test/java/org/apache/helix/Mocks.java b/helix-core/src/test/java/org/apache/helix/Mocks.java
new file mode 100644
index 0000000..c5c07a5
--- /dev/null
+++ b/helix-core/src/test/java/org/apache/helix/Mocks.java
@@ -0,0 +1,788 @@
+/**
+ * Copyright (C) 2012 LinkedIn Inc <op...@linkedin.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.helix;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.Future;
+
+import org.I0Itec.zkclient.DataUpdater;
+import org.I0Itec.zkclient.IZkChildListener;
+import org.I0Itec.zkclient.IZkDataListener;
+import org.apache.helix.BaseDataAccessor;
+import org.apache.helix.ClusterMessagingService;
+import org.apache.helix.ConfigAccessor;
+import org.apache.helix.ConfigChangeListener;
+import org.apache.helix.ControllerChangeListener;
+import org.apache.helix.Criteria;
+import org.apache.helix.CurrentStateChangeListener;
+import org.apache.helix.DataAccessor;
+import org.apache.helix.ExternalViewChangeListener;
+import org.apache.helix.HealthStateChangeListener;
+import org.apache.helix.HelixAdmin;
+import org.apache.helix.HelixDataAccessor;
+import org.apache.helix.HelixManager;
+import org.apache.helix.HelixProperty;
+import org.apache.helix.IdealStateChangeListener;
+import org.apache.helix.InstanceType;
+import org.apache.helix.LiveInstanceChangeListener;
+import org.apache.helix.MessageListener;
+import org.apache.helix.NotificationContext;
+import org.apache.helix.PreConnectCallback;
+import org.apache.helix.PropertyKey;
+import org.apache.helix.PropertyType;
+import org.apache.helix.ZNRecord;
+import org.apache.helix.PropertyKey.Builder;
+import org.apache.helix.healthcheck.HealthReportProvider;
+import org.apache.helix.healthcheck.ParticipantHealthReportCollector;
+import org.apache.helix.messaging.AsyncCallback;
+import org.apache.helix.messaging.handling.HelixTaskExecutor;
+import org.apache.helix.messaging.handling.HelixTaskResult;
+import org.apache.helix.messaging.handling.MessageHandlerFactory;
+import org.apache.helix.model.Message;
+import org.apache.helix.participant.StateMachineEngine;
+import org.apache.helix.participant.statemachine.StateModel;
+import org.apache.helix.participant.statemachine.StateModelInfo;
+import org.apache.helix.participant.statemachine.Transition;
+import org.apache.helix.store.PropertyStore;
+import org.apache.helix.store.zk.ZkHelixPropertyStore;
+import org.apache.zookeeper.data.Stat;
+
+
+public class Mocks {
+	public static class MockBaseDataAccessor implements BaseDataAccessor<ZNRecord> {
+		Map<String, ZNRecord> map = new HashMap<String, ZNRecord>();
+
+		@Override
+		public boolean create(String path, ZNRecord record, int options) {
+			// TODO Auto-generated method stub
+			return false;
+		}
+
+		@Override
+		public boolean set(String path, ZNRecord record, int options) {
+			System.err.println("Store.write()" + System.currentTimeMillis());
+			map.put(path, record);
+			try {
+				Thread.sleep(50);
+			} catch (InterruptedException e) {
+				e.printStackTrace();
+			}
+			return true;
+		}
+
+		@Override
+		public boolean update(String path, DataUpdater<ZNRecord> updater,
+				int options) {
+			// TODO Auto-generated method stub
+			return false;
+		}
+
+		@Override
+		public boolean remove(String path, int options) {
+			// TODO Auto-generated method stub
+			return false;
+		}
+
+		@Override
+		public boolean[] createChildren(List<String> paths,
+				List<ZNRecord> records, int options) {
+			// TODO Auto-generated method stub
+			return null;
+		}
+
+		@Override
+		public boolean[] setChildren(List<String> paths,
+				List<ZNRecord> records, int options) {
+			// TODO Auto-generated method stub
+			return null;
+		}
+
+		@Override
+		public boolean[] updateChildren(List<String> paths,
+				List<DataUpdater<ZNRecord>> updaters, int options) {
+			// TODO Auto-generated method stub
+			return null;
+		}
+
+		@Override
+		public boolean[] remove(List<String> paths, int options) {
+			// TODO Auto-generated method stub
+			return null;
+		}
+
+		@Override
+		public ZNRecord get(String path, Stat stat, int options) {
+		    return map.get(path);
+		}
+
+		@Override
+		public List<ZNRecord> get(List<String> paths, List<Stat> stats,
+				int options) {
+			// TODO Auto-generated method stub
+			return null;
+		}
+
+		@Override
+		public List<ZNRecord> getChildren(String parentPath, List<Stat> stats,
+				int options) {
+			// TODO Auto-generated method stub
+			return null;
+		}
+
+		@Override
+		public List<String> getChildNames(String parentPath, int options) {
+			// TODO Auto-generated method stub
+			return null;
+		}
+
+		@Override
+		public boolean exists(String path, int options) {
+			// TODO Auto-generated method stub
+			return false;
+		}
+
+		@Override
+		public boolean[] exists(List<String> paths, int options) {
+			// TODO Auto-generated method stub
+			return null;
+		}
+
+		@Override
+		public Stat[] getStats(List<String> paths, int options) {
+			// TODO Auto-generated method stub
+			return null;
+		}
+
+		@Override
+		public Stat getStat(String path, int options) {
+			// TODO Auto-generated method stub
+			return null;
+		}
+
+    @Override
+    public void subscribeDataChanges(String path, IZkDataListener listener)
+    {
+      // TODO Auto-generated method stub
+      
+    }
+
+    @Override
+    public void unsubscribeDataChanges(String path, IZkDataListener listener)
+    {
+      // TODO Auto-generated method stub
+      
+    }
+
+    @Override
+    public List<String> subscribeChildChanges(String path, IZkChildListener listener)
+    {
+      // TODO Auto-generated method stub
+      return null;
+    }
+
+    @Override
+    public void unsubscribeChildChanges(String path, IZkChildListener listener)
+    {
+      // TODO Auto-generated method stub
+      
+    }
+
+    @Override
+    public void reset()
+    {
+      // TODO Auto-generated method stub
+      
+    }
+
+//		@Override
+//		public boolean subscribe(String path, IZkListener listener) {
+//			// TODO Auto-generated method stub
+//			return false;
+//		}
+//
+//		@Override
+//		public boolean unsubscribe(String path, IZkListener listener) {
+//			// TODO Auto-generated method stub
+//			return false;
+//		}
+
+	}
+
+	public static class MockStateModel extends StateModel {
+		boolean stateModelInvoked = false;
+
+		public void onBecomeMasterFromSlave(Message msg,
+				NotificationContext context) {
+			stateModelInvoked = true;
+		}
+
+		public void onBecomeSlaveFromOffline(Message msg,
+				NotificationContext context) {
+			stateModelInvoked = true;
+		}
+	}
+
+	@StateModelInfo(states = "{'OFFLINE','SLAVE','MASTER'}", initialState = "OFFLINE")
+	public static class MockStateModelAnnotated extends StateModel {
+		boolean stateModelInvoked = false;
+
+		@Transition(from = "SLAVE", to = "MASTER")
+		public void slaveToMaster(Message msg, NotificationContext context) {
+			stateModelInvoked = true;
+		}
+
+		@Transition(from = "OFFLINE", to = "SLAVE")
+		public void offlineToSlave(Message msg, NotificationContext context) {
+			stateModelInvoked = true;
+		}
+	}
+
+	public static class MockHelixTaskExecutor extends HelixTaskExecutor {
+		boolean completionInvoked = false;
+
+		@Override
+		protected void reportCompletion(Message message) {
+			System.out.println("Mocks.MockHelixTaskExecutor.reportCompletion()");
+			completionInvoked = true;
+		}
+
+		public boolean isDone(String taskId) {
+			Future<HelixTaskResult> future = _taskMap.get(taskId);
+			if (future != null) {
+				return future.isDone();
+			}
+			return false;
+		}
+	}
+
+	public static class MockManager implements HelixManager {
+		MockAccessor accessor;
+
+		private final String _clusterName;
+		private final String _sessionId;
+		String _instanceName;
+		ClusterMessagingService _msgSvc;
+		private String _version;
+
+		public MockManager() {
+			this("testCluster-" + Math.random() * 10000 % 999);
+		}
+
+		public MockManager(String clusterName) {
+			_clusterName = clusterName;
+			accessor = new MockAccessor(clusterName);
+			_sessionId = UUID.randomUUID().toString();
+			_instanceName = "testInstanceName";
+			_msgSvc = new MockClusterMessagingService();
+		}
+
+		@Override
+		public void disconnect() {
+
+		}
+
+		@Override
+		public void addIdealStateChangeListener(
+				IdealStateChangeListener listener) throws Exception {
+			// TODO Auto-generated method stub
+
+		}
+
+		@Override
+		public void addLiveInstanceChangeListener(
+				LiveInstanceChangeListener listener) {
+			// TODO Auto-generated method stub
+
+		}
+
+		@Override
+		public void addConfigChangeListener(ConfigChangeListener listener) {
+			// TODO Auto-generated method stub
+
+		}
+
+		@Override
+		public void addMessageListener(MessageListener listener,
+				String instanceName) {
+			// TODO Auto-generated method stub
+
+		}
+
+		@Override
+		public void addCurrentStateChangeListener(
+				CurrentStateChangeListener listener, String instanceName,
+				String sessionId) {
+			// TODO Auto-generated method stub
+
+		}
+
+		@Override
+		public void addExternalViewChangeListener(
+				ExternalViewChangeListener listener) {
+			// TODO Auto-generated method stub
+
+		}
+
+		@Override
+		public DataAccessor getDataAccessor() {
+			return null;
+		}
+
+		@Override
+		public String getClusterName() {
+			return _clusterName;
+		}
+
+		@Override
+		public String getInstanceName() {
+			return _instanceName;
+		}
+
+		@Override
+		public void connect() {
+			// TODO Auto-generated method stub
+
+		}
+
+		@Override
+		public String getSessionId() {
+			return _sessionId;
+		}
+
+		@Override
+		public boolean isConnected() {
+			// TODO Auto-generated method stub
+			return false;
+		}
+
+		@Override
+		public long getLastNotificationTime() {
+			// TODO Auto-generated method stub
+			return 0;
+		}
+
+		@Override
+		public void addControllerListener(ControllerChangeListener listener) {
+			// TODO Auto-generated method stub
+
+		}
+
+		@Override
+		public boolean removeListener(Object listener) {
+			// TODO Auto-generated method stub
+			return false;
+		}
+
+		@Override
+		public HelixAdmin getClusterManagmentTool() {
+			// TODO Auto-generated method stub
+			return null;
+		}
+
+		@Override
+		public ClusterMessagingService getMessagingService() {
+			// TODO Auto-generated method stub
+			return _msgSvc;
+		}
+
+		@Override
+		public ParticipantHealthReportCollector getHealthReportCollector() {
+			// TODO Auto-generated method stub
+			return null;
+		}
+
+		@Override
+		public InstanceType getInstanceType() {
+			return InstanceType.PARTICIPANT;
+		}
+
+		@Override
+		public PropertyStore<ZNRecord> getPropertyStore() {
+			// TODO Auto-generated method stub
+			return null;
+		}
+
+		@Override
+		public String getVersion() {
+			return _version;
+		}
+
+		public void setVersion(String version) {
+			_version = version;
+		}
+
+		@Override
+		public void addHealthStateChangeListener(
+				HealthStateChangeListener listener, String instanceName)
+				throws Exception {
+			// TODO Auto-generated method stub
+
+		}
+
+		@Override
+		public StateMachineEngine getStateMachineEngine() {
+			// TODO Auto-generated method stub
+			return null;
+		}
+
+		@Override
+		public boolean isLeader() {
+			// TODO Auto-generated method stub
+			return false;
+		}
+
+		@Override
+		public ConfigAccessor getConfigAccessor() {
+			// TODO Auto-generated method stub
+			return null;
+		}
+
+		@Override
+		public void startTimerTasks() {
+			// TODO Auto-generated method stub
+
+		}
+
+		@Override
+		public void stopTimerTasks() {
+			// TODO Auto-generated method stub
+
+		}
+
+		@Override
+		public HelixDataAccessor getHelixDataAccessor() {
+			return accessor;
+		}
+
+		@Override
+		public void addPreConnectCallback(PreConnectCallback callback) {
+			// TODO Auto-generated method stub
+
+		}
+
+    @Override
+    public ZkHelixPropertyStore<ZNRecord> getHelixPropertyStore()
+    {
+      // TODO Auto-generated method stub
+      return null;
+    }
+
+	}
+
+	public static class MockAccessor implements HelixDataAccessor // DataAccessor
+	{
+		private final String _clusterName;
+		Map<String, ZNRecord> data = new HashMap<String, ZNRecord>();
+		private final Builder _propertyKeyBuilder;
+
+		public MockAccessor() {
+			this("testCluster-" + Math.random() * 10000 % 999);
+		}
+
+		public MockAccessor(String clusterName) {
+			_clusterName = clusterName;
+			_propertyKeyBuilder = new PropertyKey.Builder(_clusterName);
+		}
+
+		Map<String, ZNRecord> map = new HashMap<String, ZNRecord>();
+
+		@Override
+		// public boolean setProperty(PropertyType type, HelixProperty value,
+		// String... keys)
+		public boolean setProperty(PropertyKey key, HelixProperty value) {
+			// return setProperty(type, value.getRecord(), keys);
+			String path = key.getPath();
+			data.put(path, value.getRecord());
+			return true;
+		}
+
+		// @Override
+		// public boolean setProperty(PropertyType type, ZNRecord value,
+		// String... keys)
+		// {
+		// String path = PropertyPathConfig.getPath(type, _clusterName, keys);
+		// data.put(path, value);
+		// return true;
+		// }
+
+		// @Override
+		// public boolean updateProperty(PropertyType type, HelixProperty value,
+		// String... keys)
+		// {
+		// return updateProperty(type, value.getRecord(), keys);
+		// }
+
+		@Override
+		public <T extends HelixProperty> boolean updateProperty(
+				PropertyKey key, T value) {
+			// String path = PropertyPathConfig.getPath(type, _clusterName,
+			// keys);
+			String path = key.getPath();
+			PropertyType type = key.getType();
+			if (type.updateOnlyOnExists) {
+				if (data.containsKey(path)) {
+					if (type.mergeOnUpdate) {
+						ZNRecord znRecord = new ZNRecord(data.get(path));
+						znRecord.merge(value.getRecord());
+						data.put(path, znRecord);
+					} else {
+						data.put(path, value.getRecord());
+					}
+				}
+			} else {
+				if (type.mergeOnUpdate) {
+					if (data.containsKey(path)) {
+						ZNRecord znRecord = new ZNRecord(data.get(path));
+						znRecord.merge(value.getRecord());
+						data.put(path, znRecord);
+					} else {
+						data.put(path, value.getRecord());
+					}
+				} else {
+					data.put(path, value.getRecord());
+				}
+			}
+
+			return true;
+		}
+
+		// @Override
+		// public <T extends HelixProperty> T getProperty(Class<T> clazz,
+		// PropertyType type,
+		// String... keys)
+		// {
+		// ZNRecord record = getProperty(type, keys);
+		// if (record == null)
+		// {
+		// return null;
+		// }
+		// return HelixProperty.convertToTypedInstance(clazz, record);
+		// }
+
+		@SuppressWarnings("unchecked")
+		@Override
+		public <T extends HelixProperty> T getProperty(PropertyKey key)
+		// public ZNRecord getProperty(PropertyType type, String... keys)
+		{
+			// String path = PropertyPathConfig.getPath(type, _clusterName,
+			// keys);
+			String path = key.getPath();
+			return (T) HelixProperty.convertToTypedInstance(key.getTypeClass(),
+					data.get(path));
+		}
+
+		@Override
+		public boolean removeProperty(PropertyKey key)
+		// public boolean removeProperty(PropertyType type, String... keys)
+		{
+			String path = key.getPath(); // PropertyPathConfig.getPath(type,
+											// _clusterName, keys);
+			data.remove(path);
+			return true;
+		}
+
+		@Override
+		public List<String> getChildNames(PropertyKey propertyKey)
+		// public List<String> getChildNames(PropertyType type, String... keys)
+		{
+			List<String> child = new ArrayList<String>();
+			String path = propertyKey.getPath(); // PropertyPathConfig.getPath(type,
+													// _clusterName, keys);
+			for (String key : data.keySet()) {
+				if (key.startsWith(path)) {
+					String[] keySplit = key.split("\\/");
+					String[] pathSplit = path.split("\\/");
+					if (keySplit.length > pathSplit.length) {
+						child.add(keySplit[pathSplit.length]);
+					}
+				}
+			}
+			return child;
+		}
+
+		// @Override
+		// public <T extends HelixProperty> List<T> getChildValues(Class<T>
+		// clazz, PropertyType type,
+		// String... keys)
+		// {
+		// List<ZNRecord> list = getChildValues(type, keys);
+		// return HelixProperty.convertToTypedList(clazz, list);
+		// }
+
+		@SuppressWarnings("unchecked")
+		@Override
+		public <T extends HelixProperty> List<T> getChildValues(
+				PropertyKey propertyKey)
+		// public List<ZNRecord> getChildValues(PropertyType type, String...
+		// keys)
+		{
+			List<ZNRecord> childs = new ArrayList<ZNRecord>();
+			String path = propertyKey.getPath(); // PropertyPathConfig.getPath(type,
+													// _clusterName, keys);
+			for (String key : data.keySet()) {
+				if (key.startsWith(path)) {
+					String[] keySplit = key.split("\\/");
+					String[] pathSplit = path.split("\\/");
+					if (keySplit.length - pathSplit.length == 1) {
+						ZNRecord record = data.get(key);
+						if (record != null) {
+							childs.add(record);
+						}
+					} else {
+						System.out.println("keySplit:"
+								+ Arrays.toString(keySplit));
+						System.out.println("pathSplit:"
+								+ Arrays.toString(pathSplit));
+					}
+				}
+			}
+			return (List<T>) HelixProperty.convertToTypedList(
+					propertyKey.getTypeClass(), childs);
+		}
+
+		@Override
+		public <T extends HelixProperty> Map<String, T> getChildValuesMap(
+				PropertyKey key)
+		// public <T extends HelixProperty> Map<String, T>
+		// getChildValuesMap(Class<T> clazz,
+		// PropertyType type, String... keys)
+		{
+			List<T> list = getChildValues(key);
+			return HelixProperty.convertListToMap(list);
+		}
+
+		@Override
+		public <T extends HelixProperty> boolean createProperty(
+				PropertyKey key, T value) {
+			// TODO Auto-generated method stub
+			return false;
+		}
+
+		@Override
+		public <T extends HelixProperty> boolean[] createChildren(
+				List<PropertyKey> keys, List<T> children) {
+			// TODO Auto-generated method stub
+			return null;
+		}
+
+		@Override
+		public <T extends HelixProperty> boolean[] setChildren(
+				List<PropertyKey> keys, List<T> children) {
+			// TODO Auto-generated method stub
+			return null;
+		}
+
+		@Override
+		public Builder keyBuilder() {
+			return _propertyKeyBuilder;
+		}
+
+		@Override
+		public BaseDataAccessor getBaseDataAccessor() {
+			// TODO Auto-generated method stub
+			return null;
+		}
+
+		@Override
+		public <T extends HelixProperty> boolean[] updateChildren(
+				List<String> paths, List<DataUpdater<ZNRecord>> updaters,
+				int options) {
+			// TODO Auto-generated method stub
+			return null;
+		}
+		
+	    @Override
+	    public <T extends HelixProperty> List<T> getProperty(List<PropertyKey> keys)
+	    {
+	      List<T> list = new ArrayList<T>();
+	      for (PropertyKey key : keys)
+	      {
+	        @SuppressWarnings("unchecked")
+	        T t = (T)getProperty(key);
+	        list.add(t);
+	      }
+	      return list;
+	    }
+	}
+
+	public static class MockHealthReportProvider extends HealthReportProvider {
+
+		@Override
+		public Map<String, String> getRecentHealthReport() {
+			// TODO Auto-generated method stub
+			return null;
+		}
+
+		@Override
+		public void resetStats() {
+			// TODO Auto-generated method stub
+
+		}
+
+	}
+
+	public static class MockClusterMessagingService implements
+			ClusterMessagingService {
+
+		@Override
+		public int send(Criteria recipientCriteria, Message message) {
+			// TODO Auto-generated method stub
+			return 0;
+		}
+
+		@Override
+		public int send(Criteria recipientCriteria, Message message,
+				AsyncCallback callbackOnReply, int timeOut) {
+			// TODO Auto-generated method stub
+			return 0;
+		}
+
+		@Override
+		public int sendAndWait(Criteria recipientCriteria, Message message,
+				AsyncCallback callbackOnReply, int timeOut) {
+			// TODO Auto-generated method stub
+			return 0;
+		}
+
+		@Override
+		public void registerMessageHandlerFactory(String type,
+				MessageHandlerFactory factory) {
+			// TODO Auto-generated method stub
+
+		}
+
+		@Override
+		public int send(Criteria recipientCriteria, Message message,
+				AsyncCallback callbackOnReply, int timeOut, int retryCount) {
+			// TODO Auto-generated method stub
+			return 0;
+		}
+
+		@Override
+		public int sendAndWait(Criteria recipientCriteria, Message message,
+				AsyncCallback callbackOnReply, int timeOut, int retryCount) {
+			// TODO Auto-generated method stub
+			return 0;
+		}
+
+	}
+}
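
For orientation, a minimal, self-contained sketch of how these mocks are wired together in the tests added later in this patch. The cluster name and the MockUsageSketch class are illustrative; the accessor calls mirror what TestHelixTaskExecutor below does before scheduling a task.

import org.apache.helix.HelixDataAccessor;
import org.apache.helix.Mocks;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.tools.StateModelConfigGenerator;

public class MockUsageSketch {
  public static void main(String[] args) {
    // A mock manager backed by an in-memory accessor; no ZooKeeper is involved.
    Mocks.MockManager manager = new Mocks.MockManager("testCluster");
    HelixDataAccessor accessor = manager.getHelixDataAccessor();
    Builder keyBuilder = accessor.keyBuilder();

    // Write a MasterSlave state model definition and read it back from the mock's map.
    StateModelDefinition stateModelDef =
        new StateModelDefinition(new StateModelConfigGenerator().generateConfigForMasterSlave());
    accessor.setProperty(keyBuilder.stateModelDef("MasterSlave"), stateModelDef);
    StateModelDefinition readBack = accessor.getProperty(keyBuilder.stateModelDef("MasterSlave"));
    System.out.println("state model read back: " + readBack);
  }
}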

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/437eb42e/helix-core/src/test/java/org/apache/helix/ScriptTestHelper.java
----------------------------------------------------------------------
diff --git a/helix-core/src/test/java/org/apache/helix/ScriptTestHelper.java b/helix-core/src/test/java/org/apache/helix/ScriptTestHelper.java
new file mode 100644
index 0000000..f7bd0fe
--- /dev/null
+++ b/helix-core/src/test/java/org/apache/helix/ScriptTestHelper.java
@@ -0,0 +1,62 @@
+package org.apache.helix;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.log4j.Logger;
+import org.testng.Assert;
+
+public class ScriptTestHelper
+{
+  private static final Logger LOG = Logger.getLogger(ScriptTestHelper.class);
+
+  public static final String INTEGRATION_SCRIPT_DIR = "src/main/scripts/integration-test/script";
+  public static final String INTEGRATION_TEST_DIR = "src/main/scripts/integration-test/testcases";
+  public static final long EXEC_TIMEOUT = 1200;
+  
+  public static String getPrefix()
+  {
+    StringBuilder prefixBuilder = new StringBuilder("");
+    String prefix = "";
+    String filepath = INTEGRATION_SCRIPT_DIR;
+    File integrationScriptDir = new File(filepath);
+
+    while (!integrationScriptDir.exists())
+    {
+      prefixBuilder.append("../");
+      prefix = prefixBuilder.toString();
+
+      integrationScriptDir = new File(prefix + filepath);
+
+      // Give up
+      if (prefix.length() > 30)
+      {
+        return "";
+      }
+    }
+    return new File(prefix).getAbsolutePath() + "/";
+  }
+  
+  public static ExternalCommand runCommandLineTest(String testName, String... arguments)
+      throws IOException, InterruptedException, TimeoutException
+  {
+    ExternalCommand cmd = ExternalCommand.executeWithTimeout(new File(getPrefix() + INTEGRATION_TEST_DIR),
+                                             testName, EXEC_TIMEOUT, arguments);
+    int exitValue = cmd.exitValue();
+    String output = cmd.getStringOutput("UTF8");
+
+    if (0 == exitValue)
+    {
+      LOG.info("Test " + testName + " has run. ExitCode=" + exitValue + ". Command output: " + output);
+    }
+    else
+    {
+      LOG.warn("Test " + testName + " is FAILING. ExitCode=" + exitValue + ". Command output: " + output);
+      Assert.fail(output);
+//      return cmd;
+    }
+    return cmd;
+  }
+}
+
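
A minimal usage sketch, assuming the caller runs from a module directory where getPrefix() can locate the integration-test tree; the script name and argument below are hypothetical placeholders. runCommandLineTest fails the TestNG test on a non-zero exit code.

import java.io.IOException;
import java.util.concurrent.TimeoutException;

import org.apache.helix.ExternalCommand;
import org.apache.helix.ScriptTestHelper;

public class ScriptTestSketch {
  public static void main(String[] args)
      throws IOException, InterruptedException, TimeoutException {
    // Runs a script from src/main/scripts/integration-test/testcases, resolved via getPrefix().
    ExternalCommand cmd = ScriptTestHelper.runCommandLineTest("dummy_test.sh", "--help");
    System.out.println("exit=" + cmd.exitValue() + ", output=" + cmd.getStringOutput("UTF8"));
  }
}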

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/437eb42e/helix-core/src/test/java/org/apache/helix/TestClusterviewSerializer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/test/java/org/apache/helix/TestClusterviewSerializer.java b/helix-core/src/test/java/org/apache/helix/TestClusterviewSerializer.java
new file mode 100644
index 0000000..830dafb
--- /dev/null
+++ b/helix-core/src/test/java/org/apache/helix/TestClusterviewSerializer.java
@@ -0,0 +1,73 @@
+/**
+ * Copyright (C) 2012 LinkedIn Inc <op...@linkedin.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.helix;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+
+import org.apache.helix.ClusterView;
+import org.apache.helix.ZNRecord;
+import org.apache.helix.manager.file.StaticFileHelixManager;
+import org.apache.helix.tools.ClusterViewSerializer;
+import org.testng.AssertJUnit;
+import org.testng.annotations.Test;
+
+
+public class TestClusterviewSerializer
+{
+  @Test ()
+  public void testClusterviewSerializer() throws Exception
+  {
+    List<StaticFileHelixManager.DBParam> dbParams = new ArrayList<StaticFileHelixManager.DBParam>();
+    // dbParams.add(new FileBasedClusterManager.DBParam("BizFollow", 1));
+    dbParams.add(new StaticFileHelixManager.DBParam("BizProfile", 1));
+    // dbParams.add(new FileBasedClusterManager.DBParam("EspressoDB", 10));
+    // dbParams.add(new FileBasedClusterManager.DBParam("MailboxDB", 128));
+    // dbParams.add(new FileBasedClusterManager.DBParam("MyDB", 8));
+    // dbParams.add(new FileBasedClusterManager.DBParam("schemata", 1));
+    // String[] nodesInfo = { "localhost:8900", "localhost:8901",
+    // "localhost:8902", "localhost:8903",
+    // "localhost:8904" };
+    String[] nodesInfo = { "localhost:12918" };
+    int replication = 0;
+
+    ClusterView view = StaticFileHelixManager.generateStaticConfigClusterView(nodesInfo, dbParams, replication);
+    view.setExternalView(new LinkedList<ZNRecord>());
+    String file = "/tmp/clusterView.json";
+    // ClusterViewSerializer serializer = new ClusterViewSerializer(file);
+
+    // byte[] bytes;
+    ClusterViewSerializer.serialize(view, new File(file));
+    // String str1 = new String(bytes);
+    ClusterView restoredView = ClusterViewSerializer.deserialize(new File(file));
+    // logger.info(restoredView);
+
+    // byte[] bytes2 = serializer.serialize(restoredView);
+
+    VerifyClusterViews(view, restoredView);
+  }
+
+  public void VerifyClusterViews(ClusterView view1, ClusterView view2)
+  {
+    AssertJUnit.assertEquals(view1.getPropertyLists().size(), view2.getPropertyLists().size());
+    AssertJUnit.assertEquals(view1.getExternalView().size(), view2.getExternalView().size());
+    AssertJUnit.assertEquals(view1.getMemberInstanceMap().size(), view2.getMemberInstanceMap().size());
+    AssertJUnit.assertEquals(view1.getInstances().size(), view2.getInstances().size());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/437eb42e/helix-core/src/test/java/org/apache/helix/TestConfigAccessor.java
----------------------------------------------------------------------
diff --git a/helix-core/src/test/java/org/apache/helix/TestConfigAccessor.java b/helix-core/src/test/java/org/apache/helix/TestConfigAccessor.java
new file mode 100644
index 0000000..c0b2f92
--- /dev/null
+++ b/helix-core/src/test/java/org/apache/helix/TestConfigAccessor.java
@@ -0,0 +1,150 @@
+/**
+ * Copyright (C) 2012 LinkedIn Inc <op...@linkedin.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.helix;
+
+import java.util.List;
+
+import org.apache.helix.ConfigAccessor;
+import org.apache.helix.ConfigScope;
+import org.apache.helix.ConfigScopeBuilder;
+import org.apache.helix.ConfigScope.ConfigScopeProperty;
+import org.testng.Assert;
+import org.testng.annotations.Test;
+
+
+public class TestConfigAccessor extends ZkUnitTestBase
+{
+  final String _className = getShortClassName();
+  final String _clusterName = "CLUSTER_" + _className;
+
+  @Test
+  public void testZkConfigAccessor() throws Exception
+  {
+    TestHelper.setupCluster(_clusterName, ZK_ADDR, 12918, "localhost", "TestDB", 1, 10, 5, 3,
+        "MasterSlave", true);
+
+    ConfigAccessor appConfig = new ConfigAccessor(_gZkClient);
+    ConfigScope clusterScope = new ConfigScopeBuilder().forCluster(_clusterName).build();
+
+    // cluster scope config
+    String clusterConfigValue = appConfig.get(clusterScope, "clusterConfigKey");
+    Assert.assertNull(clusterConfigValue);
+
+    appConfig.set(clusterScope, "clusterConfigKey", "clusterConfigValue");
+    clusterConfigValue = appConfig.get(clusterScope, "clusterConfigKey");
+    Assert.assertEquals(clusterConfigValue, "clusterConfigValue");
+
+    // resource scope config
+    ConfigScope resourceScope = new ConfigScopeBuilder().forCluster(_clusterName)
+        .forResource("testResource").build();
+    appConfig.set(resourceScope, "resourceConfigKey", "resourceConfigValue");
+    String resourceConfigValue = appConfig.get(resourceScope, "resourceConfigKey");
+    Assert.assertEquals(resourceConfigValue, "resourceConfigValue");
+
+    // partition scope config
+    ConfigScope partitionScope = new ConfigScopeBuilder().forCluster(_clusterName)
+        .forResource("testResource").forPartition("testPartition").build();
+    appConfig.set(partitionScope, "partitionConfigKey", "partitionConfigValue");
+    String partitionConfigValue = appConfig.get(partitionScope, "partitionConfigKey");
+    Assert.assertEquals(partitionConfigValue, "partitionConfigValue");
+
+    // participant scope config
+    ConfigScope participantScope = new ConfigScopeBuilder().forCluster(_clusterName)
+        .forParticipant("localhost_12918").build();
+    appConfig.set(participantScope, "participantConfigKey", "participantConfigValue");
+    String participantConfigValue = appConfig.get(participantScope, "participantConfigKey");
+    Assert.assertEquals(participantConfigValue, "participantConfigValue");
+
+    List<String> keys = appConfig.getKeys(ConfigScopeProperty.RESOURCE, _clusterName);
+    Assert.assertEquals(keys.size(), 1, "should be [testResource]");
+    Assert.assertEquals(keys.get(0), "testResource");
+
+    keys = appConfig.getKeys(ConfigScopeProperty.CLUSTER, _clusterName);
+    Assert.assertEquals(keys.size(), 1, "should be [" + _clusterName + "]");
+    Assert.assertEquals(keys.get(0), _clusterName);
+
+    keys = appConfig.getKeys(ConfigScopeProperty.PARTICIPANT, _clusterName);
+    Assert.assertEquals(keys.size(), 5, "should be [localhost_12918~22] sorted");
+    Assert.assertEquals(keys.get(0), "localhost_12918");
+    Assert.assertEquals(keys.get(4), "localhost_12922");
+
+    keys = appConfig.getKeys(ConfigScopeProperty.PARTITION, _clusterName, "testResource");
+    Assert.assertEquals(keys.size(), 1, "should be [testPartition]");
+    Assert.assertEquals(keys.get(0), "testPartition");
+
+    keys = appConfig.getKeys(ConfigScopeProperty.RESOURCE, _clusterName, "testResource");
+    Assert.assertEquals(keys.size(), 1, "should be [resourceConfigKey]");
+    Assert.assertEquals(keys.get(0), "resourceConfigKey");
+
+    keys = appConfig.getKeys(ConfigScopeProperty.CLUSTER, _clusterName, _clusterName);
+    Assert.assertEquals(keys.size(), 1, "should be [clusterConfigKey]");
+    Assert.assertEquals(keys.get(0), "clusterConfigKey");
+
+    keys = appConfig.getKeys(ConfigScopeProperty.PARTICIPANT, _clusterName, "localhost_12918");
+    Assert.assertEquals(keys.size(), 4, "should be [HELIX_ENABLED, HELIX_HOST, HELIX_PORT, participantConfigKey]");
+    Assert.assertEquals(keys.get(3), "participantConfigKey");
+
+    keys = appConfig.getKeys(ConfigScopeProperty.PARTITION, _clusterName, "testResource", "testPartition");
+    Assert.assertEquals(keys.size(), 1, "should be [partitionConfigKey]");
+    Assert.assertEquals(keys.get(0), "partitionConfigKey");
+
+    // test configAccessor.remove()
+    appConfig.remove(clusterScope, "clusterConfigKey");
+    clusterConfigValue = appConfig.get(clusterScope, "clusterConfigKey");
+    Assert.assertNull(clusterConfigValue, "Should be null since it's removed");
+
+    appConfig.remove(resourceScope, "resourceConfigKey");
+    resourceConfigValue = appConfig.get(resourceScope, "resourceConfigKey");
+    Assert.assertNull(resourceConfigValue, "Should be null since it's removed");
+
+    appConfig.remove(partitionScope, "partitionConfigKey");
+    partitionConfigValue = appConfig.get(partitionScope, "partitionConfigKey");
+    Assert.assertNull(partitionConfigValue, "Should be null since it's removed");
+    
+    appConfig.remove(participantScope, "participantConfigKey");
+    participantConfigValue = appConfig.get(participantScope, "participantConfigKey");
+    Assert.assertNull(participantConfigValue, "Should be null since it's removed");
+    
+    // negative tests
+    try
+    {
+      new ConfigScopeBuilder().forPartition("testPartition").build();
+      Assert.fail("Should fail since cluster name is not set");
+    } catch (Exception e)
+    {
+      // OK
+    }
+
+    try
+    {
+      new ConfigScopeBuilder().forCluster("testCluster").forPartition("testPartition").build();
+      Assert.fail("Should fail since resource name is not set");
+    } catch (Exception e)
+    {
+      // OK
+    }
+
+    try
+    {
+      new ConfigScopeBuilder().forParticipant("testParticipant").build();
+      Assert.fail("Should fail since cluster name is not set");
+    } catch (Exception e)
+    {
+      // OK
+    }
+
+  }
+}
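
The scope hierarchy exercised above reduces to the following sketch. The ZkClient is assumed to be already connected with a ZNRecord serializer (for example, the _gZkClient supplied by ZkUnitTestBase); the cluster and key names are illustrative.

import org.apache.helix.ConfigAccessor;
import org.apache.helix.ConfigScope;
import org.apache.helix.ConfigScopeBuilder;
import org.apache.helix.manager.zk.ZkClient;

public class ConfigScopeSketch {
  // Writes and reads back a participant-scoped config value.
  static String readParticipantConfig(ZkClient zkClient, String clusterName) {
    ConfigAccessor configAccessor = new ConfigAccessor(zkClient);
    // Cluster is mandatory; narrow the scope to a single participant.
    ConfigScope participantScope = new ConfigScopeBuilder()
        .forCluster(clusterName)
        .forParticipant("localhost_12918")
        .build();
    configAccessor.set(participantScope, "participantConfigKey", "participantConfigValue");
    return configAccessor.get(participantScope, "participantConfigKey");
  }
}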

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/437eb42e/helix-core/src/test/java/org/apache/helix/TestEspressoStorageClusterIdealState.java
----------------------------------------------------------------------
diff --git a/helix-core/src/test/java/org/apache/helix/TestEspressoStorageClusterIdealState.java b/helix-core/src/test/java/org/apache/helix/TestEspressoStorageClusterIdealState.java
new file mode 100644
index 0000000..0382f4e
--- /dev/null
+++ b/helix-core/src/test/java/org/apache/helix/TestEspressoStorageClusterIdealState.java
@@ -0,0 +1,320 @@
+/**
+ * Copyright (C) 2012 LinkedIn Inc <op...@linkedin.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.helix;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+import org.apache.helix.ZNRecord;
+import org.apache.helix.model.IdealState;
+import org.apache.helix.tools.ClusterSetup;
+import org.apache.helix.tools.IdealStateCalculatorForStorageNode;
+import org.testng.Assert;
+import org.testng.AssertJUnit;
+import org.testng.annotations.Test;
+
+
+
+public class TestEspressoStorageClusterIdealState
+{
+  @Test ()
+  public void testEspressoStorageClusterIdealState() throws Exception
+  {
+    List<String> instanceNames = new ArrayList<String>();
+    for(int i = 0;i < 5; i++)
+    {
+      instanceNames.add("localhost:123" + i);
+    }
+    int partitions = 8, replicas = 0;
+    Map<String, Object> result0 = IdealStateCalculatorForStorageNode.calculateInitialIdealState(instanceNames, partitions, replicas);
+    Verify(result0, partitions,replicas);
+
+    partitions = 8192;
+    replicas = 3;
+
+    instanceNames.clear();
+    for(int i = 0;i < 20; i++)
+    {
+      instanceNames.add("localhost:123" + i);
+    }
+    Map<String, Object> resultOriginal = IdealStateCalculatorForStorageNode.calculateInitialIdealState(instanceNames, partitions, replicas);
+
+    Verify(resultOriginal, partitions,replicas);
+    printStat(resultOriginal);
+
+    Map<String, Object> result1 = IdealStateCalculatorForStorageNode.calculateInitialIdealState(instanceNames, partitions, replicas);
+
+    List<String> instanceNames2 = new ArrayList<String>();
+    for(int i = 30;i < 35; i++)
+    {
+      instanceNames2.add("localhost:123" + i);
+    }
+
+    IdealStateCalculatorForStorageNode.calculateNextIdealState(instanceNames2, result1);
+
+    List<String> instanceNames3 = new ArrayList<String>();
+    for(int i = 35;i < 40; i++)
+    {
+      instanceNames3.add("localhost:123" + i);
+    }
+
+    IdealStateCalculatorForStorageNode.calculateNextIdealState(instanceNames3, result1);
+    Double masterKeepRatio = 0.0, slaveKeepRatio = 0.0;
+    Verify(result1, partitions,replicas);
+    double[] result = compareResult(resultOriginal, result1);
+    masterKeepRatio = result[0];
+    slaveKeepRatio = result[1];
+    Assert.assertTrue(0.66 < masterKeepRatio && 0.67 > masterKeepRatio);
+    Assert.assertTrue(0.66 < slaveKeepRatio && 0.67 > slaveKeepRatio);
+
+  }
+  
+  @Test
+  public void testRebalance2()
+  {
+    int partitions = 1256, replicas = 3;
+    List<String> instanceNames = new ArrayList<String>();
+    
+    for(int i = 0;i < 10; i++)
+    {
+      instanceNames.add("localhost:123" + i);
+    }
+    
+    Map<String, Object> resultOriginal = IdealStateCalculatorForStorageNode.calculateInitialIdealState(instanceNames, partitions, replicas);
+    
+    ZNRecord idealState1 = IdealStateCalculatorForStorageNode.convertToZNRecord(resultOriginal, "TestDB", "MASTER", "SLAVE");
+    
+    Map<String, Object> result1 = ClusterSetup.buildInternalIdealState(new IdealState(idealState1));
+    
+    List<String> instanceNames2 = new ArrayList<String>();
+    for(int i = 30;i < 35; i++)
+    {
+      instanceNames2.add("localhost:123" + i);
+    }
+    
+    Map<String, Object> result2 = IdealStateCalculatorForStorageNode.calculateNextIdealState(instanceNames2, result1);
+    
+    Verify(resultOriginal, partitions,replicas);
+    Verify(result2, partitions,replicas);
+    Double masterKeepRatio = 0.0, slaveKeepRatio = 0.0;
+    double[] result = compareResult(resultOriginal, result2);
+    masterKeepRatio = result[0];
+    slaveKeepRatio = result[1];
+    Assert.assertTrue(0.66 < masterKeepRatio && 0.67 > masterKeepRatio);
+    Assert.assertTrue(0.66 < slaveKeepRatio && 0.67 > slaveKeepRatio);
+  }
+
+  public static void Verify(Map<String, Object> result, int partitions, int replicas)
+  {
+    Map<String, List<Integer>> masterAssignmentMap = (Map<String, List<Integer>>) (result.get("MasterAssignmentMap"));
+    Map<String, Map<String, List<Integer>>> nodeSlaveAssignmentMap = (Map<String, Map<String, List<Integer>>>)(result.get("SlaveAssignmentMap"));
+
+    AssertJUnit.assertTrue( partitions == (Integer)(result.get("partitions")));
+
+    // Verify master partitions covers all master partitions on each node
+    Map<Integer, Integer> masterCounterMap = new TreeMap<Integer, Integer>();
+    for(int i = 0;i<partitions; i++)
+    {
+      masterCounterMap.put(i, 0);
+    }
+
+    int minMasters = Integer.MAX_VALUE, maxMasters = Integer.MIN_VALUE;
+    for(String instanceName : masterAssignmentMap.keySet())
+    {
+      List<Integer> masterList = masterAssignmentMap.get(instanceName);
+      // the assert needs to be changed when weighting is introduced
+      // AssertJUnit.assertTrue(masterList.size() == partitions /masterAssignmentMap.size() | masterList.size() == (partitions /masterAssignmentMap.size()+1) );
+
+      for(Integer x : masterList)
+      {
+        AssertJUnit.assertTrue(masterCounterMap.get(x) == 0);
+        masterCounterMap.put(x,1);
+      }
+      if(minMasters > masterList.size())
+      {
+        minMasters = masterList.size();
+      }
+      if(maxMasters < masterList.size())
+      {
+        maxMasters = masterList.size();
+      }
+    }
+    // Master partition should be evenly distributed most of the time
+    System.out.println("Masters: max: "+maxMasters+" Min:"+ minMasters);
+    // Each master partition should occur only once
+    for(int i = 0;i < partitions; i++)
+    {
+      AssertJUnit.assertTrue(masterCounterMap.get(i) == 1);
+    }
+    AssertJUnit.assertTrue(masterCounterMap.size() == partitions);
+
+    // for each node, verify the master partitions and the slave partition assignment map
+    if(replicas == 0)
+    {
+      AssertJUnit.assertTrue(nodeSlaveAssignmentMap.size() == 0);
+      return;
+    }
+
+    AssertJUnit.assertTrue(masterAssignmentMap.size() == nodeSlaveAssignmentMap.size());
+    for(String instanceName: masterAssignmentMap.keySet())
+    {
+      AssertJUnit.assertTrue(nodeSlaveAssignmentMap.containsKey(instanceName));
+
+      Map<String, List<Integer>> slaveAssignmentMap = nodeSlaveAssignmentMap.get(instanceName);
+      Map<Integer, Integer> slaveCountMap = new TreeMap<Integer, Integer>();
+      List<Integer> masterList = masterAssignmentMap.get(instanceName);
+
+      for(Integer masterPartitionId : masterList)
+      {
+        slaveCountMap.put(masterPartitionId, 0);
+      }
+      // Make sure that masterList are covered replica times by the slave assignment.
+      int minSlaves = Integer.MAX_VALUE, maxSlaves = Integer.MIN_VALUE;
+      for(String hostInstance : slaveAssignmentMap.keySet())
+      {
+        List<Integer> slaveAssignment = slaveAssignmentMap.get(hostInstance);
+        Set<Integer> occurenceSet = new HashSet<Integer>();
+
+        // Each slave should occur only once in the list, since the list is per-node slaves
+        for(Integer slavePartition : slaveAssignment)
+        {
+          AssertJUnit.assertTrue(!occurenceSet.contains(slavePartition));
+          occurenceSet.add(slavePartition);
+
+          slaveCountMap.put(slavePartition, slaveCountMap.get(slavePartition) + 1);
+        }
+        if(minSlaves > slaveAssignment.size())
+        {
+          minSlaves = slaveAssignment.size();
+        }
+        if(maxSlaves < slaveAssignment.size())
+        {
+          maxSlaves = slaveAssignment.size();
+        }
+      }
+      // check if slave distribution is even
+      AssertJUnit.assertTrue(maxSlaves - minSlaves <= 1);
+      // System.out.println("Slaves: max: "+maxSlaves+" Min:"+ minSlaves);
+
+      // for each node, the slave assignment map should cover the masters for exactly replica
+      // times
+      AssertJUnit.assertTrue(slaveCountMap.size() == masterList.size());
+      for(Integer masterPartitionId : masterList)
+      {
+        AssertJUnit.assertTrue(slaveCountMap.get(masterPartitionId) == replicas);
+      }
+    }
+
+  }
+
+  public void printStat(Map<String, Object> result)
+  {
+    // print out master distribution
+
+    // print out slave distribution
+
+  }
+
+  public static double [] compareResult(Map<String, Object> result1, Map<String, Object> result2)
+  {
+    double [] result = new double[2];
+    Map<String, List<Integer>> masterAssignmentMap1 = (Map<String, List<Integer>>) (result1.get("MasterAssignmentMap"));
+    Map<String, Map<String, List<Integer>>> nodeSlaveAssignmentMap1 = (Map<String, Map<String, List<Integer>>>)(result1.get("SlaveAssignmentMap"));
+
+    Map<String, List<Integer>> masterAssignmentMap2 = (Map<String, List<Integer>>) (result2.get("MasterAssignmentMap"));
+    Map<String, Map<String, List<Integer>>> nodeSlaveAssignmentMap2 = (Map<String, Map<String, List<Integer>>>)(result2.get("SlaveAssignmentMap"));
+
+    int commonMasters = 0;
+    int commonSlaves = 0;
+    int partitions = (Integer)(result1.get("partitions"));
+    int replicas = (Integer)(result1.get("replicas"));
+
+    AssertJUnit.assertTrue((Integer)(result2.get("partitions")) == partitions);
+    AssertJUnit.assertTrue((Integer)(result2.get("replicas")) == replicas);
+
+    // masterMap1 maps from partition id to the holder instance name
+    Map<Integer, String> masterMap1 = new TreeMap<Integer, String>();
+    for(String instanceName : masterAssignmentMap1.keySet())
+    {
+      List<Integer> masterList1 = masterAssignmentMap1.get(instanceName);
+      for(Integer partition : masterList1)
+      {
+        AssertJUnit.assertTrue(!masterMap1.containsKey(partition));
+        masterMap1.put(partition, instanceName);
+      }
+    }
+    // go through masterAssignmentMap2 and find out the common number
+    for(String instanceName : masterAssignmentMap2.keySet())
+    {
+      List<Integer> masterList2 = masterAssignmentMap2.get(instanceName);
+      for(Integer partition : masterList2)
+      {
+        if(masterMap1.get(partition).equalsIgnoreCase(instanceName))
+        {
+          commonMasters ++;
+        }
+      }
+    }
+    
+    result[0] = 1.0*commonMasters/partitions;
+    System.out.println(commonMasters + " master partitions are kept, "+ (partitions - commonMasters) + " moved, keep ratio:" + 1.0*commonMasters/partitions);
+    
+    // maps from the partition id to the instance names that holds its slave partition
+    Map<Integer, Set<String>> slaveMap1 = new TreeMap<Integer, Set<String>>();
+    for(String instanceName : nodeSlaveAssignmentMap1.keySet())
+    {
+      Map<String, List<Integer>> slaveAssignment1 = nodeSlaveAssignmentMap1.get(instanceName);
+      for(String slaveHostName : slaveAssignment1.keySet())
+      {
+        List<Integer> slaveList = slaveAssignment1.get(slaveHostName);
+        for(Integer partition : slaveList)
+        {
+          if(!slaveMap1.containsKey(partition))
+          {
+            slaveMap1.put(partition, new TreeSet<String>());
+          }
+          AssertJUnit.assertTrue(!slaveMap1.get(partition).contains(slaveHostName));
+          slaveMap1.get(partition).add(slaveHostName);
+        }
+      }
+    }
+
+    for(String instanceName : nodeSlaveAssignmentMap2.keySet())
+    {
+      Map<String, List<Integer>> slaveAssignment2 = nodeSlaveAssignmentMap2.get(instanceName);
+      for(String slaveHostName : slaveAssignment2.keySet())
+      {
+        List<Integer> slaveList = slaveAssignment2.get(slaveHostName);
+        for(Integer partition : slaveList)
+        {
+          if(slaveMap1.get(partition).contains(slaveHostName))
+          {
+            commonSlaves++;
+          }
+        }
+      }
+    }
+    result[1] = 1.0*commonSlaves/partitions/replicas;
+    System.out.println(commonSlaves + " slave partitions are kept, " + (partitions * replicas - commonSlaves)+ " moved. keep ratio:"+1.0*commonSlaves/partitions/replicas);
+    return result;
+  }
+
+}
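
The calculation pipeline these tests exercise, in outline. The node names, partition count, and replica count mirror testRebalance2; the IdealStateSketch class is illustrative.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.apache.helix.ZNRecord;
import org.apache.helix.model.IdealState;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.tools.IdealStateCalculatorForStorageNode;

public class IdealStateSketch {
  public static void main(String[] args) {
    List<String> nodes = new ArrayList<String>();
    for (int i = 0; i < 10; i++) {
      nodes.add("localhost:123" + i);
    }

    // Initial master/slave placement: 1256 partitions, 3 slave replicas per partition.
    Map<String, Object> ideal =
        IdealStateCalculatorForStorageNode.calculateInitialIdealState(nodes, 1256, 3);

    // Round-trip through a ZNRecord-backed IdealState, as testRebalance2 does.
    ZNRecord record =
        IdealStateCalculatorForStorageNode.convertToZNRecord(ideal, "TestDB", "MASTER", "SLAVE");
    Map<String, Object> internal = ClusterSetup.buildInternalIdealState(new IdealState(record));

    // Expand the cluster; most existing assignments are expected to stay put.
    List<String> newNodes = new ArrayList<String>();
    for (int i = 30; i < 35; i++) {
      newNodes.add("localhost:123" + i);
    }
    IdealStateCalculatorForStorageNode.calculateNextIdealState(newNodes, internal);
  }
}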

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/437eb42e/helix-core/src/test/java/org/apache/helix/TestGetProperty.java
----------------------------------------------------------------------
diff --git a/helix-core/src/test/java/org/apache/helix/TestGetProperty.java b/helix-core/src/test/java/org/apache/helix/TestGetProperty.java
new file mode 100644
index 0000000..63139bc
--- /dev/null
+++ b/helix-core/src/test/java/org/apache/helix/TestGetProperty.java
@@ -0,0 +1,48 @@
+/**
+ * Copyright (C) 2012 LinkedIn Inc <op...@linkedin.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.helix;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Properties;
+
+import org.testng.Assert;
+import org.testng.annotations.Test;
+
+public class TestGetProperty
+{
+  @Test
+  public void testGetProperty()
+  {
+    String version;
+    Properties props = new Properties();
+
+    try
+    {
+      InputStream stream = Thread.currentThread().getContextClassLoader()
+          .getResourceAsStream("cluster-manager-version.properties");
+      props.load(stream);
+      version = props.getProperty("clustermanager.version");
+      Assert.assertNotNull(version);
+      System.out.println("cluster-manager-version:" + version);
+    }
+    catch (IOException e)
+    {
+      // e.printStackTrace();
+      Assert.fail("could not open cluster-manager-version.properties. ", e);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/437eb42e/helix-core/src/test/java/org/apache/helix/TestGroupCommit.java
----------------------------------------------------------------------
diff --git a/helix-core/src/test/java/org/apache/helix/TestGroupCommit.java b/helix-core/src/test/java/org/apache/helix/TestGroupCommit.java
new file mode 100644
index 0000000..697349c
--- /dev/null
+++ b/helix-core/src/test/java/org/apache/helix/TestGroupCommit.java
@@ -0,0 +1,57 @@
+package org.apache.helix;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+import org.apache.helix.BaseDataAccessor;
+import org.apache.helix.GroupCommit;
+import org.apache.helix.ZNRecord;
+
+
+public class TestGroupCommit
+{
+  // @Test
+  public void testGroupCommit() throws InterruptedException
+  {
+    final BaseDataAccessor<ZNRecord> accessor = new Mocks.MockBaseDataAccessor();
+    final GroupCommit commit = new GroupCommit();
+    ExecutorService newFixedThreadPool = Executors.newFixedThreadPool(400);
+    for (int i = 0; i < 2400; i++)
+    {
+      Runnable runnable = new MyClass(accessor, commit, i);
+      newFixedThreadPool.submit(runnable);
+    }
+    Thread.sleep(10000);
+    System.out.println(accessor.get("test", null, 0));
+    System.out.println(accessor.get("test", null, 0).getSimpleFields().size());
+  }
+
+}
+
+class MyClass implements Runnable
+{
+  private final BaseDataAccessor<ZNRecord> store;
+  private final GroupCommit                commit;
+  private final int                        i;
+
+  public MyClass(BaseDataAccessor<ZNRecord> store, GroupCommit commit, int i)
+  {
+    this.store = store;
+    this.commit = commit;
+    this.i = i;
+  }
+
+  @Override
+  public void run()
+  {
+    // System.out.println("START " + System.currentTimeMillis() + " --"
+    // + Thread.currentThread().getId());
+    ZNRecord znRecord = new ZNRecord("test");
+    znRecord.setSimpleField("test_id" + i, "" + i);
+    commit.commit(store, 0, "test", znRecord);
+    store.get("test", null, 0).getSimpleField("");
+    // System.out.println("END " + System.currentTimeMillis() + " --"
+    // + Thread.currentThread().getId());
+  }
+
+}
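
The group-commit pattern behind MyClass, reduced to its essentials; the path and field names are illustrative and the single-threaded call only hints at the batching that the 400-thread test above exercises.

import org.apache.helix.BaseDataAccessor;
import org.apache.helix.GroupCommit;
import org.apache.helix.Mocks;
import org.apache.helix.ZNRecord;

public class GroupCommitSketch {
  public static void main(String[] args) {
    BaseDataAccessor<ZNRecord> accessor = new Mocks.MockBaseDataAccessor();
    GroupCommit commit = new GroupCommit();

    // Each caller contributes a small delta; concurrent commits against the same
    // path ("test") are merged and flushed together by the group committer.
    ZNRecord delta = new ZNRecord("test");
    delta.setSimpleField("test_id_0", "0");
    commit.commit(accessor, 0, "test", delta);

    System.out.println(accessor.get("test", null, 0));
  }
}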

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/437eb42e/helix-core/src/test/java/org/apache/helix/TestHelixTaskExecutor.java
----------------------------------------------------------------------
diff --git a/helix-core/src/test/java/org/apache/helix/TestHelixTaskExecutor.java b/helix-core/src/test/java/org/apache/helix/TestHelixTaskExecutor.java
new file mode 100644
index 0000000..e7db93f
--- /dev/null
+++ b/helix-core/src/test/java/org/apache/helix/TestHelixTaskExecutor.java
@@ -0,0 +1,89 @@
+/**
+ * Copyright (C) 2012 LinkedIn Inc <op...@linkedin.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.helix;
+
+import org.apache.helix.HelixDataAccessor;
+import org.apache.helix.NotificationContext;
+import org.apache.helix.Mocks.MockHelixTaskExecutor;
+import org.apache.helix.Mocks.MockManager;
+import org.apache.helix.Mocks.MockStateModel;
+import org.apache.helix.PropertyKey.Builder;
+import org.apache.helix.messaging.handling.AsyncCallbackService;
+import org.apache.helix.messaging.handling.HelixStateTransitionHandler;
+import org.apache.helix.model.CurrentState;
+import org.apache.helix.model.Message;
+import org.apache.helix.model.StateModelDefinition;
+import org.apache.helix.model.Message.MessageType;
+import org.apache.helix.tools.StateModelConfigGenerator;
+import org.testng.AssertJUnit;
+import org.testng.annotations.Test;
+
+
+public class TestHelixTaskExecutor
+{
+
+  @Test()
+  public void testCMTaskExecutor() throws Exception
+  {
+    System.out.println("START TestHelixTaskExecutor");
+    String msgId = "TestMessageId";
+    Message message = new Message(MessageType.TASK_REPLY, msgId);
+
+    message.setMsgId(msgId);
+    message.setSrcName("cm-instance-0");
+    message.setTgtName("cm-instance-1");
+    message.setTgtSessionId("1234");
+    message.setFromState("Offline");
+    message.setToState("Slave");
+    message.setPartitionName("TestDB_0");
+    message.setResourceName("TestDB");
+    message.setStateModelDef("MasterSlave");
+
+    MockManager manager = new MockManager("clusterName");
+    // DataAccessor accessor = manager.getDataAccessor();
+    HelixDataAccessor accessor = manager.getHelixDataAccessor();
+    StateModelConfigGenerator generator = new StateModelConfigGenerator();
+    StateModelDefinition stateModelDef =
+        new StateModelDefinition(generator.generateConfigForMasterSlave());
+    Builder keyBuilder = accessor.keyBuilder();
+    accessor.setProperty(keyBuilder.stateModelDef("MasterSlave"), stateModelDef);
+
+    MockHelixTaskExecutor executor = new MockHelixTaskExecutor();
+    MockStateModel stateModel = new MockStateModel();
+    NotificationContext context;
+    executor.registerMessageHandlerFactory(MessageType.TASK_REPLY.toString(),
+                                           new AsyncCallbackService());
+    // String clusterName =" testcluster";
+    context = new NotificationContext(manager);
+    CurrentState currentStateDelta = new CurrentState("TestDB");
+    currentStateDelta.setState("TestDB_0", "OFFLINE");
+    HelixStateTransitionHandler handler =
+        new HelixStateTransitionHandler(stateModel,
+                                        message,
+                                        context,
+                                        currentStateDelta,
+                                        executor);
+
+    executor.scheduleTask(message, handler, context);
+    while (!executor.isDone(msgId + "/" + message.getPartitionName()))
+    {
+      Thread.sleep(500);
+    }
+    AssertJUnit.assertTrue(stateModel.stateModelInvoked);
+    System.out.println("END TestHelixTaskExecutor");
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/437eb42e/helix-core/src/test/java/org/apache/helix/TestHelixTaskHandler.java
----------------------------------------------------------------------
diff --git a/helix-core/src/test/java/org/apache/helix/TestHelixTaskHandler.java b/helix-core/src/test/java/org/apache/helix/TestHelixTaskHandler.java
new file mode 100644
index 0000000..c24482f
--- /dev/null
+++ b/helix-core/src/test/java/org/apache/helix/TestHelixTaskHandler.java
@@ -0,0 +1,130 @@
+/**
+ * Copyright (C) 2012 LinkedIn Inc <op...@linkedin.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.helix;
+
+import java.util.Date;
+
+import org.apache.helix.HelixConstants;
+import org.apache.helix.HelixDataAccessor;
+import org.apache.helix.NotificationContext;
+import org.apache.helix.Mocks.MockManager;
+import org.apache.helix.Mocks.MockStateModel;
+import org.apache.helix.Mocks.MockStateModelAnnotated;
+import org.apache.helix.PropertyKey.Builder;
+import org.apache.helix.messaging.handling.HelixStateTransitionHandler;
+import org.apache.helix.messaging.handling.HelixTask;
+import org.apache.helix.messaging.handling.HelixTaskExecutor;
+import org.apache.helix.model.CurrentState;
+import org.apache.helix.model.Message;
+import org.apache.helix.model.StateModelDefinition;
+import org.apache.helix.model.Message.MessageType;
+import org.apache.helix.tools.StateModelConfigGenerator;
+import org.testng.AssertJUnit;
+import org.testng.annotations.Test;
+
+
+public class TestHelixTaskHandler
+{
+  @Test()
+  public void testInvocation() throws Exception
+  {
+    HelixTaskExecutor executor = new HelixTaskExecutor();
+    System.out.println("START TestHelixTaskHandler.testInvocation()");
+    Message message = new Message(MessageType.STATE_TRANSITION, "Some unique id");
+
+    message.setSrcName("cm-instance-0");
+    message.setTgtSessionId("1234");
+    message.setFromState("Offline");
+    message.setToState("Slave");
+    message.setPartitionName("TestDB_0");
+    message.setMsgId("Some unique message id");
+    message.setResourceName("TestDB");
+    message.setTgtName("localhost");
+    message.setStateModelDef("MasterSlave");
+    message.setStateModelFactoryName(HelixConstants.DEFAULT_STATE_MODEL_FACTORY);
+    MockStateModel stateModel = new MockStateModel();
+    NotificationContext context;
+    MockManager manager = new MockManager("clusterName");
+//    DataAccessor accessor = manager.getDataAccessor();
+    HelixDataAccessor accessor = manager.getHelixDataAccessor();
+    StateModelConfigGenerator generator = new StateModelConfigGenerator();
+    StateModelDefinition stateModelDef = new StateModelDefinition(
+        generator.generateConfigForMasterSlave());
+    Builder keyBuilder = accessor.keyBuilder();
+    accessor.setProperty(keyBuilder.stateModelDef("MasterSlave"), stateModelDef);
+
+    context = new NotificationContext(manager);
+    CurrentState currentStateDelta = new CurrentState("TestDB");
+    currentStateDelta.setState("TestDB_0", "OFFLINE");
+
+    HelixStateTransitionHandler stHandler = new HelixStateTransitionHandler(stateModel, message,
+        context, currentStateDelta, executor);
+    HelixTask handler;
+    handler = new HelixTask(message, context, stHandler, executor);
+    handler.call();
+    AssertJUnit.assertTrue(stateModel.stateModelInvoked);
+    System.out.println("END TestHelixTaskHandler.testInvocation() at "
+        + new Date(System.currentTimeMillis()));
+  }
+
+  @Test()
+  public void testInvocationAnnotated() throws Exception
+  {
+    System.out.println("START TestHelixTaskHandler.testInvocationAnnotated() at "
+        + new Date(System.currentTimeMillis()));
+    HelixTaskExecutor executor = new HelixTaskExecutor();
+    Message message = new Message(MessageType.STATE_TRANSITION, "Some unique id");
+    message.setSrcName("cm-instance-0");
+    message.setTgtSessionId("1234");
+    message.setFromState("Offline");
+    message.setToState("Slave");
+    message.setPartitionName("TestDB_0");
+    message.setMsgId("Some unique message id");
+    message.setResourceName("TestDB");
+    message.setTgtName("localhost");
+    message.setStateModelDef("MasterSlave");
+    message.setStateModelFactoryName(HelixConstants.DEFAULT_STATE_MODEL_FACTORY);
+    MockStateModelAnnotated stateModel = new MockStateModelAnnotated();
+    NotificationContext context;
+
+    MockManager manager = new MockManager("clusterName");
+//    DataAccessor accessor = manager.getDataAccessor();
+    HelixDataAccessor accessor = manager.getHelixDataAccessor();
+
+    StateModelConfigGenerator generator = new StateModelConfigGenerator();
+    StateModelDefinition stateModelDef = new StateModelDefinition(
+        generator.generateConfigForMasterSlave());
+//    accessor.setProperty(PropertyType.STATEMODELDEFS, stateModelDef, "MasterSlave");
+    Builder keyBuilder = accessor.keyBuilder();
+    accessor.setProperty(keyBuilder.stateModelDef("MasterSlave"), stateModelDef);
+
+
+    context = new NotificationContext(manager);
+    
+    CurrentState currentStateDelta = new CurrentState("TestDB");
+    currentStateDelta.setState("TestDB_0", "OFFLINE");
+
+    HelixStateTransitionHandler stHandler = new HelixStateTransitionHandler(stateModel, message,
+        context, currentStateDelta, executor);
+
+    HelixTask handler = new HelixTask(message, context, stHandler, executor);
+    handler.call();
+    AssertJUnit.assertTrue(stateModel.stateModelInvoked);
+    System.out.println("END TestHelixTaskHandler.testInvocationAnnotated() at "
+        + new Date(System.currentTimeMillis()));
+  }
+
+}